From af632d9b9386591e98e1f02a65547d445bcf6f21 Mon Sep 17 00:00:00 2001 From: TANIGUCHI Takaki Date: Thu, 16 Aug 2018 15:01:54 +0900 Subject: [PATCH] New upstream version 1.10.78+repack --- PKG-INFO | 3 +- botocore.egg-info/PKG-INFO | 3 +- botocore.egg-info/SOURCES.txt | 10 + botocore/__init__.py | 2 +- botocore/configloader.py | 7 +- .../2017-11-09/service-2.json | 19 +- .../data/apigateway/2015-07-09/service-2.json | 9 + .../data/appstream/2016-12-01/service-2.json | 232 +- .../data/appsync/2017-07-25/service-2.json | 27 +- .../autoscaling/2011-01-01/service-2.json | 324 +- botocore/data/ce/2017-10-25/service-2.json | 99 +- .../cloudfront/2018-06-18/examples-1.json | 5 + .../cloudfront/2018-06-18/paginators-1.json | 32 + .../data/cloudfront/2018-06-18/service-2.json | 5197 +++++++++++++++++ .../data/cloudfront/2018-06-18/waiters-2.json | 47 + .../data/cloudhsmv2/2017-04-28/service-2.json | 58 +- .../data/codebuild/2016-10-06/service-2.json | 23 +- .../data/comprehend/2017-11-27/service-2.json | 223 +- .../data/config/2014-11-12/service-2.json | 6 +- .../data/connect/2017-08-08/service-2.json | 1130 +++- botocore/data/dax/2017-04-19/service-2.json | 83 +- .../data/devicefarm/2015-06-23/service-2.json | 135 +- .../directconnect/2012-10-25/service-2.json | 38 +- .../data/dlm/2018-01-12/paginators-1.json | 3 + botocore/data/dlm/2018-01-12/service-2.json | 558 ++ .../data/dynamodb/2012-08-10/service-2.json | 213 + botocore/data/ec2/2016-11-15/service-2.json | 426 +- botocore/data/ecs/2014-11-13/service-2.json | 209 +- botocore/data/efs/2015-02-01/service-2.json | 125 +- botocore/data/elbv2/2015-12-01/service-2.json | 149 +- botocore/data/emr/2009-03-31/service-2.json | 18 +- botocore/data/endpoints.json | 87 +- botocore/data/es/2015-01-01/service-2.json | 290 +- .../data/glacier/2012-06-01/service-2.json | 18 +- botocore/data/glue/2017-03-31/service-2.json | 27 +- .../data/greengrass/2017-06-07/service-2.json | 2 +- .../data/health/2016-08-04/service-2.json | 20 +- botocore/data/iam/2010-05-08/service-2.json | 188 +- .../data/inspector/2016-02-16/service-2.json | 80 +- botocore/data/iot/2015-05-28/service-2.json | 2152 ++++++- .../iotanalytics/2017-11-27/service-2.json | 58 +- .../2017-09-30/service-2.json | 169 +- .../data/kinesis/2013-12-02/service-2.json | 413 +- .../kinesisvideo/2017-09-30/service-2.json | 9 +- botocore/data/kms/2014-11-01/service-2.json | 82 +- botocore/data/logs/2014-03-28/service-2.json | 35 +- .../mediaconvert/2017-08-29/service-2.json | 160 +- .../mediapackage/2017-10-12/service-2.json | 17 + botocore/data/mq/2017-11-27/paginators-1.json | 3 + botocore/data/mq/2017-11-27/service-2.json | 459 +- .../data/pinpoint/2016-12-01/service-2.json | 469 +- botocore/data/polly/2016-06-10/service-2.json | 357 +- botocore/data/rds/2014-10-31/service-2.json | 232 +- .../data/redshift/2012-12-01/service-2.json | 146 +- .../resource-groups/2017-11-27/service-2.json | 60 +- botocore/data/s3/2006-03-01/service-2.json | 1419 ++--- .../data/sagemaker/2017-07-24/service-2.json | 557 +- .../secretsmanager/2017-10-17/service-2.json | 25 +- .../data/snowball/2016-06-30/service-2.json | 139 +- botocore/data/ssm/2014-11-06/service-2.json | 171 +- .../storagegateway/2013-06-30/service-2.json | 153 +- .../data/transcribe/2017-10-26/service-2.json | 6 +- botocore/handlers.py | 7 + botocore/hooks.py | 43 + botocore/model.py | 5 +- botocore/serialize.py | 36 +- botocore/session.py | 42 +- botocore/utils.py | 12 +- docs/source/conf.py | 2 +- docs/source/topics/paginators.rst | 2 +- setup.py | 1 + 
tests/functional/test_endpoints.py | 2 - tests/functional/test_h2_required.py | 50 + tests/functional/test_kinesis.py | 19 + tests/functional/test_regions.py | 11 +- tests/functional/test_sagemaker.py | 41 + tests/integration/test_smoke.py | 2 +- tests/unit/protocols/input/ec2.json | 21 +- tests/unit/protocols/input/json.json | 33 +- tests/unit/protocols/input/query.json | 19 +- tests/unit/protocols/input/rest-json.json | 84 +- tests/unit/protocols/input/rest-xml.json | 160 +- tests/unit/protocols/output/ec2.json | 68 + tests/unit/protocols/output/json.json | 35 +- tests/unit/protocols/output/query.json | 68 + tests/unit/protocols/output/rest-json.json | 60 +- tests/unit/protocols/output/rest-xml.json | 92 +- tests/unit/test_configloader.py | 20 + tests/unit/test_hooks.py | 63 + tests/unit/test_paginate.py | 4 +- tests/unit/test_session.py | 30 +- tests/unit/test_utils.py | 20 + 92 files changed, 16570 insertions(+), 1898 deletions(-) create mode 100644 botocore/data/cloudfront/2018-06-18/examples-1.json create mode 100644 botocore/data/cloudfront/2018-06-18/paginators-1.json create mode 100644 botocore/data/cloudfront/2018-06-18/service-2.json create mode 100644 botocore/data/cloudfront/2018-06-18/waiters-2.json create mode 100644 botocore/data/dlm/2018-01-12/paginators-1.json create mode 100644 botocore/data/dlm/2018-01-12/service-2.json create mode 100644 botocore/data/mq/2017-11-27/paginators-1.json create mode 100644 tests/functional/test_h2_required.py create mode 100644 tests/functional/test_kinesis.py create mode 100644 tests/functional/test_sagemaker.py diff --git a/PKG-INFO b/PKG-INFO index 069b1160..0eab4492 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.10.55 +Version: 1.10.78 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services @@ -55,3 +55,4 @@ Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index 069b1160..0eab4492 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.10.55 +Version: 1.10.78 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services @@ -55,3 +55,4 @@ Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt index b54d5d6f..0f87551c 100644 --- a/botocore.egg-info/SOURCES.txt +++ b/botocore.egg-info/SOURCES.txt @@ -135,6 +135,10 @@ botocore/data/cloudfront/2017-10-30/examples-1.json botocore/data/cloudfront/2017-10-30/paginators-1.json botocore/data/cloudfront/2017-10-30/service-2.json botocore/data/cloudfront/2017-10-30/waiters-2.json +botocore/data/cloudfront/2018-06-18/examples-1.json +botocore/data/cloudfront/2018-06-18/paginators-1.json +botocore/data/cloudfront/2018-06-18/service-2.json +botocore/data/cloudfront/2018-06-18/waiters-2.json botocore/data/cloudhsm/2014-05-30/examples-1.json botocore/data/cloudhsm/2014-05-30/paginators-1.json botocore/data/cloudhsm/2014-05-30/service-2.json @@ -197,6 +201,8 @@ botocore/data/directconnect/2012-10-25/service-2.json botocore/data/discovery/2015-11-01/examples-1.json botocore/data/discovery/2015-11-01/paginators-1.json botocore/data/discovery/2015-11-01/service-2.json +botocore/data/dlm/2018-01-12/paginators-1.json +botocore/data/dlm/2018-01-12/service-2.json botocore/data/dms/2016-01-01/examples-1.json botocore/data/dms/2016-01-01/paginators-1.json botocore/data/dms/2016-01-01/service-2.json @@ -378,6 +384,7 @@ botocore/data/mgh/2017-05-31/paginators-1.json botocore/data/mgh/2017-05-31/service-2.json botocore/data/mobile/2017-07-01/paginators-1.json botocore/data/mobile/2017-07-01/service-2.json +botocore/data/mq/2017-11-27/paginators-1.json botocore/data/mq/2017-11-27/service-2.json botocore/data/mturk/2017-01-17/paginators-1.json botocore/data/mturk/2017-01-17/service-2.json @@ -689,8 +696,10 @@ tests/functional/test_cognito_idp.py tests/functional/test_credentials.py tests/functional/test_ec2.py tests/functional/test_endpoints.py +tests/functional/test_h2_required.py tests/functional/test_history.py tests/functional/test_iot_data.py +tests/functional/test_kinesis.py tests/functional/test_lex.py tests/functional/test_loaders.py tests/functional/test_machinelearning.py @@ -704,6 +713,7 @@ tests/functional/test_regions.py tests/functional/test_retry.py tests/functional/test_route53.py tests/functional/test_s3.py +tests/functional/test_sagemaker.py tests/functional/test_service_alias.py tests/functional/test_service_names.py tests/functional/test_session.py diff --git a/botocore/__init__.py b/botocore/__init__.py index 0720cb70..7e3b6940 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.10.55' +__version__ = '1.10.78' class NullHandler(logging.Handler): diff --git a/botocore/configloader.py b/botocore/configloader.py index b9a4d542..385f05f7 100644 --- a/botocore/configloader.py +++ b/botocore/configloader.py @@ -171,7 +171,12 @@ def raw_config_parse(config_filename, parse_subsections=True): def _unicode_path(path): if isinstance(path, six.text_type): return path - return path.decode(sys.getfilesystemencoding(), 'replace') + # According to the documentation getfilesystemencoding can return None + # on unix in which case the default encoding is used instead. 
+ filesystem_encoding = sys.getfilesystemencoding() + if filesystem_encoding is None: + filesystem_encoding = sys.getdefaultencoding() + return path.decode(filesystem_encoding, 'replace') def _parse_nested(config_value): diff --git a/botocore/data/alexaforbusiness/2017-11-09/service-2.json b/botocore/data/alexaforbusiness/2017-11-09/service-2.json index 7fca8bae..1e5ccc9c 100644 --- a/botocore/data/alexaforbusiness/2017-11-09/service-2.json +++ b/botocore/data/alexaforbusiness/2017-11-09/service-2.json @@ -356,7 +356,7 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

Lists the Device Event history for up to 30 days. If EventType isn't specified in the request, this returns a list of all device events in reverse chronological order. If EventType is specified, this returns a list of device events for that EventType in reverse chronological order.

" + "documentation":"

Lists the device event history, including device connection status, for up to 30 days.

" }, "ListSkills":{ "name":"ListSkills", @@ -379,7 +379,7 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

Lists all tags for a specific resource.

" + "documentation":"

Lists all tags for the specified resource.

" }, "PutRoomSkillParameter":{ "name":"PutRoomSkillParameter", @@ -1317,6 +1317,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, + "documentation":"

The request failed because this device is no longer registered and therefore no longer managed by this account.

", "exception":true }, "DeviceSerialNumber":{ @@ -1677,15 +1678,15 @@ }, "EventType":{ "shape":"DeviceEventType", - "documentation":"

The event type to filter device events.

" + "documentation":"

The event type to filter device events. If EventType isn't specified, this returns a list of all device events in reverse chronological order. If EventType is specified, this returns a list of device events for that EventType in reverse chronological order.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response only includes results beyond the token, up to the value specified by MaxResults.

" + "documentation":"

An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response only includes results beyond the token, up to the value specified by MaxResults. When the end of results is reached, the response has a value of null.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. Required.

" + "documentation":"

The maximum number of results to include in the response. The default value is 50. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" } } }, @@ -1694,11 +1695,11 @@ "members":{ "DeviceEvents":{ "shape":"DeviceEventList", - "documentation":"

" + "documentation":"

The device events requested for the device ARN.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

" + "documentation":"

The token returned to indicate that there is more data available.

" } } }, @@ -1738,7 +1739,7 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

The ARN of the specific resource for which to list tags. Required.

" + "documentation":"

The ARN of the specified resource for which to list tags.

" }, "NextToken":{ "shape":"NextToken", @@ -1755,7 +1756,7 @@ "members":{ "Tags":{ "shape":"TagList", - "documentation":"

The list of tags requested for the specific resource.

" + "documentation":"

The tags requested for the specified resource.

" }, "NextToken":{ "shape":"NextToken", diff --git a/botocore/data/apigateway/2015-07-09/service-2.json b/botocore/data/apigateway/2015-07-09/service-2.json index d5659a14..cc0dc9f6 100644 --- a/botocore/data/apigateway/2015-07-09/service-2.json +++ b/botocore/data/apigateway/2015-07-09/service-2.json @@ -2114,6 +2114,10 @@ "stage":{ "shape":"String", "documentation":"

API stage name of the associated API stage in a usage plan.

" + }, + "throttle":{ + "shape":"MapOfApiStageThrottleSettings", + "documentation":"

Map containing method-level throttling information for the API stage in a usage plan.

" } }, "documentation":"

API stage name of the associated API stage in a usage plan.

" @@ -5117,6 +5121,11 @@ ] }, "Long":{"type":"long"}, + "MapOfApiStageThrottleSettings":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"ThrottleSettings"} + }, "MapOfHeaderValues":{ "type":"map", "key":{"shape":"String"}, diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index a729e04d..5bc8b0af 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -81,7 +81,8 @@ {"shape":"InvalidRoleException"}, {"shape":"ConcurrentModificationException"}, {"shape":"InvalidParameterCombinationException"}, - {"shape":"IncompatibleImageException"} + {"shape":"IncompatibleImageException"}, + {"shape":"OperationNotPermittedException"} ], "documentation":"

Creates a fleet. A fleet consists of streaming instances that run a specified image.

" }, @@ -102,7 +103,8 @@ {"shape":"InvalidRoleException"}, {"shape":"ConcurrentModificationException"}, {"shape":"InvalidParameterCombinationException"}, - {"shape":"IncompatibleImageException"} + {"shape":"IncompatibleImageException"}, + {"shape":"OperationNotPermittedException"} ], "documentation":"

Creates an image builder. An image builder is a virtual machine that is used to create an image.

The initial state of the builder is PENDING. When it is ready, the state is RUNNING.

" }, @@ -215,6 +217,20 @@ ], "documentation":"

Deletes the specified image builder and releases the capacity.

" }, + "DeleteImagePermissions":{ + "name":"DeleteImagePermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteImagePermissionsRequest"}, + "output":{"shape":"DeleteImagePermissionsResult"}, + "errors":[ + {"shape":"ResourceNotAvailableException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes permissions for the specified private image. After you delete permissions for an image, AWS accounts to which you previously granted these permissions can no longer use the image.

" + }, "DeleteStack":{ "name":"DeleteStack", "http":{ @@ -269,6 +285,19 @@ ], "documentation":"

Retrieves a list that describes one or more specified image builders, if the image builder names are provided. Otherwise, all image builders in the account are described.

" }, + "DescribeImagePermissions":{ + "name":"DescribeImagePermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImagePermissionsRequest"}, + "output":{"shape":"DescribeImagePermissionsResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves a list that describes the permissions for a private image that you own.

" + }, "DescribeImages":{ "name":"DescribeImages", "http":{ @@ -278,6 +307,7 @@ "input":{"shape":"DescribeImagesRequest"}, "output":{"shape":"DescribeImagesResult"}, "errors":[ + {"shape":"InvalidParameterCombinationException"}, {"shape":"ResourceNotFoundException"} ], "documentation":"

Retrieves a list that describes one or more specified images, if the image names are provided. Otherwise, all images in the account are described.

" @@ -494,6 +524,21 @@ ], "documentation":"

Updates the specified fleet.

If the fleet is in the STOPPED state, you can update any attribute except the fleet name. If the fleet is in the RUNNING state, you can update the DisplayName and ComputeCapacity attributes. If the fleet is in the STARTING or STOPPING state, you can't update it.

" }, + "UpdateImagePermissions":{ + "name":"UpdateImagePermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateImagePermissionsRequest"}, + "output":{"shape":"UpdateImagePermissionsResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceNotAvailableException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Adds or updates permissions for the specified private image.

" + }, "UpdateStack":{ "name":"UpdateStack", "http":{ @@ -584,6 +629,10 @@ "type":"string", "pattern":"^arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" }, + "ArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, "AssociateFleetRequest":{ "type":"structure", "required":[ @@ -614,6 +663,16 @@ "USERPOOL" ] }, + "AwsAccountId":{ + "type":"string", + "pattern":"^\\d+$" + }, + "AwsAccountIdList":{ + "type":"list", + "member":{"shape":"AwsAccountId"}, + "max":5, + "min":1 + }, "Boolean":{"type":"boolean"}, "BooleanObject":{"type":"boolean"}, "ComputeCapacity":{ @@ -728,7 +787,6 @@ "type":"structure", "required":[ "Name", - "ImageName", "InstanceType", "ComputeCapacity" ], @@ -741,6 +799,10 @@ "shape":"String", "documentation":"

The name of the image used to create the fleet.

" }, + "ImageArn":{ + "shape":"Arn", + "documentation":"

The ARN of the public, private, or shared image to use.

" + }, "InstanceType":{ "shape":"String", "documentation":"

The instance type to use when launching fleet instances. The following instance types are available:

" @@ -796,7 +858,6 @@ "type":"structure", "required":[ "Name", - "ImageName", "InstanceType" ], "members":{ @@ -808,6 +869,10 @@ "shape":"String", "documentation":"

The name of the image used to create the builder.

" }, + "ImageArn":{ + "shape":"Arn", + "documentation":"

The ARN of the public, private, or shared image to use.

" + }, "InstanceType":{ "shape":"String", "documentation":"

The instance type to use when launching the image builder.

" @@ -1013,6 +1078,28 @@ } } }, + "DeleteImagePermissionsRequest":{ + "type":"structure", + "required":[ + "Name", + "SharedAccountId" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the private image.

" + }, + "SharedAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The 12-digit ID of the AWS account for which to delete image permissions.

" + } + } + }, + "DeleteImagePermissionsResult":{ + "type":"structure", + "members":{ + } + }, "DeleteImageRequest":{ "type":"structure", "required":["Name"], @@ -1133,6 +1220,45 @@ } } }, + "DescribeImagePermissionsRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the private image for which to describe permissions. The image must be one that you own.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum size of each results page.

" + }, + "SharedAwsAccountIds":{ + "shape":"AwsAccountIdList", + "documentation":"

The 12-digit ID of one or more AWS accounts with which the image is shared.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The pagination token to use to retrieve the next page of results. If this value is empty, only the first page is retrieved.

" + } + } + }, + "DescribeImagePermissionsResult":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the private image.

" + }, + "SharedImagePermissionsList":{ + "shape":"SharedImagePermissionsList", + "documentation":"

The permissions for a private image that you own.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The pagination token to use to retrieve the next page of results. If this value is empty, only the first page is retrieved.

" + } + } + }, "DescribeImagesMaxResults":{ "type":"integer", "box":true, @@ -1146,13 +1272,21 @@ "shape":"StringList", "documentation":"

The names of the images to describe.

" }, + "Arns":{ + "shape":"ArnList", + "documentation":"

The ARNs of the public, private, and shared images to describe.

" + }, + "Type":{ + "shape":"VisibilityType", + "documentation":"

The type of image (public, private, or shared) to describe.

" + }, "NextToken":{ "shape":"String", "documentation":"

The pagination token to use to retrieve the next page of results. If this value is empty, only the first page is retrieved.

" }, "MaxResults":{ "shape":"DescribeImagesMaxResults", - "documentation":"

The maximum size of each results page.

" + "documentation":"

The maximum size of each page of results.

" } } }, @@ -1165,7 +1299,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The pagination token used to retrieve the next page of results. If this value is empty, only the first page is retrieved.

" + "documentation":"

The pagination token to use to retrieve the next page of results. If there are no more pages, this value is null.

" } } }, @@ -1355,7 +1489,6 @@ "required":[ "Arn", "Name", - "ImageName", "InstanceType", "ComputeCapacityStatus", "State" @@ -1381,6 +1514,10 @@ "shape":"String", "documentation":"

The name of the image used to create the fleet.

" }, + "ImageArn":{ + "shape":"Arn", + "documentation":"

The ARN for the public, private, or shared image.

" + }, "InstanceType":{ "shape":"String", "documentation":"

The instance type to use when launching fleet instances.

" @@ -1571,6 +1708,10 @@ "AppstreamAgentVersion":{ "shape":"AppstreamAgentVersion", "documentation":"

The version of the AppStream 2.0 agent to use for instances that are launched from this image.

" + }, + "ImagePermissions":{ + "shape":"ImagePermissions", + "documentation":"

The permissions to provide to the destination AWS account for the specified image.

" } }, "documentation":"

Describes an image.

" @@ -1685,6 +1826,20 @@ "type":"list", "member":{"shape":"Image"} }, + "ImagePermissions":{ + "type":"structure", + "members":{ + "allowFleet":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether the image can be used for a fleet.

" + }, + "allowImageBuilder":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether the image can be used for an image builder.

" + } + }, + "documentation":"

Describes the permissions for an image.

" + }, "ImageState":{ "type":"string", "enum":[ @@ -1832,6 +1987,12 @@ } }, "Long":{"type":"long"}, + "MaxResults":{ + "type":"integer", + "box":true, + "max":500, + "min":0 + }, "Metadata":{ "type":"map", "key":{"shape":"String"}, @@ -2029,6 +2190,28 @@ "EXPIRED" ] }, + "SharedImagePermissions":{ + "type":"structure", + "required":[ + "sharedAccountId", + "imagePermissions" + ], + "members":{ + "sharedAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The 12-digit ID of the AWS account with which the image is shared.

" + }, + "imagePermissions":{ + "shape":"ImagePermissions", + "documentation":"

Describes the permissions for a shared image.

" + } + }, + "documentation":"

Describes the permissions that are available to the specified AWS account for a shared image.

" + }, + "SharedImagePermissionsList":{ + "type":"list", + "member":{"shape":"SharedImagePermissions"} + }, "Stack":{ "type":"structure", "required":["Name"], @@ -2347,12 +2530,15 @@ }, "UpdateFleetRequest":{ "type":"structure", - "required":["Name"], "members":{ "ImageName":{ "shape":"String", "documentation":"

The name of the image used to create the fleet.

" }, + "ImageArn":{ + "shape":"Arn", + "documentation":"

The ARN of the public, private, or shared image to use.

" + }, "Name":{ "shape":"String", "documentation":"

A unique name for the fleet.

" @@ -2413,6 +2599,33 @@ } } }, + "UpdateImagePermissionsRequest":{ + "type":"structure", + "required":[ + "Name", + "SharedAccountId", + "ImagePermissions" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the private image.

" + }, + "SharedAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The 12-digit ID of the AWS account for which you want to add or update image permissions.

" + }, + "ImagePermissions":{ + "shape":"ImagePermissions", + "documentation":"

The permissions for the image.

" + } + } + }, + "UpdateImagePermissionsResult":{ + "type":"structure", + "members":{ + } + }, "UpdateStackRequest":{ "type":"structure", "required":["Name"], @@ -2497,7 +2710,8 @@ "type":"string", "enum":[ "PUBLIC", - "PRIVATE" + "PRIVATE", + "SHARED" ] }, "VpcConfig":{ diff --git a/botocore/data/appsync/2017-07-25/service-2.json b/botocore/data/appsync/2017-07-25/service-2.json index db339a28..8d8e1747 100644 --- a/botocore/data/appsync/2017-07-25/service-2.json +++ b/botocore/data/appsync/2017-07-25/service-2.json @@ -613,6 +613,10 @@ "elasticsearchConfig":{ "shape":"ElasticsearchDataSourceConfig", "documentation":"

Amazon Elasticsearch settings.

" + }, + "httpConfig":{ + "shape":"HttpDataSourceConfig", + "documentation":"

HTTP endpoint settings.

" } } }, @@ -762,7 +766,7 @@ }, "type":{ "shape":"DataSourceType", - "documentation":"

The type of the data source.

" + "documentation":"

The type of the data source.

" }, "serviceRoleArn":{ "shape":"String", @@ -779,6 +783,10 @@ "elasticsearchConfig":{ "shape":"ElasticsearchDataSourceConfig", "documentation":"

Amazon Elasticsearch settings.

" + }, + "httpConfig":{ + "shape":"HttpDataSourceConfig", + "documentation":"

HTTP endpoint settings.

" } }, "documentation":"

Describes a data source.

" @@ -789,7 +797,8 @@ "AWS_LAMBDA", "AMAZON_DYNAMODB", "AMAZON_ELASTICSEARCH", - "NONE" + "NONE", + "HTTP" ] }, "DataSources":{ @@ -1212,6 +1221,16 @@ "type":"list", "member":{"shape":"GraphqlApi"} }, + "HttpDataSourceConfig":{ + "type":"structure", + "members":{ + "endpoint":{ + "shape":"String", + "documentation":"

The HTTP URL endpoint. You can specify either the domain name or the IP address and port combination, and the URL scheme must be http(s). If the port is not specified, AWS AppSync uses port 80 by default for HTTP endpoints and port 443 for HTTPS endpoints.

" + } + }, + "documentation":"

Describes an HTTP data source configuration.

" + }, "InternalFailureException":{ "type":"structure", "members":{ @@ -1717,6 +1736,10 @@ "elasticsearchConfig":{ "shape":"ElasticsearchDataSourceConfig", "documentation":"

The new Elasticsearch configuration.

" + }, + "httpConfig":{ + "shape":"HttpDataSourceConfig", + "documentation":"

The new HTTP endpoint configuration.

" } } }, diff --git a/botocore/data/autoscaling/2011-01-01/service-2.json b/botocore/data/autoscaling/2011-01-01/service-2.json index 936ab811..d6a3e2c8 100644 --- a/botocore/data/autoscaling/2011-01-01/service-2.json +++ b/botocore/data/autoscaling/2011-01-01/service-2.json @@ -22,7 +22,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Attaches one or more EC2 instances to the specified Auto Scaling group.

When you attach instances, Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.

If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.

For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Auto Scaling User Guide.

" + "documentation":"

Attaches one or more EC2 instances to the specified Auto Scaling group.

When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.

If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.

For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "AttachLoadBalancerTargetGroups":{ "name":"AttachLoadBalancerTargetGroups", @@ -39,7 +39,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Attaches one or more target groups to the specified Auto Scaling group.

To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups.

For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Auto Scaling User Guide.

" + "documentation":"

Attaches one or more target groups to the specified Auto Scaling group.

To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups.

For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "AttachLoadBalancers":{ "name":"AttachLoadBalancers", @@ -56,7 +56,41 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Attaches one or more Classic Load Balancers to the specified Auto Scaling group.

To attach an Application Load Balancer instead, see AttachLoadBalancerTargetGroups.

To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers.

For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Auto Scaling User Guide.

" + "documentation":"

Attaches one or more Classic Load Balancers to the specified Auto Scaling group.

To attach an Application Load Balancer instead, see AttachLoadBalancerTargetGroups.

To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers.

For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + }, + "BatchDeleteScheduledAction":{ + "name":"BatchDeleteScheduledAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteScheduledActionType"}, + "output":{ + "shape":"BatchDeleteScheduledActionAnswer", + "resultWrapper":"BatchDeleteScheduledActionResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ], + "documentation":"

Deletes one or more scheduled actions for the specified Auto Scaling group.

" + }, + "BatchPutScheduledUpdateGroupAction":{ + "name":"BatchPutScheduledUpdateGroupAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchPutScheduledUpdateGroupActionType"}, + "output":{ + "shape":"BatchPutScheduledUpdateGroupActionAnswer", + "resultWrapper":"BatchPutScheduledUpdateGroupActionResult" + }, + "errors":[ + {"shape":"AlreadyExistsFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ], + "documentation":"

Creates or updates one or more scheduled scaling actions for an Auto Scaling group. When updating a scheduled scaling action, if you leave a parameter unspecified, the corresponding value remains unchanged.

" }, "CompleteLifecycleAction":{ "name":"CompleteLifecycleAction", @@ -72,7 +106,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Completes the lifecycle action for the specified token or instance with the specified result.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling Lifecycle in the Auto Scaling User Guide.

" + "documentation":"

Completes the lifecycle action for the specified token or instance with the specified result.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling Lifecycle in the Amazon EC2 Auto Scaling User Guide.

" }, "CreateAutoScalingGroup":{ "name":"CreateAutoScalingGroup", @@ -87,7 +121,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Creates an Auto Scaling group with the specified name and attributes.

If you exceed your maximum limit of Auto Scaling groups, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Auto Scaling Limits in the Auto Scaling User Guide.

For more information, see Auto Scaling Groups in the Auto Scaling User Guide.

" + "documentation":"

Creates an Auto Scaling group with the specified name and attributes.

If you exceed your maximum limit of Auto Scaling groups, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide.

For more information, see Auto Scaling Groups in the Amazon EC2 Auto Scaling User Guide.

" }, "CreateLaunchConfiguration":{ "name":"CreateLaunchConfiguration", @@ -101,7 +135,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates a launch configuration.

If you exceed your maximum limit of launch configurations, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Auto Scaling Limits in the Auto Scaling User Guide.

For more information, see Launch Configurations in the Auto Scaling User Guide.

" + "documentation":"

Creates a launch configuration.

If you exceed your maximum limit of launch configurations, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide.

For more information, see Launch Configurations in the Amazon EC2 Auto Scaling User Guide.

" }, "CreateOrUpdateTags":{ "name":"CreateOrUpdateTags", @@ -116,7 +150,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ResourceInUseFault"} ], - "documentation":"

Creates or updates tags for the specified Auto Scaling group.

When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and you do not get an error message.

For more information, see Tagging Auto Scaling Groups and Instances in the Auto Scaling User Guide.

" + "documentation":"

Creates or updates tags for the specified Auto Scaling group.

When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and you do not get an error message.

For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" }, "DeleteAutoScalingGroup":{ "name":"DeleteAutoScalingGroup", @@ -130,7 +164,7 @@ {"shape":"ResourceInUseFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Deletes the specified Auto Scaling group.

If the group has instances or scaling activities in progress, you must specify the option to force the deletion in order for it to succeed.

If the group has policies, deleting the group deletes the policies, the underlying alarm actions, and any alarm that no longer has an associated action.

To remove instances from the Auto Scaling group before deleting it, call DetachInstances with the list of instances and the option to decrement the desired capacity so that Auto Scaling does not launch replacement instances.

To terminate all instances before deleting the Auto Scaling group, call UpdateAutoScalingGroup and set the minimum size and desired capacity of the Auto Scaling group to zero.

" + "documentation":"

Deletes the specified Auto Scaling group.

If the group has instances or scaling activities in progress, you must specify the option to force the deletion in order for it to succeed.

If the group has policies, deleting the group deletes the policies, the underlying alarm actions, and any alarm that no longer has an associated action.

To remove instances from the Auto Scaling group before deleting it, call DetachInstances with the list of instances and the option to decrement the desired capacity so that Amazon EC2 Auto Scaling does not launch replacement instances.

To terminate all instances before deleting the Auto Scaling group, call UpdateAutoScalingGroup and set the minimum size and desired capacity of the Auto Scaling group to zero.

" }, "DeleteLaunchConfiguration":{ "name":"DeleteLaunchConfiguration", @@ -224,7 +258,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the current Auto Scaling resource limits for your AWS account.

For information about requesting an increase in these limits, see Auto Scaling Limits in the Auto Scaling User Guide.

" + "documentation":"

Describes the current Auto Scaling resource limits for your AWS account.

For information about requesting an increase in these limits, see Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide.

" }, "DescribeAdjustmentTypes":{ "name":"DescribeAdjustmentTypes", @@ -288,7 +322,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the notification types that are supported by Auto Scaling.

" + "documentation":"

Describes the notification types that are supported by Amazon EC2 Auto Scaling.

" }, "DescribeLaunchConfigurations":{ "name":"DescribeLaunchConfigurations", @@ -320,7 +354,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the available types of lifecycle hooks.

" + "documentation":"

Describes the available types of lifecycle hooks.

The following hook types are supported:

" }, "DescribeLifecycleHooks":{ "name":"DescribeLifecycleHooks", @@ -383,7 +417,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the available CloudWatch metrics for Auto Scaling.

Note that the GroupStandbyInstances metric is not returned by default. You must explicitly request this metric when calling EnableMetricsCollection.

" + "documentation":"

Describes the available CloudWatch metrics for Amazon EC2 Auto Scaling.

Note that the GroupStandbyInstances metric is not returned by default. You must explicitly request this metric when calling EnableMetricsCollection.

" }, "DescribeNotificationConfigurations":{ "name":"DescribeNotificationConfigurations", @@ -499,7 +533,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the termination policies supported by Auto Scaling.

" + "documentation":"

Describes the termination policies supported by Amazon EC2 Auto Scaling.

" }, "DetachInstances":{ "name":"DetachInstances", @@ -515,7 +549,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Removes one or more instances from the specified Auto Scaling group.

After the instances are detached, you can manage them independent of the Auto Scaling group.

If you do not specify the option to decrement the desired capacity, Auto Scaling launches instances to replace the ones that are detached.

If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups.

For more information, see Detach EC2 Instances from Your Auto Scaling Group in the Auto Scaling User Guide.

" + "documentation":"

Removes one or more instances from the specified Auto Scaling group.

After the instances are detached, you can manage them independent of the Auto Scaling group.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached.

If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups.

For more information, see Detach EC2 Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "DetachLoadBalancerTargetGroups":{ "name":"DetachLoadBalancerTargetGroups", @@ -571,7 +605,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Enables group metrics for the specified Auto Scaling group. For more information, see Monitoring Your Auto Scaling Groups and Instances in the Auto Scaling User Guide.

" + "documentation":"

Enables group metrics for the specified Auto Scaling group. For more information, see Monitoring Your Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" }, "EnterStandby":{ "name":"EnterStandby", @@ -587,7 +621,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Moves the specified instances into the standby state.

For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Auto Scaling User Guide.

" + "documentation":"

Moves the specified instances into the standby state.

For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "ExecutePolicy":{ "name":"ExecutePolicy", @@ -616,7 +650,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Moves the specified instances out of the standby state.

For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Auto Scaling User Guide.

" + "documentation":"

Moves the specified instances out of the standby state.

For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "PutLifecycleHook":{ "name":"PutLifecycleHook", @@ -633,7 +667,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates or updates a lifecycle hook for the specified Auto Scaling Group.

A lifecycle hook tells Auto Scaling that you want to perform an action on an instance that is not actively in service; for example, either when the instance launches or before the instance terminates.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling Lifecycle Hooks in the Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails. For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference.

" + "documentation":"

Creates or updates a lifecycle hook for the specified Auto Scaling Group.

A lifecycle hook tells Amazon EC2 Auto Scaling that you want to perform an action on an instance that is not actively in service; for example, either when the instance launches or before the instance terminates.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails. For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference.

" }, "PutNotificationConfiguration":{ "name":"PutNotificationConfiguration", @@ -647,7 +681,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information see Getting SNS Notifications When Your Auto Scaling Group Scales in the Auto Scaling User Guide.

" + "documentation":"

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information, see Getting SNS Notifications When Your Auto Scaling Group Scales in the Auto Scaling User Guide.

" }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -679,7 +713,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates or updates a scheduled scaling action for an Auto Scaling group. When updating a scheduled scaling action, if you leave a parameter unspecified, the corresponding value remains unchanged.

For more information, see Scheduled Scaling in the Auto Scaling User Guide.

" + "documentation":"

Creates or updates a scheduled scaling action for an Auto Scaling group. When updating a scheduled scaling action, if you leave a parameter unspecified, the corresponding value remains unchanged.

For more information, see Scheduled Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "RecordLifecycleActionHeartbeat":{ "name":"RecordLifecycleActionHeartbeat", @@ -695,7 +729,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using PutLifecycleHook.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling Lifecycle in the Auto Scaling User Guide.

" + "documentation":"

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using PutLifecycleHook.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling Lifecycle in the Amazon EC2 Auto Scaling User Guide.

" }, "ResumeProcesses":{ "name":"ResumeProcesses", @@ -708,7 +742,7 @@ {"shape":"ResourceInUseFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Resumes the specified suspended Auto Scaling processes, or all suspended process, for the specified Auto Scaling group.

For more information, see Suspending and Resuming Auto Scaling Processes in the Auto Scaling User Guide.

" + "documentation":"

Resumes the specified suspended automatic scaling processes, or all suspended processes, for the specified Auto Scaling group.

For more information, see Suspending and Resuming Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

" }, "SetDesiredCapacity":{ "name":"SetDesiredCapacity", @@ -721,7 +755,7 @@ {"shape":"ScalingActivityInProgressFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Sets the size of the specified Auto Scaling group.

For more information about desired capacity, see What Is Auto Scaling? in the Auto Scaling User Guide.

" + "documentation":"

Sets the size of the specified Auto Scaling group.

For more information about desired capacity, see What Is Amazon EC2 Auto Scaling? in the Amazon EC2 Auto Scaling User Guide.

" }, "SetInstanceHealth":{ "name":"SetInstanceHealth", @@ -733,7 +767,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Sets the health status of the specified instance.

For more information, see Health Checks in the Auto Scaling User Guide.

" + "documentation":"

Sets the health status of the specified instance.

For more information, see Health Checks in the Amazon EC2 Auto Scaling User Guide.

" }, "SetInstanceProtection":{ "name":"SetInstanceProtection", @@ -750,7 +784,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Updates the instance protection settings of the specified instances.

For more information, see Instance Protection in the Auto Scaling User Guide.

" + "documentation":"

Updates the instance protection settings of the specified instances.

For more information, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" }, "SuspendProcesses":{ "name":"SuspendProcesses", @@ -763,7 +797,7 @@ {"shape":"ResourceInUseFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Suspends the specified Auto Scaling processes, or all processes, for the specified Auto Scaling group.

Note that if you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly.

To resume processes that have been suspended, use ResumeProcesses.

For more information, see Suspending and Resuming Auto Scaling Processes in the Auto Scaling User Guide.

" + "documentation":"

Suspends the specified automatic scaling processes, or all processes, for the specified Auto Scaling group.

Note that if you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly.

To resume processes that have been suspended, use ResumeProcesses.

For more information, see Suspending and Resuming Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

" }, "TerminateInstanceInAutoScalingGroup":{ "name":"TerminateInstanceInAutoScalingGroup", @@ -890,7 +924,7 @@ "documentation":"

The policy adjustment type. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

" } }, - "documentation":"

Describes a policy adjustment type.

For more information, see Dynamic Scaling in the Auto Scaling User Guide.

" + "documentation":"

Describes a policy adjustment type.

For more information, see Dynamic Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "AdjustmentTypes":{ "type":"list", @@ -1058,7 +1092,7 @@ }, "HealthCheckGracePeriod":{ "shape":"HealthCheckGracePeriod", - "documentation":"

The amount of time, in seconds, that Auto Scaling waits before checking the health status of an EC2 instance that has come into service.

" + "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service.

" }, "Instances":{ "shape":"Instances", @@ -1119,7 +1153,7 @@ "members":{ "AutoScalingGroupNames":{ "shape":"AutoScalingGroupNames", - "documentation":"

The names of the Auto Scaling groups. If you omit this parameter, all Auto Scaling groups are described.

" + "documentation":"

The names of the Auto Scaling groups. You can specify up to MaxRecords names. If you omit this parameter, all Auto Scaling groups are described.

" }, "NextToken":{ "shape":"XmlString", @@ -1174,11 +1208,11 @@ }, "LifecycleState":{ "shape":"XmlStringMaxLen32", - "documentation":"

The lifecycle state for the instance. For more information, see Auto Scaling Lifecycle in the Auto Scaling User Guide.

" + "documentation":"

The lifecycle state for the instance. For more information, see Auto Scaling Lifecycle in the Amazon EC2 Auto Scaling User Guide.

" }, "HealthStatus":{ "shape":"XmlStringMaxLen32", - "documentation":"

The last reported health status of this instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and Auto Scaling should terminate and replace it.

" + "documentation":"

The last reported health status of this instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and Amazon EC2 Auto Scaling should terminate and replace it.

" }, "LaunchConfigurationName":{ "shape":"XmlStringMaxLen255", @@ -1190,7 +1224,7 @@ }, "ProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether the instance is protected from termination by Auto Scaling when scaling in.

" + "documentation":"

Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in.

" } }, "documentation":"

Describes an EC2 instance associated with an Auto Scaling group.

" @@ -1221,6 +1255,58 @@ "member":{"shape":"XmlStringMaxLen255"}, "min":1 }, + "BatchDeleteScheduledActionAnswer":{ + "type":"structure", + "members":{ + "FailedScheduledActions":{ + "shape":"FailedScheduledUpdateGroupActionRequests", + "documentation":"

The names of the scheduled actions that could not be deleted, including an error message.

" + } + } + }, + "BatchDeleteScheduledActionType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "ScheduledActionNames" + ], + "members":{ + "AutoScalingGroupName":{ + "shape":"ResourceName", + "documentation":"

The name of the Auto Scaling group.

" + }, + "ScheduledActionNames":{ + "shape":"ScheduledActionNames", + "documentation":"

The names of the scheduled actions to delete. The maximum number allowed is 50.

" + } + } + }, + "BatchPutScheduledUpdateGroupActionAnswer":{ + "type":"structure", + "members":{ + "FailedScheduledUpdateGroupActions":{ + "shape":"FailedScheduledUpdateGroupActionRequests", + "documentation":"

The names of the scheduled actions that could not be created or updated, including an error message.

" + } + } + }, + "BatchPutScheduledUpdateGroupActionType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "ScheduledUpdateGroupActions" + ], + "members":{ + "AutoScalingGroupName":{ + "shape":"ResourceName", + "documentation":"

The name of the Auto Scaling group.

" + }, + "ScheduledUpdateGroupActions":{ + "shape":"ScheduledUpdateGroupActionRequests", + "documentation":"

One or more scheduled actions. The maximum number allowed is 50.
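
A rough sketch of how the new batch operation is surfaced by a botocore client; the group name, action names, and capacities are placeholders:

    import botocore.session

    client = botocore.session.get_session().create_client(
        "autoscaling", region_name="us-east-1")

    response = client.batch_put_scheduled_update_group_action(
        AutoScalingGroupName="my-asg",
        ScheduledUpdateGroupActions=[
            {"ScheduledActionName": "scale-out-morning",
             "Recurrence": "0 8 * * MON-FRI", "DesiredCapacity": 10},
            {"ScheduledActionName": "scale-in-evening",
             "Recurrence": "0 20 * * MON-FRI", "DesiredCapacity": 2},
        ])

    # Partial failures are reported in the response rather than raised.
    for failure in response.get("FailedScheduledUpdateGroupActions", []):
        print(failure["ScheduledActionName"], failure.get("ErrorMessage"))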

" + } + } + }, "BlockDeviceEbsDeleteOnTermination":{"type":"boolean"}, "BlockDeviceEbsEncrypted":{"type":"boolean"}, "BlockDeviceEbsIops":{ @@ -1256,7 +1342,7 @@ }, "NoDevice":{ "shape":"NoDevice", - "documentation":"

Suppresses a device mapping.

If this parameter is true for the root device, the instance might fail the EC2 health check. Auto Scaling launches a replacement instance if the instance fails the health check.

" + "documentation":"

Suppresses a device mapping.

If this parameter is true for the root device, the instance might fail the EC2 health check. Amazon EC2 Auto Scaling launches a replacement instance if the instance fails the health check.

" } }, "documentation":"

Describes a block device mapping.

" @@ -1292,7 +1378,7 @@ }, "LifecycleActionToken":{ "shape":"LifecycleActionToken", - "documentation":"

A universally unique identifier (UUID) that identifies a specific lifecycle action associated with an instance. Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.

" + "documentation":"

A universally unique identifier (UUID) that identifies a specific lifecycle action associated with an instance. Amazon EC2 Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.

" }, "LifecycleActionResult":{ "shape":"LifecycleActionResult", @@ -1327,7 +1413,7 @@ }, "InstanceId":{ "shape":"XmlStringMaxLen19", - "documentation":"

The ID of the instance used to create a launch configuration for the group. You must specify one of the following: an EC2 instance, a launch configuration, or a launch template.

When you specify an ID of an instance, Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, with the exception of the block device mapping.

For more information, see Create an Auto Scaling Group Using an EC2 Instance in the Auto Scaling User Guide.

" + "documentation":"

The ID of the instance used to create a launch configuration for the group. You must specify one of the following: an EC2 instance, a launch configuration, or a launch template.

When you specify an ID of an instance, Amazon EC2 Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, with the exception of the block device mapping.

For more information, see Create an Auto Scaling Group Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide.
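
As a sketch only, creating a group whose launch configuration is derived from an existing instance might look like this; the instance ID and sizes are placeholders:

    import botocore.session

    client = botocore.session.get_session().create_client(
        "autoscaling", region_name="us-east-1")

    client.create_auto_scaling_group(
        AutoScalingGroupName="my-asg",
        InstanceId="i-0123456789abcdef0",
        MinSize=1,
        MaxSize=4,
        DesiredCapacity=2)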

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -1343,7 +1429,7 @@ }, "DefaultCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.

For more information, see Auto Scaling Cooldowns in the Auto Scaling User Guide.

" + "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.

For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", @@ -1351,7 +1437,7 @@ }, "LoadBalancerNames":{ "shape":"LoadBalancerNames", - "documentation":"

One or more Classic Load Balancers. To specify an Application Load Balancer, use TargetGroupARNs instead.

For more information, see Using a Load Balancer With an Auto Scaling Group in the Auto Scaling User Guide.

" + "documentation":"

One or more Classic Load Balancers. To specify an Application Load Balancer, use TargetGroupARNs instead.

For more information, see Using a Load Balancer With an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "TargetGroupARNs":{ "shape":"TargetGroupARNs", @@ -1359,11 +1445,11 @@ }, "HealthCheckType":{ "shape":"XmlStringMaxLen32", - "documentation":"

The service to use for the health checks. The valid values are EC2 and ELB.

By default, health checks use Amazon EC2 instance status checks to determine the health of an instance. For more information, see Health Checks in the Auto Scaling User Guide.

" + "documentation":"

The service to use for the health checks. The valid values are EC2 and ELB.

By default, health checks use Amazon EC2 instance status checks to determine the health of an instance. For more information, see Health Checks in the Amazon EC2 Auto Scaling User Guide.

" }, "HealthCheckGracePeriod":{ "shape":"HealthCheckGracePeriod", - "documentation":"

The amount of time, in seconds, that Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default is 0.

This parameter is required if you are adding an ELB health check.

For more information, see Health Checks in the Auto Scaling User Guide.

" + "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default is 0.

This parameter is required if you are adding an ELB health check.

For more information, see Health Checks in the Amazon EC2 Auto Scaling User Guide.

" }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", @@ -1371,11 +1457,11 @@ }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen2047", - "documentation":"

A comma-separated list of subnet identifiers for your virtual private cloud (VPC).

If you specify subnets and Availability Zones with this call, ensure that the subnets' Availability Zones match the Availability Zones specified.

For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling User Guide.

" + "documentation":"

A comma-separated list of subnet identifiers for your virtual private cloud (VPC).

If you specify subnets and Availability Zones with this call, ensure that the subnets' Availability Zones match the Availability Zones specified.

For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

" }, "TerminationPolicies":{ "shape":"TerminationPolicies", - "documentation":"

One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling User Guide.

" + "documentation":"

One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling User Guide.

" }, "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", @@ -1387,11 +1473,11 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

One or more tags.

For more information, see Tagging Auto Scaling Groups and Instances in the Auto Scaling User Guide.

" + "documentation":"

One or more tags.

For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" }, "ServiceLinkedRoleARN":{ "shape":"ResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. By default, Auto Scaling uses a service-linked role named AWSServiceRoleForAutoScaling, which it creates if it does not exist.

" + "documentation":"

The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. By default, Amazon EC2 Auto Scaling uses a service-linked role named AWSServiceRoleForAutoScaling, which it creates if it does not exist.

" } } }, @@ -1429,7 +1515,7 @@ }, "InstanceId":{ "shape":"XmlStringMaxLen19", - "documentation":"

The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, with the exception of the block device mapping.

If you do not specify InstanceId, you must specify both ImageId and InstanceType.

To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.

For more information, see Create a Launch Configuration Using an EC2 Instance in the Auto Scaling User Guide.

" + "documentation":"

The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, with the exception of the block device mapping.

If you do not specify InstanceId, you must specify both ImageId and InstanceType.

To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.

For more information, see Create a Launch Configuration Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide.

" }, "InstanceType":{ "shape":"XmlStringMaxLen255", @@ -1453,11 +1539,11 @@ }, "SpotPrice":{ "shape":"SpotPrice", - "documentation":"

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Auto Scaling User Guide.

" + "documentation":"

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "IamInstanceProfile":{ "shape":"XmlStringMaxLen1600", - "documentation":"

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.

EC2 instances launched with an IAM role will automatically have AWS security credentials available. You can use IAM roles with Auto Scaling to automatically enable applications running on your EC2 instances to securely access other AWS resources. For more information, see Launch Auto Scaling Instances with an IAM Role in the Auto Scaling User Guide.

" + "documentation":"

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.

EC2 instances launched with an IAM role will automatically have AWS security credentials available. You can use IAM roles with Amazon EC2 Auto Scaling to automatically enable applications running on your EC2 instances to securely access other AWS resources. For more information, see Launch Auto Scaling Instances with an IAM Role in the Amazon EC2 Auto Scaling User Guide.

" }, "EbsOptimized":{ "shape":"EbsOptimized", @@ -1465,11 +1551,11 @@ }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", - "documentation":"

Used for groups that launch instances into a virtual private cloud (VPC). Specifies whether to assign a public IP address to each instance. For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling User Guide.

If you specify this parameter, be sure to specify at least one subnet when you create your group.

Default: If the instance is launched into a default subnet, the default is to assign a public IP address. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address.

" + "documentation":"

Used for groups that launch instances into a virtual private cloud (VPC). Specifies whether to assign a public IP address to each instance. For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

If you specify this parameter, be sure to specify at least one subnet when you create your group.

Default: If the instance is launched into a default subnet, the default is to assign a public IP address. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address.

" }, "PlacementTenancy":{ "shape":"XmlStringMaxLen64", - "documentation":"

The tenancy of the instance. An instance with a tenancy of dedicated runs on single-tenant hardware and can only be launched into a VPC.

You must set the value of this parameter to dedicated if want to launch Dedicated Instances into a shared tenancy VPC (VPC with instance placement tenancy attribute set to default).

If you specify this parameter, be sure to specify at least one subnet when you create your group.

For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling User Guide.

Valid values: default | dedicated

" + "documentation":"

The tenancy of the instance. An instance with a tenancy of dedicated runs on single-tenant hardware and can only be launched into a VPC.

You must set the value of this parameter to dedicated if you want to launch Dedicated Instances into a shared tenancy VPC (VPC with instance placement tenancy attribute set to default).

If you specify this parameter, be sure to specify at least one subnet when you create your group.

For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

Valid values: default | dedicated
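
Tying several of these parameters together, a minimal launch-configuration sketch; every value (name, AMI ID, instance profile, Spot price) is a placeholder:

    import botocore.session

    client = botocore.session.get_session().create_client(
        "autoscaling", region_name="us-east-1")

    client.create_launch_configuration(
        LaunchConfigurationName="my-spot-lc",
        ImageId="ami-12345678",
        InstanceType="m4.large",
        SpotPrice="0.05",
        IamInstanceProfile="my-instance-profile",
        AssociatePublicIpAddress=True,
        PlacementTenancy="default")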

" } } }, @@ -1643,7 +1729,7 @@ "members":{ "InstanceIds":{ "shape":"InstanceIds", - "documentation":"

The instances to describe; up to 50 instance IDs. If you omit this parameter, all Auto Scaling instances are described. If you specify an ID that does not exist, it is ignored with no error.

" + "documentation":"

The IDs of the instances. You can specify up to MaxRecords IDs. If you omit this parameter, all Auto Scaling instances are described. If you specify an ID that does not exist, it is ignored with no error.

" }, "MaxRecords":{ "shape":"MaxRecords", @@ -1832,7 +1918,7 @@ "members":{ "ActivityIds":{ "shape":"ActivityIds", - "documentation":"

The activity IDs of the desired scaling activities. If you omit this parameter, all activities for the past six weeks are described. If you specify an Auto Scaling group, the results are limited to that group. The list of requested activities cannot contain more than 50 items. If unknown activities are requested, they are ignored with no error.

" + "documentation":"

The activity IDs of the desired scaling activities. You can specify up to 50 IDs. If you omit this parameter, all activities for the past six weeks are described. If unknown activities are requested, they are ignored with no error. If you specify an Auto Scaling group, the results are limited to that group.

" }, "AutoScalingGroupName":{ "shape":"ResourceName", @@ -1857,7 +1943,7 @@ }, "ScheduledActionNames":{ "shape":"ScheduledActionNames", - "documentation":"

Describes one or more scheduled actions. If you omit this parameter, all scheduled actions are described. If you specify an unknown scheduled action, it is ignored with no error.

You can describe up to a maximum of 50 instances with a single call. If there are more items to return, the call returns a token. To get the next set of items, repeat the call with the returned token.

" + "documentation":"

The names of one or more scheduled actions. You can specify up to 50 actions. If you omit this parameter, all scheduled actions are described. If you specify an unknown scheduled action, it is ignored with no error.

" }, "StartTime":{ "shape":"TimestampType", @@ -1899,7 +1985,7 @@ "members":{ "TerminationPolicyTypes":{ "shape":"TerminationPolicies", - "documentation":"

The termination policies supported by Auto Scaling (OldestInstance, OldestLaunchConfiguration, NewestInstance, ClosestToNextInstanceHour, and Default).

" + "documentation":"

The termination policies supported by Amazon EC2 Auto Scaling (OldestInstance, OldestLaunchConfiguration, NewestInstance, ClosestToNextInstanceHour, and Default).

" } } }, @@ -2005,7 +2091,7 @@ }, "VolumeType":{ "shape":"BlockDeviceEbsVolumeType", - "documentation":"

The volume type. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Valid values: standard | io1 | gp2

Default: standard

" + "documentation":"

The volume type. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Valid values: standard | io1 | gp2

" }, "DeleteOnTermination":{ "shape":"BlockDeviceEbsDeleteOnTermination", @@ -2107,7 +2193,7 @@ }, "HonorCooldown":{ "shape":"HonorCooldown", - "documentation":"

Indicates whether Auto Scaling waits for the cooldown period to complete before executing the policy.

This parameter is not supported if the policy type is StepScaling.

For more information, see Auto Scaling Cooldowns in the Auto Scaling User Guide.

" + "documentation":"

Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing the policy.

This parameter is not supported if the policy type is StepScaling.

For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.
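
A minimal sketch of executing a simple scaling policy while honoring the cooldown; the group and policy names are placeholders:

    import botocore.session

    client = botocore.session.get_session().create_client(
        "autoscaling", region_name="us-east-1")

    client.execute_policy(
        AutoScalingGroupName="my-asg",
        PolicyName="scale-out-on-cpu",
        HonorCooldown=True)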

" }, "MetricValue":{ "shape":"MetricScale", @@ -2142,6 +2228,29 @@ } } }, + "FailedScheduledUpdateGroupActionRequest":{ + "type":"structure", + "required":["ScheduledActionName"], + "members":{ + "ScheduledActionName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the scheduled action.

" + }, + "ErrorCode":{ + "shape":"XmlStringMaxLen64", + "documentation":"

The error code.

" + }, + "ErrorMessage":{ + "shape":"XmlString", + "documentation":"

The error message accompanying the error code.

" + } + }, + "documentation":"

Describes a scheduled action that could not be created, updated, or deleted.
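
These failure records come back in the batch responses instead of being raised as exceptions; a short sketch of checking them after a batch delete (names are placeholders):

    import botocore.session

    client = botocore.session.get_session().create_client(
        "autoscaling", region_name="us-east-1")

    response = client.batch_delete_scheduled_action(
        AutoScalingGroupName="my-asg",
        ScheduledActionNames=["scale-out-morning", "scale-in-evening"])

    for failure in response.get("FailedScheduledActions", []):
        print(failure["ScheduledActionName"],
              failure.get("ErrorCode"), failure.get("ErrorMessage"))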

" + }, + "FailedScheduledUpdateGroupActionRequests":{ + "type":"list", + "member":{"shape":"FailedScheduledUpdateGroupActionRequest"} + }, "Filter":{ "type":"structure", "members":{ @@ -2189,7 +2298,7 @@ }, "HealthStatus":{ "shape":"XmlStringMaxLen32", - "documentation":"

The last reported health status of the instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and Auto Scaling should terminate and replace it.

" + "documentation":"

The last reported health status of the instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and Amazon EC2 Auto Scaling should terminate and replace it.

" }, "LaunchConfigurationName":{ "shape":"XmlStringMaxLen255", @@ -2201,7 +2310,7 @@ }, "ProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether the instance is protected from termination by Auto Scaling when scaling in.

" + "documentation":"

Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in.

" } }, "documentation":"

Describes an EC2 instance.

" @@ -2397,7 +2506,7 @@ }, "Version":{ "shape":"XmlStringMaxLen255", - "documentation":"

The version number, $Latest, or $Default. If the value is $Latest, Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default, Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default.

" + "documentation":"

The version number, $Latest, or $Default. If the value is $Latest, Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default, Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default.

" } }, "documentation":"

Describes a launch template.

" @@ -2421,11 +2530,11 @@ }, "LifecycleTransition":{ "shape":"LifecycleTransition", - "documentation":"

The state of the EC2 instance to which you want to attach the lifecycle hook. For a list of lifecycle hook types, see DescribeLifecycleHookTypes.

" + "documentation":"

The state of the EC2 instance to which you want to attach the lifecycle hook. The following are possible values: autoscaling:EC2_INSTANCE_LAUNCHING | autoscaling:EC2_INSTANCE_TERMINATING.

" }, "NotificationTargetARN":{ "shape":"ResourceName", - "documentation":"

The ARN of the target that Auto Scaling sends notifications to when an instance is in the transition state for the lifecycle hook. The notification target can be either an SQS queue or an SNS topic.

" + "documentation":"

The ARN of the target that Amazon EC2 Auto Scaling sends notifications to when an instance is in the transition state for the lifecycle hook. The notification target can be either an SQS queue or an SNS topic.

" }, "RoleARN":{ "shape":"ResourceName", @@ -2433,11 +2542,11 @@ }, "NotificationMetadata":{ "shape":"XmlStringMaxLen1023", - "documentation":"

Additional information that you want to include any time Auto Scaling sends a message to the notification target.

" + "documentation":"

Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.

" }, "HeartbeatTimeout":{ "shape":"HeartbeatTimeout", - "documentation":"

The maximum time, in seconds, that can elapse before the lifecycle hook times out. If the lifecycle hook times out, Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.

" + "documentation":"

The maximum time, in seconds, that can elapse before the lifecycle hook times out. If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.

" }, "GlobalTimeout":{ "shape":"GlobalTimeout", @@ -2448,7 +2557,7 @@ "documentation":"

Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The valid values are CONTINUE and ABANDON. The default value is CONTINUE.

" } }, - "documentation":"

Describes a lifecycle hook, which tells Auto Scaling that you want to perform an action whenever it launches instances or whenever it terminates instances.

For more information, see Auto Scaling Lifecycle Hooks in the Auto Scaling User Guide.

" + "documentation":"

Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or whenever it terminates instances.

For more information, see Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleHookNames":{ "type":"list", @@ -2468,15 +2577,15 @@ }, "LifecycleTransition":{ "shape":"LifecycleTransition", - "documentation":"

The state of the EC2 instance to which you want to attach the lifecycle hook. For a list of lifecycle hook types, see DescribeLifecycleHookTypes.

" + "documentation":"

The state of the EC2 instance to which you want to attach the lifecycle hook. The possible values are autoscaling:EC2_INSTANCE_LAUNCHING and autoscaling:EC2_INSTANCE_TERMINATING.

" }, "NotificationMetadata":{ "shape":"XmlStringMaxLen1023", - "documentation":"

Additional information that you want to include any time Auto Scaling sends a message to the notification target.

" + "documentation":"

Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.

" }, "HeartbeatTimeout":{ "shape":"HeartbeatTimeout", - "documentation":"

The maximum time, in seconds, that can elapse before the lifecycle hook times out. If the lifecycle hook times out, Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.

" + "documentation":"

The maximum time, in seconds, that can elapse before the lifecycle hook times out. If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.

" }, "DefaultResult":{ "shape":"LifecycleActionResult", @@ -2484,14 +2593,14 @@ }, "NotificationTargetARN":{ "shape":"NotificationTargetResourceName", - "documentation":"

The ARN of the target that Auto Scaling sends notifications to when an instance is in the transition state for the lifecycle hook. The notification target can be either an SQS queue or an SNS topic.

" + "documentation":"

The ARN of the target that Amazon EC2 Auto Scaling sends notifications to when an instance is in the transition state for the lifecycle hook. The notification target can be either an SQS queue or an SNS topic.

" }, "RoleARN":{ "shape":"ResourceName", "documentation":"

The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.

" } }, - "documentation":"

Describes a lifecycle hook, which tells Auto Scaling that you want to perform an action whenever it launches instances or whenever it terminates instances.

For more information, see Auto Scaling Lifecycle Hooks in the Auto Scaling User Guide.

" + "documentation":"

Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or whenever it terminates instances.

For more information, see Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleHookSpecifications":{ "type":"list", @@ -2759,7 +2868,7 @@ "documentation":"

One of the following processes: Launch | Terminate | AddToLoadBalancer | AlarmNotification | AZRebalance | HealthCheck | ReplaceUnhealthy | ScheduledActions.

" } }, - "documentation":"

Describes a process type.

For more information, see Auto Scaling Processes in the Auto Scaling User Guide.

" + "documentation":"

Describes a process type.

For more information, see Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

" }, "Processes":{ "type":"list", @@ -2799,7 +2908,7 @@ }, "LifecycleTransition":{ "shape":"LifecycleTransition", - "documentation":"

The instance state to which you want to attach the lifecycle hook. For a list of lifecycle hook types, see DescribeLifecycleHookTypes.

This parameter is required for new lifecycle hooks, but optional when updating existing hooks.

" + "documentation":"

The instance state to which you want to attach the lifecycle hook. The possible values are autoscaling:EC2_INSTANCE_LAUNCHING and autoscaling:EC2_INSTANCE_TERMINATING.

This parameter is required for new lifecycle hooks, but optional when updating existing hooks.

" }, "RoleARN":{ "shape":"ResourceName", @@ -2807,15 +2916,15 @@ }, "NotificationTargetARN":{ "shape":"NotificationTargetResourceName", - "documentation":"

The ARN of the notification target that Auto Scaling will use to notify you when an instance is in the transition state for the lifecycle hook. This target can be either an SQS queue or an SNS topic. If you specify an empty string, this overrides the current ARN.

This operation uses the JSON format when sending notifications to an Amazon SQS queue, and an email key/value pair format when sending notifications to an Amazon SNS topic.

When you specify a notification target, Auto Scaling sends it a test message. Test messages contains the following additional key/value pair: \"Event\": \"autoscaling:TEST_NOTIFICATION\".

" + "documentation":"

The ARN of the notification target that Amazon EC2 Auto Scaling will use to notify you when an instance is in the transition state for the lifecycle hook. This target can be either an SQS queue or an SNS topic. If you specify an empty string, this overrides the current ARN.

This operation uses the JSON format when sending notifications to an Amazon SQS queue, and an email key/value pair format when sending notifications to an Amazon SNS topic.

When you specify a notification target, Amazon EC2 Auto Scaling sends it a test message. Test messages contain the following additional key/value pair: \"Event\": \"autoscaling:TEST_NOTIFICATION\".

" }, "NotificationMetadata":{ "shape":"XmlStringMaxLen1023", - "documentation":"

Contains additional information that you want to include any time Auto Scaling sends a message to the notification target.

" + "documentation":"

Contains additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.

" }, "HeartbeatTimeout":{ "shape":"HeartbeatTimeout", - "documentation":"

The maximum time, in seconds, that can elapse before the lifecycle hook times out. The range is from 30 to 7200 seconds. The default is 3600 seconds (1 hour).

If the lifecycle hook times out, Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.

" + "documentation":"

The maximum time, in seconds, that can elapse before the lifecycle hook times out. The range is from 30 to 7200 seconds. The default is 3600 seconds (1 hour).

If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.
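
A sketch of the full lifecycle-hook flow under these timeout rules; the hook name, group, and instance ID are placeholders:

    import botocore.session

    client = botocore.session.get_session().create_client(
        "autoscaling", region_name="us-east-1")

    client.put_lifecycle_hook(
        LifecycleHookName="wait-for-bootstrap",
        AutoScalingGroupName="my-asg",
        LifecycleTransition="autoscaling:EC2_INSTANCE_LAUNCHING",
        HeartbeatTimeout=900,
        DefaultResult="ABANDON")

    # If bootstrapping runs long, extend the timeout...
    client.record_lifecycle_action_heartbeat(
        LifecycleHookName="wait-for-bootstrap",
        AutoScalingGroupName="my-asg",
        InstanceId="i-0123456789abcdef0")

    # ...and complete the action once the instance is ready.
    client.complete_lifecycle_action(
        LifecycleHookName="wait-for-bootstrap",
        AutoScalingGroupName="my-asg",
        InstanceId="i-0123456789abcdef0",
        LifecycleActionResult="CONTINUE")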

" }, "DefaultResult":{ "shape":"LifecycleActionResult", @@ -2841,7 +2950,7 @@ }, "NotificationTypes":{ "shape":"AutoScalingNotificationTypes", - "documentation":"

The type of event that will cause the notification to be sent. For details about notification types supported by Auto Scaling, see DescribeAutoScalingNotificationTypes.

" + "documentation":"

The type of event that will cause the notification to be sent. For details about notification types supported by Amazon EC2 Auto Scaling, see DescribeAutoScalingNotificationTypes.

" } } }, @@ -2866,7 +2975,7 @@ }, "AdjustmentType":{ "shape":"XmlStringMaxLen255", - "documentation":"

The adjustment type. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

This parameter is supported if the policy type is SimpleScaling or StepScaling.

For more information, see Dynamic Scaling in the Auto Scaling User Guide.

" + "documentation":"

The adjustment type. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

This parameter is supported if the policy type is SimpleScaling or StepScaling.

For more information, see Dynamic Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "MinAdjustmentStep":{ "shape":"MinAdjustmentStep", @@ -2882,7 +2991,7 @@ }, "Cooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.

This parameter is supported if the policy type is SimpleScaling.

For more information, see Auto Scaling Cooldowns in the Auto Scaling User Guide.

" + "documentation":"

The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.

This parameter is supported if the policy type is SimpleScaling.

For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

" }, "MetricAggregationType":{ "shape":"XmlStringMaxLen32", @@ -2923,15 +3032,15 @@ }, "StartTime":{ "shape":"TimestampType", - "documentation":"

The time for this action to start, in \"YYYY-MM-DDThh:mm:ssZ\" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z).

If you specify Recurrence and StartTime, Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.

If you try to schedule your action in the past, Auto Scaling returns an error message.

" + "documentation":"

The time for this action to start, in \"YYYY-MM-DDThh:mm:ssZ\" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z).

If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.

If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error message.

" }, "EndTime":{ "shape":"TimestampType", - "documentation":"

The time for the recurring schedule to end. Auto Scaling does not perform the action after this time.

" + "documentation":"

The time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

" }, "Recurrence":{ "shape":"XmlStringMaxLen255", - "documentation":"

The recurring schedule for this action, in Unix cron syntax format. For more information, see Cron in Wikipedia.

" + "documentation":"

The recurring schedule for this action, in Unix cron syntax format. For more information about this format, see Crontab.
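
A minimal sketch of a recurring scheduled action expressed in cron syntax (times are evaluated in UTC); the names and sizes are placeholders:

    import botocore.session

    client = botocore.session.get_session().create_client(
        "autoscaling", region_name="us-east-1")

    client.put_scheduled_update_group_action(
        AutoScalingGroupName="my-asg",
        ScheduledActionName="weekday-scale-out",
        Recurrence="30 7 * * MON-FRI",   # 07:30 UTC, Monday through Friday
        MinSize=2,
        MaxSize=10,
        DesiredCapacity=6)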

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -2969,7 +3078,7 @@ }, "LifecycleActionToken":{ "shape":"LifecycleActionToken", - "documentation":"

A token that uniquely identifies a specific lifecycle action associated with an instance. Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.

" + "documentation":"

A token that uniquely identifies a specific lifecycle action associated with an instance. Amazon EC2 Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.

" }, "InstanceId":{ "shape":"XmlStringMaxLen19", @@ -3189,7 +3298,46 @@ "documentation":"

The number of instances you prefer to maintain in the group.

" } }, - "documentation":"

Describes a scheduled update to an Auto Scaling group.

" + "documentation":"

Describes a scheduled scaling action. Used in response to DescribeScheduledActions.

" + }, + "ScheduledUpdateGroupActionRequest":{ + "type":"structure", + "required":["ScheduledActionName"], + "members":{ + "ScheduledActionName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the scaling action.

" + }, + "StartTime":{ + "shape":"TimestampType", + "documentation":"

The time for the action to start, in \"YYYY-MM-DDThh:mm:ssZ\" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z).

If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.

If you try to schedule the action in the past, Amazon EC2 Auto Scaling returns an error message.

" + }, + "EndTime":{ + "shape":"TimestampType", + "documentation":"

The time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

" + }, + "Recurrence":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The recurring schedule for the action, in Unix cron syntax format. For more information about this format, see Crontab.

" + }, + "MinSize":{ + "shape":"AutoScalingGroupMinSize", + "documentation":"

The minimum size of the group.

" + }, + "MaxSize":{ + "shape":"AutoScalingGroupMaxSize", + "documentation":"

The maximum size of the group.

" + }, + "DesiredCapacity":{ + "shape":"AutoScalingGroupDesiredCapacity", + "documentation":"

The number of EC2 instances that should be running in the group.

" + } + }, + "documentation":"

Describes one or more scheduled scaling action updates for a specified Auto Scaling group. Used in combination with BatchPutScheduledUpdateGroupAction.

When updating a scheduled scaling action, all optional parameters are left unchanged if not specified.

" + }, + "ScheduledUpdateGroupActionRequests":{ + "type":"list", + "member":{"shape":"ScheduledUpdateGroupActionRequest"} }, "ScheduledUpdateGroupActions":{ "type":"list", @@ -3229,7 +3377,7 @@ }, "HonorCooldown":{ "shape":"HonorCooldown", - "documentation":"

Indicates whether Auto Scaling waits for the cooldown period to complete before initiating a scaling activity to set your Auto Scaling group to its new capacity. By default, Auto Scaling does not honor the cooldown period during manual scaling activities.

" + "documentation":"

Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before initiating a scaling activity to set your Auto Scaling group to its new capacity. By default, Amazon EC2 Auto Scaling does not honor the cooldown period during manual scaling activities.

" } } }, @@ -3246,7 +3394,7 @@ }, "HealthStatus":{ "shape":"XmlStringMaxLen32", - "documentation":"

The health status of the instance. Set to Healthy if you want the instance to remain in service. Set to Unhealthy if you want the instance to be out of service. Auto Scaling will terminate and replace the unhealthy instance.

" + "documentation":"

The health status of the instance. Set to Healthy if you want the instance to remain in service. Set to Unhealthy if you want the instance to be out of service. Amazon EC2 Auto Scaling will terminate and replace the unhealthy instance.
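
A short sketch of forcing a replacement by marking an instance unhealthy; the instance ID is a placeholder:

    import botocore.session

    client = botocore.session.get_session().create_client(
        "autoscaling", region_name="us-east-1")

    client.set_instance_health(
        InstanceId="i-0123456789abcdef0",
        HealthStatus="Unhealthy",
        ShouldRespectGracePeriod=True)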

" }, "ShouldRespectGracePeriod":{ "shape":"ShouldRespectGracePeriod", @@ -3277,7 +3425,7 @@ }, "ProtectedFromScaleIn":{ "shape":"ProtectedFromScaleIn", - "documentation":"

Indicates whether the instance is protected from termination by Auto Scaling when scaling in.

" + "documentation":"

Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in.
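
A short sketch of enabling scale-in protection for specific instances; the group name and instance IDs are placeholders:

    import botocore.session

    client = botocore.session.get_session().create_client(
        "autoscaling", region_name="us-east-1")

    client.set_instance_protection(
        AutoScalingGroupName="my-asg",
        InstanceIds=["i-0123456789abcdef0", "i-0fedcba9876543210"],
        ProtectedFromScaleIn=True)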

" } } }, @@ -3323,7 +3471,7 @@ "documentation":"

The reason that the process was suspended.

" } }, - "documentation":"

Describes an Auto Scaling process that has been suspended. For more information, see ProcessType.

" + "documentation":"

Describes an automatic scaling process that has been suspended. For more information, see ProcessType.

" }, "SuspendedProcesses":{ "type":"list", @@ -3494,7 +3642,7 @@ }, "DefaultCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.

For more information, see Auto Scaling Cooldowns in the Auto Scaling User Guide.

" + "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.

For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", @@ -3506,7 +3654,7 @@ }, "HealthCheckGracePeriod":{ "shape":"HealthCheckGracePeriod", - "documentation":"

The amount of time, in seconds, that Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default is 0.

For more information, see Health Checks in the Auto Scaling User Guide.

" + "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default is 0.

For more information, see Health Checks in the Amazon EC2 Auto Scaling User Guide.

" }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", @@ -3514,11 +3662,11 @@ }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen2047", - "documentation":"

The ID of the subnet, if you are launching into a VPC. You can specify several subnets in a comma-separated list.

When you specify VPCZoneIdentifier with AvailabilityZones, ensure that the subnets' Availability Zones match the values you specify for AvailabilityZones.

For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling User Guide.

" + "documentation":"

The ID of the subnet, if you are launching into a VPC. You can specify several subnets in a comma-separated list.

When you specify VPCZoneIdentifier with AvailabilityZones, ensure that the subnets' Availability Zones match the values you specify for AvailabilityZones.

For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

" }, "TerminationPolicies":{ "shape":"TerminationPolicies", - "documentation":"

A standalone termination policy or a list of termination policies used to select the instance to terminate. The policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling User Guide.

" + "documentation":"

A standalone termination policy or a list of termination policies used to select the instance to terminate. The policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling User Guide.

" }, "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", diff --git a/botocore/data/ce/2017-10-25/service-2.json b/botocore/data/ce/2017-10-25/service-2.json index 9c88df55..edd511dc 100644 --- a/botocore/data/ce/2017-10-25/service-2.json +++ b/botocore/data/ce/2017-10-25/service-2.json @@ -329,6 +329,62 @@ }, "documentation":"

The EC2 hardware specifications that you want AWS to provide recommendations for.

" }, + "ESInstanceDetails":{ + "type":"structure", + "members":{ + "InstanceClass":{ + "shape":"GenericString", + "documentation":"

The class of instance that AWS recommends.

" + }, + "InstanceSize":{ + "shape":"GenericString", + "documentation":"

The size of instance that AWS recommends.

" + }, + "Region":{ + "shape":"GenericString", + "documentation":"

The AWS Region of the recommended reservation.

" + }, + "CurrentGeneration":{ + "shape":"GenericBoolean", + "documentation":"

Whether the recommendation is for a current generation instance.

" + }, + "SizeFlexEligible":{ + "shape":"GenericBoolean", + "documentation":"

Whether the recommended reservation is size flexible.

" + } + }, + "documentation":"

Details about the ES instances that AWS recommends that you purchase.

" + }, + "ElastiCacheInstanceDetails":{ + "type":"structure", + "members":{ + "Family":{ + "shape":"GenericString", + "documentation":"

The instance family of the recommended reservation.

" + }, + "NodeType":{ + "shape":"GenericString", + "documentation":"

The type of node that AWS recommends.

" + }, + "Region":{ + "shape":"GenericString", + "documentation":"

The AWS Region of the recommended reservation.

" + }, + "ProductDescription":{ + "shape":"GenericString", + "documentation":"

The description of the recommended reservation.

" + }, + "CurrentGeneration":{ + "shape":"GenericBoolean", + "documentation":"

Whether the recommendation is for a current generation instance.

" + }, + "SizeFlexEligible":{ + "shape":"GenericBoolean", + "documentation":"

Whether the recommended reservation is size flexible.

" + } + }, + "documentation":"

Details about the ElastiCache instances that AWS recommends that you purchase.

" + }, "Entity":{"type":"string"}, "ErrorMessage":{"type":"string"}, "Estimated":{"type":"boolean"}, @@ -722,6 +778,18 @@ "RDSInstanceDetails":{ "shape":"RDSInstanceDetails", "documentation":"

The RDS instances that AWS recommends that you purchase.

" + }, + "RedshiftInstanceDetails":{ + "shape":"RedshiftInstanceDetails", + "documentation":"

The Amazon Redshift instances that AWS recommends that you purchase.

" + }, + "ElastiCacheInstanceDetails":{ + "shape":"ElastiCacheInstanceDetails", + "documentation":"

The ElastiCache instances that AWS recommends that you purchase.

" + }, + "ESInstanceDetails":{ + "shape":"ESInstanceDetails", + "documentation":"

The Amazon ES instances that AWS recommends that you purchase.

" } }, "documentation":"

Details about the instances that AWS recommends that you purchase.
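
A sketch of reading the new service-specific detail shapes from a reservation recommendation; the Service string follows Cost Explorer's naming conventions and is an assumption here, as is the choice of Redshift:

    import botocore.session

    ce = botocore.session.get_session().create_client(
        "ce", region_name="us-east-1")

    response = ce.get_reservation_purchase_recommendation(
        Service="Amazon Redshift")

    for rec in response.get("Recommendations", []):
        for detail in rec.get("RecommendationDetails", []):
            node = detail["InstanceDetails"].get("RedshiftInstanceDetails", {})
            print(node.get("NodeType"), node.get("Region"))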

" @@ -802,7 +870,10 @@ "enum":[ "NO_UPFRONT", "PARTIAL_UPFRONT", - "ALL_UPFRONT" + "ALL_UPFRONT", + "LIGHT_UTILIZATION", + "MEDIUM_UTILIZATION", + "HEAVY_UTILIZATION" ] }, "PurchasedHours":{"type":"string"}, @@ -848,6 +919,32 @@ }, "documentation":"

Details about the RDS instances that AWS recommends that you purchase.

" }, + "RedshiftInstanceDetails":{ + "type":"structure", + "members":{ + "Family":{ + "shape":"GenericString", + "documentation":"

The instance family of the recommended reservation.

" + }, + "NodeType":{ + "shape":"GenericString", + "documentation":"

The type of node that AWS recommends.

" + }, + "Region":{ + "shape":"GenericString", + "documentation":"

The AWS Region of the recommended reservation.

" + }, + "CurrentGeneration":{ + "shape":"GenericBoolean", + "documentation":"

Whether the recommendation is for a current generation instance.

" + }, + "SizeFlexEligible":{ + "shape":"GenericBoolean", + "documentation":"

Whether the recommended reservation is size flexible.

" + } + }, + "documentation":"

Details about the Amazon Redshift instances that AWS recommends that you purchase.

" + }, "RequestChangedException":{ "type":"structure", "members":{ diff --git a/botocore/data/cloudfront/2018-06-18/examples-1.json b/botocore/data/cloudfront/2018-06-18/examples-1.json new file mode 100644 index 00000000..0ea7e3b0 --- /dev/null +++ b/botocore/data/cloudfront/2018-06-18/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/botocore/data/cloudfront/2018-06-18/paginators-1.json b/botocore/data/cloudfront/2018-06-18/paginators-1.json new file mode 100644 index 00000000..51fbb907 --- /dev/null +++ b/botocore/data/cloudfront/2018-06-18/paginators-1.json @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + "output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": "InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff --git a/botocore/data/cloudfront/2018-06-18/service-2.json b/botocore/data/cloudfront/2018-06-18/service-2.json new file mode 100644 index 00000000..6bd2b439 --- /dev/null +++ b/botocore/data/cloudfront/2018-06-18/service-2.json @@ -0,0 +1,5197 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-06-18", + "endpointPrefix":"cloudfront", + "globalEndpoint":"cloudfront.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", + "signatureVersion":"v4", + "uid":"cloudfront-2018-06-18" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/origin-access-identity/cloudfront", + "responseCode":201 + }, + "input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"CloudFrontOriginAccessIdentityAlreadyExists"}, + {"shape":"MissingBody"}, + {"shape":"TooManyCloudFrontOriginAccessIdentities"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ], + "documentation":"

Creates a new origin access identity. If you're using Amazon S3 for your origin, you can use an origin access identity to require users to access your content using a CloudFront URL instead of the Amazon S3 URL. For more information about how to use origin access identities, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.
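
A minimal sketch of creating an origin access identity with a botocore client built from this model; the caller reference only needs to be unique per request, and the comment is a placeholder:

    import botocore.session

    cf = botocore.session.get_session().create_client(
        "cloudfront", region_name="us-east-1")

    response = cf.create_cloud_front_origin_access_identity(
        CloudFrontOriginAccessIdentityConfig={
            "CallerReference": "my-oai-2018-08-16",
            "Comment": "access identity for my private S3 origin",
        })

    print(response["CloudFrontOriginAccessIdentity"]["Id"])
    print(response["CloudFrontOriginAccessIdentity"]["S3CanonicalUserId"])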

" + }, + "CreateDistribution":{ + "name":"CreateDistribution2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/distribution", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"DistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"MissingBody"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"TooManyDistributions"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidProtocolSettings"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"}, + {"shape":"TooManyQueryStringParameters"}, + {"shape":"InvalidQueryStringParameters"}, + {"shape":"TooManyDistributionsWithLambdaAssociations"}, + {"shape":"TooManyLambdaFunctionAssociations"}, + {"shape":"InvalidLambdaFunctionAssociation"}, + {"shape":"InvalidOriginReadTimeout"}, + {"shape":"InvalidOriginKeepaliveTimeout"}, + {"shape":"NoSuchFieldLevelEncryptionConfig"}, + {"shape":"IllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior"}, + {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"} + ], + "documentation":"

Creates a new web distribution. Send a POST request to the /CloudFront API version/distribution resource.

" + }, + "CreateDistributionWithTags":{ + "name":"CreateDistributionWithTags2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/distribution?WithTags", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionWithTagsRequest"}, + "output":{"shape":"CreateDistributionWithTagsResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"DistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"MissingBody"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"TooManyDistributions"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidProtocolSettings"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"}, + {"shape":"InvalidTagging"}, + {"shape":"TooManyQueryStringParameters"}, + {"shape":"InvalidQueryStringParameters"}, + {"shape":"TooManyDistributionsWithLambdaAssociations"}, + {"shape":"TooManyLambdaFunctionAssociations"}, + {"shape":"InvalidLambdaFunctionAssociation"}, + {"shape":"InvalidOriginReadTimeout"}, + {"shape":"InvalidOriginKeepaliveTimeout"}, + {"shape":"NoSuchFieldLevelEncryptionConfig"}, + {"shape":"IllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior"}, + {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"} + ], + "documentation":"

Create a new distribution with tags.

" + }, + "CreateFieldLevelEncryptionConfig":{ + "name":"CreateFieldLevelEncryptionConfig2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/field-level-encryption", + "responseCode":201 + }, + "input":{"shape":"CreateFieldLevelEncryptionConfigRequest"}, + "output":{"shape":"CreateFieldLevelEncryptionConfigResult"}, + "errors":[ + {"shape":"InconsistentQuantities"}, + {"shape":"InvalidArgument"}, + {"shape":"NoSuchFieldLevelEncryptionProfile"}, + {"shape":"FieldLevelEncryptionConfigAlreadyExists"}, + {"shape":"TooManyFieldLevelEncryptionConfigs"}, + {"shape":"TooManyFieldLevelEncryptionQueryArgProfiles"}, + {"shape":"TooManyFieldLevelEncryptionContentTypeProfiles"}, + {"shape":"QueryArgProfileEmpty"} + ], + "documentation":"

Create a new field-level encryption configuration.

" + }, + "CreateFieldLevelEncryptionProfile":{ + "name":"CreateFieldLevelEncryptionProfile2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/field-level-encryption-profile", + "responseCode":201 + }, + "input":{"shape":"CreateFieldLevelEncryptionProfileRequest"}, + "output":{"shape":"CreateFieldLevelEncryptionProfileResult"}, + "errors":[ + {"shape":"InconsistentQuantities"}, + {"shape":"InvalidArgument"}, + {"shape":"NoSuchPublicKey"}, + {"shape":"FieldLevelEncryptionProfileAlreadyExists"}, + {"shape":"FieldLevelEncryptionProfileSizeExceeded"}, + {"shape":"TooManyFieldLevelEncryptionProfiles"}, + {"shape":"TooManyFieldLevelEncryptionEncryptionEntities"}, + {"shape":"TooManyFieldLevelEncryptionFieldPatterns"} + ], + "documentation":"

Create a field-level encryption profile.

" + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"MissingBody"}, + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"BatchTooLarge"}, + {"shape":"TooManyInvalidationsInProgress"}, + {"shape":"InconsistentQuantities"} + ], + "documentation":"

Create a new invalidation.
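
A minimal sketch of invalidating a couple of paths; the distribution ID is a placeholder and the caller reference must be unique:

    import botocore.session

    cf = botocore.session.get_session().create_client(
        "cloudfront", region_name="us-east-1")

    cf.create_invalidation(
        DistributionId="EDFDVBD6EXAMPLE",
        InvalidationBatch={
            "Paths": {"Quantity": 2, "Items": ["/index.html", "/css/*"]},
            "CallerReference": "deploy-2018-08-16-01",
        })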

" + }, + "CreatePublicKey":{ + "name":"CreatePublicKey2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/public-key", + "responseCode":201 + }, + "input":{"shape":"CreatePublicKeyRequest"}, + "output":{"shape":"CreatePublicKeyResult"}, + "errors":[ + {"shape":"PublicKeyAlreadyExists"}, + {"shape":"InvalidArgument"}, + {"shape":"TooManyPublicKeys"} + ], + "documentation":"

Add a new public key to CloudFront for use with features such as field-level encryption. You can add a maximum of 10 public keys with one AWS account.

" + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/streaming-distribution", + "responseCode":201 + }, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"StreamingDistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"MissingBody"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"TooManyStreamingDistributions"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ], + "documentation":"

Creates a new RTMP distribution. An RTMP distribution is similar to a web distribution, but an RTMP distribution streams media files using the Adobe Real-Time Messaging Protocol (RTMP) instead of serving files using HTTP.

To create a new RTMP distribution, submit a POST request to the CloudFront API version/streaming-distribution resource. The request body must include a document with a StreamingDistributionConfig element. The response echoes the StreamingDistributionConfig element and returns other information about the RTMP distribution.

To get the status of your request, use the GET StreamingDistribution API action. When the value of Enabled is true and the value of Status is Deployed, your distribution is ready. A distribution usually deploys in less than 15 minutes.

For more information about RTMP distributions, see Working with RTMP Distributions in the Amazon CloudFront Developer Guide.

Beginning with the 2012-05-05 version of the CloudFront API, we made substantial changes to the format of the XML document that you include in the request body when you create or update a web distribution or an RTMP distribution, and when you invalidate objects. With previous versions of the API, we discovered that it was too easy to accidentally delete one or more values for an element that accepts multiple values, for example, CNAMEs and trusted signers. Our changes for the 2012-05-05 release are intended to prevent these accidental deletions and to notify you when there's a mismatch between the number of values you say you're specifying in the Quantity element and the number of values specified.
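
As a sketch only, not part of the service model, the create-then-poll flow described above might look like the following with a botocore client generated from this model; the bucket name, caller reference, and comment are placeholder assumptions.

    # Sketch: create an RTMP (streaming) distribution and poll until Deployed.
    # Assumes AWS credentials are configured; all identifiers are placeholders.
    import time
    import botocore.session

    client = botocore.session.get_session().create_client(
        'cloudfront', region_name='us-east-1')

    resp = client.create_streaming_distribution(
        StreamingDistributionConfig={
            'CallerReference': 'example-2018-08-16',  # unique; prevents replays
            'S3Origin': {
                'DomainName': 'example-bucket.s3.amazonaws.com',
                'OriginAccessIdentity': '',           # no OAI in this sketch
            },
            'Comment': 'example RTMP distribution',
            'TrustedSigners': {'Enabled': False, 'Quantity': 0},
            'Enabled': True,
        })
    dist_id = resp['StreamingDistribution']['Id']

    # GET StreamingDistribution until Status is Deployed (usually < 15 minutes).
    while True:
        status = client.get_streaming_distribution(
            Id=dist_id)['StreamingDistribution']['Status']
        if status == 'Deployed':
            break
        time.sleep(30)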

" + }, + "CreateStreamingDistributionWithTags":{ + "name":"CreateStreamingDistributionWithTags2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/streaming-distribution?WithTags", + "responseCode":201 + }, + "input":{"shape":"CreateStreamingDistributionWithTagsRequest"}, + "output":{"shape":"CreateStreamingDistributionWithTagsResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"StreamingDistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"MissingBody"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"TooManyStreamingDistributions"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"}, + {"shape":"InvalidTagging"} + ], + "documentation":"

Create a new streaming distribution with tags.

" + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2018_06_18", + "http":{ + "method":"DELETE", + "requestUri":"/2018-06-18/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"CloudFrontOriginAccessIdentityInUse"} + ], + "documentation":"

Delete an origin access identity.

" + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2018_06_18", + "http":{ + "method":"DELETE", + "requestUri":"/2018-06-18/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"DistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"} + ], + "documentation":"

Delete a distribution.

" + }, + "DeleteFieldLevelEncryptionConfig":{ + "name":"DeleteFieldLevelEncryptionConfig2018_06_18", + "http":{ + "method":"DELETE", + "requestUri":"/2018-06-18/field-level-encryption/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteFieldLevelEncryptionConfigRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchFieldLevelEncryptionConfig"}, + {"shape":"PreconditionFailed"}, + {"shape":"FieldLevelEncryptionConfigInUse"} + ], + "documentation":"

Remove a field-level encryption configuration.

" + }, + "DeleteFieldLevelEncryptionProfile":{ + "name":"DeleteFieldLevelEncryptionProfile2018_06_18", + "http":{ + "method":"DELETE", + "requestUri":"/2018-06-18/field-level-encryption-profile/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteFieldLevelEncryptionProfileRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchFieldLevelEncryptionProfile"}, + {"shape":"PreconditionFailed"}, + {"shape":"FieldLevelEncryptionProfileInUse"} + ], + "documentation":"

Remove a field-level encryption profile.

" + }, + "DeletePublicKey":{ + "name":"DeletePublicKey2018_06_18", + "http":{ + "method":"DELETE", + "requestUri":"/2018-06-18/public-key/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeletePublicKeyRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"PublicKeyInUse"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchPublicKey"}, + {"shape":"PreconditionFailed"} + ], + "documentation":"

Remove a public key you previously added to CloudFront.

" + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2018_06_18", + "http":{ + "method":"DELETE", + "requestUri":"/2018-06-18/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"StreamingDistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"} + ], + "documentation":"

Delete a streaming distribution.

To delete an RTMP distribution using the CloudFront API:

  1. Disable the RTMP distribution.

  2. Submit a GET Streaming Distribution Config request to get the current configuration and the ETag header for the distribution.

  3. Update the XML document that was returned in the response to your GET Streaming Distribution Config request to change the value of Enabled to false.

  4. Submit a PUT Streaming Distribution Config request to update the configuration for your distribution. In the request body, include the XML document that you updated in Step 3. Then set the value of the HTTP If-Match header to the value of the ETag header that CloudFront returned when you submitted the GET Streaming Distribution Config request in Step 2.

  5. Review the response to the PUT Streaming Distribution Config request to confirm that the distribution was successfully disabled.

  6. Submit a GET Streaming Distribution Config request to confirm that your changes have propagated. When propagation is complete, the value of Status is Deployed.

  7. Submit a DELETE Streaming Distribution request. Set the value of the HTTP If-Match header to the value of the ETag header that CloudFront returned when you submitted the GET Streaming Distribution Config request in Step 2.

  8. Review the response to your DELETE Streaming Distribution request to confirm that the distribution was successfully deleted.

For information about deleting a distribution using the CloudFront console, see Deleting a Distribution in the Amazon CloudFront Developer Guide.
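
To make the steps above concrete, here is a minimal sketch (not part of the model) of the disable-then-delete flow using a botocore client; the distribution ID is a placeholder, and the waiter name assumes the waiter definitions shipped alongside this model.

    # Sketch: disable an RTMP distribution, wait for it to deploy, then delete it.
    import botocore.session

    client = botocore.session.get_session().create_client(
        'cloudfront', region_name='us-east-1')
    dist_id = 'EXAMPLESTREAMID'  # placeholder

    # Steps 2-4: fetch the current config and ETag, disable, and PUT it back.
    resp = client.get_streaming_distribution_config(Id=dist_id)
    config, etag = resp['StreamingDistributionConfig'], resp['ETag']
    config['Enabled'] = False
    client.update_streaming_distribution(
        Id=dist_id, StreamingDistributionConfig=config, IfMatch=etag)

    # Steps 5-6: wait until the change has propagated (Status is Deployed).
    client.get_waiter('streaming_distribution_deployed').wait(Id=dist_id)

    # Steps 7-8: delete, using the ETag from a fresh GET of the config.
    etag = client.get_streaming_distribution_config(Id=dist_id)['ETag']
    client.delete_streaming_distribution(Id=dist_id, IfMatch=etag)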

" + }, + "GetCloudFrontOriginAccessIdentity":{ + "name":"GetCloudFrontOriginAccessIdentity2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/origin-access-identity/cloudfront/{Id}" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ], + "documentation":"

Get the information about an origin access identity.

" + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ], + "documentation":"

Get the configuration information about an origin access identity.

" + }, + "GetDistribution":{ + "name":"GetDistribution2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ], + "documentation":"

Get the information about a distribution.

" + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + "output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ], + "documentation":"

Get the configuration information about a distribution.

" + }, + "GetFieldLevelEncryption":{ + "name":"GetFieldLevelEncryption2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/field-level-encryption/{Id}" + }, + "input":{"shape":"GetFieldLevelEncryptionRequest"}, + "output":{"shape":"GetFieldLevelEncryptionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"NoSuchFieldLevelEncryptionConfig"} + ], + "documentation":"

Get the field-level encryption configuration information.

" + }, + "GetFieldLevelEncryptionConfig":{ + "name":"GetFieldLevelEncryptionConfig2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/field-level-encryption/{Id}/config" + }, + "input":{"shape":"GetFieldLevelEncryptionConfigRequest"}, + "output":{"shape":"GetFieldLevelEncryptionConfigResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"NoSuchFieldLevelEncryptionConfig"} + ], + "documentation":"

Get the field-level encryption configuration information.

" + }, + "GetFieldLevelEncryptionProfile":{ + "name":"GetFieldLevelEncryptionProfile2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/field-level-encryption-profile/{Id}" + }, + "input":{"shape":"GetFieldLevelEncryptionProfileRequest"}, + "output":{"shape":"GetFieldLevelEncryptionProfileResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"NoSuchFieldLevelEncryptionProfile"} + ], + "documentation":"

Get the field-level encryption profile information.

" + }, + "GetFieldLevelEncryptionProfileConfig":{ + "name":"GetFieldLevelEncryptionProfileConfig2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/field-level-encryption-profile/{Id}/config" + }, + "input":{"shape":"GetFieldLevelEncryptionProfileConfigRequest"}, + "output":{"shape":"GetFieldLevelEncryptionProfileConfigResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"NoSuchFieldLevelEncryptionProfile"} + ], + "documentation":"

Get the field-level encryption profile configuration information.

" + }, + "GetInvalidation":{ + "name":"GetInvalidation2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + {"shape":"NoSuchInvalidation"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ], + "documentation":"

Get the information about an invalidation.

" + }, + "GetPublicKey":{ + "name":"GetPublicKey2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/public-key/{Id}" + }, + "input":{"shape":"GetPublicKeyRequest"}, + "output":{"shape":"GetPublicKeyResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"NoSuchPublicKey"} + ], + "documentation":"

Get the public key information.

" + }, + "GetPublicKeyConfig":{ + "name":"GetPublicKeyConfig2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/public-key/{Id}/config" + }, + "input":{"shape":"GetPublicKeyConfigRequest"}, + "output":{"shape":"GetPublicKeyConfigResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"NoSuchPublicKey"} + ], + "documentation":"

Return public key configuration information.

" + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + "output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ], + "documentation":"

Gets information about a specified RTMP distribution, including the distribution configuration.

" + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/streaming-distribution/{Id}/config" + }, + "input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ], + "documentation":"

Get the configuration information about a streaming distribution.

" + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + "output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ], + "documentation":"

Lists origin access identities.

" + }, + "ListDistributions":{ + "name":"ListDistributions2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ], + "documentation":"

List distributions.

" + }, + "ListDistributionsByWebACLId":{ + "name":"ListDistributionsByWebACLId2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/distributionsByWebACLId/{WebACLId}" + }, + "input":{"shape":"ListDistributionsByWebACLIdRequest"}, + "output":{"shape":"ListDistributionsByWebACLIdResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"InvalidWebACLId"} + ], + "documentation":"

List the distributions that are associated with a specified AWS WAF web ACL.

" + }, + "ListFieldLevelEncryptionConfigs":{ + "name":"ListFieldLevelEncryptionConfigs2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/field-level-encryption" + }, + "input":{"shape":"ListFieldLevelEncryptionConfigsRequest"}, + "output":{"shape":"ListFieldLevelEncryptionConfigsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ], + "documentation":"

List all field-level encryption configurations that have been created in CloudFront for this account.

" + }, + "ListFieldLevelEncryptionProfiles":{ + "name":"ListFieldLevelEncryptionProfiles2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/field-level-encryption-profile" + }, + "input":{"shape":"ListFieldLevelEncryptionProfilesRequest"}, + "output":{"shape":"ListFieldLevelEncryptionProfilesResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ], + "documentation":"

Request a list of field-level encryption profiles that have been created in CloudFront for this account.

" + }, + "ListInvalidations":{ + "name":"ListInvalidations2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ], + "documentation":"

Lists invalidation batches.

" + }, + "ListPublicKeys":{ + "name":"ListPublicKeys2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/public-key" + }, + "input":{"shape":"ListPublicKeysRequest"}, + "output":{"shape":"ListPublicKeysResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ], + "documentation":"

List all public keys that have been added to CloudFront for this account.

" + }, + "ListStreamingDistributions":{ + "name":"ListStreamingDistributions2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + "output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ], + "documentation":"

List streaming distributions.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource2018_06_18", + "http":{ + "method":"GET", + "requestUri":"/2018-06-18/tagging" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidTagging"}, + {"shape":"NoSuchResource"} + ], + "documentation":"

List tags for a CloudFront resource.

" + }, + "TagResource":{ + "name":"TagResource2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/tagging?Operation=Tag", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidTagging"}, + {"shape":"NoSuchResource"} + ], + "documentation":"

Add tags to a CloudFront resource.

" + }, + "UntagResource":{ + "name":"UntagResource2018_06_18", + "http":{ + "method":"POST", + "requestUri":"/2018-06-18/tagging?Operation=Untag", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidTagging"}, + {"shape":"NoSuchResource"} + ], + "documentation":"

Remove tags from a CloudFront resource.

" + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2018_06_18", + "http":{ + "method":"PUT", + "requestUri":"/2018-06-18/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ], + "documentation":"

Update an origin access identity.

" + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2018_06_18", + "http":{ + "method":"PUT", + "requestUri":"/2018-06-18/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"}, + {"shape":"TooManyQueryStringParameters"}, + {"shape":"InvalidQueryStringParameters"}, + {"shape":"TooManyDistributionsWithLambdaAssociations"}, + {"shape":"TooManyLambdaFunctionAssociations"}, + {"shape":"InvalidLambdaFunctionAssociation"}, + {"shape":"InvalidOriginReadTimeout"}, + {"shape":"InvalidOriginKeepaliveTimeout"}, + {"shape":"NoSuchFieldLevelEncryptionConfig"}, + {"shape":"IllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior"}, + {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"} + ], + "documentation":"

Updates the configuration for a web distribution. Perform the following steps.

For information about updating a distribution using the CloudFront console, see Creating or Updating a Web Distribution Using the CloudFront Console in the Amazon CloudFront Developer Guide.

To update a web distribution using the CloudFront API:

  1. Submit a GetDistributionConfig request to get the current configuration and an ETag header for the distribution.

    If you update the distribution again, you need to get a new ETag header.

  2. Update the XML document that was returned in the response to your GetDistributionConfig request to include the desired changes. You can't change the value of CallerReference. If you try to change this value, CloudFront returns an IllegalUpdate error. Note that you must strip out the ETag parameter that is returned.

    The new configuration replaces the existing configuration; the values that you specify in an UpdateDistribution request are not merged into the existing configuration. When you add, delete, or replace values in an element that allows multiple values (for example, CNAME), you must specify all of the values that you want to appear in the updated distribution. In addition, you must update the corresponding Quantity element.

  3. Submit an UpdateDistribution request to update the configuration for your distribution:

    • In the request body, include the XML document that you updated in Step 2. The request body must include an XML document with a DistributionConfig element.

    • Set the value of the HTTP If-Match header to the value of the ETag header that CloudFront returned when you submitted the GetDistributionConfig request in Step 1.

  4. Review the response to the UpdateDistribution request to confirm that the configuration was successfully updated.

  5. Optional: Submit a GetDistribution request to confirm that your changes have propagated. When propagation is complete, the value of Status is Deployed.

    Beginning with the 2012-05-05 version of the CloudFront API, we made substantial changes to the format of the XML document that you include in the request body when you create or update a distribution. With previous versions of the API, we discovered that it was too easy to accidentally delete one or more values for an element that accepts multiple values, for example, CNAMEs and trusted signers. Our changes for the 2012-05-05 release are intended to prevent these accidental deletions and to notify you when there's a mismatch between the number of values you say you're specifying in the Quantity element and the number of values you're actually specifying.
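
A minimal sketch (not part of the model) of the get-modify-put flow above, using a botocore client; the distribution ID and the field being changed are placeholders.

    # Sketch: GetDistributionConfig, change a value, then UpdateDistribution
    # with If-Match set to the ETag from the GET. Identifiers are placeholders.
    import botocore.session

    client = botocore.session.get_session().create_client(
        'cloudfront', region_name='us-east-1')
    dist_id = 'EXAMPLEDISTID'  # placeholder

    # Step 1: current configuration plus the ETag header.
    resp = client.get_distribution_config(Id=dist_id)
    config, etag = resp['DistributionConfig'], resp['ETag']

    # Step 2: edit the returned document; CallerReference must stay unchanged.
    config['Comment'] = 'updated comment'

    # Steps 3-4: send the whole updated config back with the ETag as If-Match.
    updated = client.update_distribution(
        Id=dist_id, DistributionConfig=config, IfMatch=etag)
    print(updated['Distribution']['Status'])  # 'InProgress' until deployed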

" + }, + "UpdateFieldLevelEncryptionConfig":{ + "name":"UpdateFieldLevelEncryptionConfig2018_06_18", + "http":{ + "method":"PUT", + "requestUri":"/2018-06-18/field-level-encryption/{Id}/config" + }, + "input":{"shape":"UpdateFieldLevelEncryptionConfigRequest"}, + "output":{"shape":"UpdateFieldLevelEncryptionConfigResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"IllegalUpdate"}, + {"shape":"InconsistentQuantities"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchFieldLevelEncryptionProfile"}, + {"shape":"NoSuchFieldLevelEncryptionConfig"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyFieldLevelEncryptionQueryArgProfiles"}, + {"shape":"TooManyFieldLevelEncryptionContentTypeProfiles"}, + {"shape":"QueryArgProfileEmpty"} + ], + "documentation":"

Update a field-level encryption configuration.

" + }, + "UpdateFieldLevelEncryptionProfile":{ + "name":"UpdateFieldLevelEncryptionProfile2018_06_18", + "http":{ + "method":"PUT", + "requestUri":"/2018-06-18/field-level-encryption-profile/{Id}/config" + }, + "input":{"shape":"UpdateFieldLevelEncryptionProfileRequest"}, + "output":{"shape":"UpdateFieldLevelEncryptionProfileResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"FieldLevelEncryptionProfileAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InconsistentQuantities"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchPublicKey"}, + {"shape":"NoSuchFieldLevelEncryptionProfile"}, + {"shape":"PreconditionFailed"}, + {"shape":"FieldLevelEncryptionProfileSizeExceeded"}, + {"shape":"TooManyFieldLevelEncryptionEncryptionEntities"}, + {"shape":"TooManyFieldLevelEncryptionFieldPatterns"} + ], + "documentation":"

Update a field-level encryption profile.

" + }, + "UpdatePublicKey":{ + "name":"UpdatePublicKey2018_06_18", + "http":{ + "method":"PUT", + "requestUri":"/2018-06-18/public-key/{Id}/config" + }, + "input":{"shape":"UpdatePublicKeyRequest"}, + "output":{"shape":"UpdatePublicKeyResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CannotChangeImmutablePublicKeyFields"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"IllegalUpdate"}, + {"shape":"NoSuchPublicKey"}, + {"shape":"PreconditionFailed"} + ], + "documentation":"

Update public key information. Note that the only value you can change is the comment.

" + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2018_06_18", + "http":{ + "method":"PUT", + "requestUri":"/2018-06-18/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + "output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InconsistentQuantities"} + ], + "documentation":"

Update a streaming distribution.

" + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Access denied.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{ + "shape":"boolean", + "documentation":"

Enabled is true if any of the AWS accounts listed in the TrustedSigners complex type for this RTMP distribution have active CloudFront key pairs. If not, Enabled is false.

For more information, see ActiveTrustedSigners.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

A complex type that contains one Signer complex type for each trusted signer specified in the TrustedSigners complex type.

For more information, see ActiveTrustedSigners.

" + }, + "Items":{ + "shape":"SignerList", + "documentation":"

A complex type that contains one Signer complex type for each trusted signer that is specified in the TrustedSigners complex type.

For more information, see ActiveTrustedSigners.

" + } + }, + "documentation":"

A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for this distribution. These are the accounts that you want to allow to create signed URLs for private content.

The Signer complex type lists the AWS account number of the trusted signer or self if the signer is the AWS account that created the distribution. The Signer element also includes the IDs of any active CloudFront key pairs that are associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create signed URLs.

For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

" + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of CNAME aliases, if any, that you want to associate with this distribution.

" + }, + "Items":{ + "shape":"AliasList", + "documentation":"

A complex type that contains the CNAME aliases, if any, that you want to associate with this distribution.

" + } + }, + "documentation":"

A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.

" + }, + "AllowedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD, and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).

" + }, + "Items":{ + "shape":"MethodsList", + "documentation":"

A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.

" + }, + "CachedMethods":{"shape":"CachedMethods"} + }, + "documentation":"

A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: CloudFront forwards only GET and HEAD requests; CloudFront forwards only GET, HEAD, and OPTIONS requests; or CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests.

If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you might not want users to have permissions to delete objects from your origin.
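
For illustration only, an AllowedMethods element that accepts all seven methods but caches responses only to GET, HEAD, and OPTIONS might be expressed like this when building a distribution config with botocore (a sketch, not taken from the model):

    # Sketch: AllowedMethods as passed inside a DistributionConfig cache behavior.
    allowed_methods = {
        'Quantity': 7,  # 2, 3, or 7 per the documentation above
        'Items': ['GET', 'HEAD', 'OPTIONS', 'PUT', 'PATCH', 'POST', 'DELETE'],
        'CachedMethods': {
            'Quantity': 3,  # cache responses to GET, HEAD, and OPTIONS
            'Items': ['GET', 'HEAD', 'OPTIONS'],
        },
    }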

" + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{ + "shape":"string", + "documentation":"

The pattern (for example, images/*.jpg) that specifies which requests to apply the behavior to. When CloudFront receives a viewer request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution.

You can optionally include a slash (/) at the beginning of the path pattern. For example, /images/*.jpg. CloudFront behavior is the same with or without the leading /.

The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.

For more information, see Path Pattern in the Amazon CloudFront Developer Guide.

" + }, + "TargetOriginId":{ + "shape":"string", + "documentation":"

The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.

" + }, + "ForwardedValues":{ + "shape":"ForwardedValues", + "documentation":"

A complex type that specifies how CloudFront handles query strings and cookies.

" + }, + "TrustedSigners":{ + "shape":"TrustedSigners", + "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" + }, + "ViewerProtocolPolicy":{ + "shape":"ViewerProtocolPolicy", + "documentation":"

The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. You can specify the following options:

For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide.

The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

" + }, + "MinTTL":{ + "shape":"long", + "documentation":"

The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

You must specify 0 for MinTTL if you configure CloudFront to forward all headers to your origin (under Headers, if you specify 1 for Quantity and * for Name).

" + }, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{ + "shape":"boolean", + "documentation":"

Indicates whether you want to distribute media files in the Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false. If you specify true for SmoothStreaming, you can still distribute other content using this cache behavior if the content matches the value of PathPattern.

" + }, + "DefaultTTL":{ + "shape":"long", + "documentation":"

The default amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

" + }, + "MaxTTL":{ + "shape":"long", + "documentation":"

The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

" + }, + "Compress":{ + "shape":"boolean", + "documentation":"

Whether you want CloudFront to automatically compress certain files for this cache behavior. If so, specify true; if not, specify false. For more information, see Serving Compressed Files in the Amazon CloudFront Developer Guide.

" + }, + "LambdaFunctionAssociations":{ + "shape":"LambdaFunctionAssociations", + "documentation":"

A complex type that contains zero or more Lambda function associations for a cache behavior.

" + }, + "FieldLevelEncryptionId":{ + "shape":"string", + "documentation":"

The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.

" + } + }, + "documentation":"

A complex type that describes how CloudFront processes requests.

You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin is never used.

For the current limit on the number of cache behaviors that you can add to a distribution, see Amazon CloudFront Limits in the AWS General Reference.

If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error.

To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element.

To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.

For more information about cache behaviors, see Cache Behaviors in the Amazon CloudFront Developer Guide.
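
As a sketch only, a minimal CacheBehavior entry covering the required members listed in this shape might look as follows in a botocore request; the path pattern, origin ID, and TTL are placeholder assumptions, and the ForwardedValues members come from shapes defined later in this model.

    # Sketch: one CacheBehavior item with only the required members filled in.
    cache_behavior = {
        'PathPattern': 'images/*.jpg',      # which requests this behavior matches
        'TargetOriginId': 'my-s3-origin',   # ID of an origin defined elsewhere
        'ForwardedValues': {
            'QueryString': False,
            'Cookies': {'Forward': 'none'},
        },
        'TrustedSigners': {'Enabled': False, 'Quantity': 0},
        'ViewerProtocolPolicy': 'redirect-to-https',
        'MinTTL': 0,
    }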

" + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of cache behaviors for this distribution.

" + }, + "Items":{ + "shape":"CacheBehaviorList", + "documentation":"

Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items.

" + } + }, + "documentation":"

A complex type that contains zero or more CacheBehavior elements.

" + }, + "CachedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).

" + }, + "Items":{ + "shape":"MethodsList", + "documentation":"

A complex type that contains the HTTP methods that you want CloudFront to cache responses to.

" + } + }, + "documentation":"

A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: CloudFront caches responses to GET and HEAD requests, or CloudFront caches responses to GET, HEAD, and OPTIONS requests.

If you pick the second choice for your Amazon S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers, and Origin headers for the responses to be cached correctly.

" + }, + "CannotChangeImmutablePublicKeyFields":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

You can't change the value of a public key.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "CertificateSource":{ + "type":"string", + "enum":[ + "cloudfront", + "iam", + "acm" + ] + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The ID for the origin access identity, for example, E74FTE3AJFJ256A.

" + }, + "S3CanonicalUserId":{ + "shape":"string", + "documentation":"

The Amazon S3 canonical user ID for the origin access identity, used when giving the origin access identity read permission to an object in Amazon S3.

" + }, + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "documentation":"

The current configuration information for the identity.

" + } + }, + "documentation":"

CloudFront origin access identity.

" + }, + "CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{ + "shape":"string", + "documentation":"

A unique number that ensures the request can't be replayed.

If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created.

If the CallerReference is a value already sent in a previous identity request, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request.

If the CallerReference is a value you already sent in a previous request to create an identity, but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.
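
A hedged sketch (not part of the model) of how these CallerReference semantics play out when creating an origin access identity with a botocore client; the reference string and comment are placeholders.

    # Sketch: creating an origin access identity. Resending the same
    # CallerReference with an identical config returns the same identity;
    # a different config raises CloudFrontOriginAccessIdentityAlreadyExists.
    import botocore.session

    client = botocore.session.get_session().create_client(
        'cloudfront', region_name='us-east-1')

    resp = client.create_cloud_front_origin_access_identity(
        CloudFrontOriginAccessIdentityConfig={
            'CallerReference': 'example-oai-2018-08-16',  # placeholder
            'Comment': 'OAI for example-bucket',
        })
    print(resp['CloudFrontOriginAccessIdentity']['Id'], resp['ETag'])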

" + }, + "Comment":{ + "shape":"string", + "documentation":"

Any comments you want to include about the origin access identity.

" + } + }, + "documentation":"

Origin access identity configuration. Send a GET request to the /CloudFront API version/CloudFront/identity ID/config resource.

" + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).

" + }, + "NextMarker":{ + "shape":"string", + "documentation":"

If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.

" + }, + "MaxItems":{ + "shape":"integer", + "documentation":"

The maximum number of origin access identities you want in the response body.

" + }, + "IsTruncated":{ + "shape":"boolean", + "documentation":"

A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of CloudFront origin access identities that were created by the current AWS account.

" + }, + "Items":{ + "shape":"CloudFrontOriginAccessIdentitySummaryList", + "documentation":"

A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account.

" + } + }, + "documentation":"

Lists the origin access identities for CloudFront. Send a GET request to the /CloudFront API version/origin-access-identity/cloudfront resource. The response includes a CloudFrontOriginAccessIdentityList element with zero or more CloudFrontOriginAccessIdentitySummary child elements. By default, your entire list of origin access identities is returned in a single page. If the list is long, you can paginate it using the MaxItems and Marker parameters.
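
A minimal sketch (not from the model) of paginating this list with a botocore client, using the Marker/NextMarker fields described above; note that MaxItems is a string in this API, and the page size here is an arbitrary placeholder.

    # Sketch: walk all origin access identities page by page.
    import botocore.session

    client = botocore.session.get_session().create_client(
        'cloudfront', region_name='us-east-1')

    marker = None
    while True:
        kwargs = {'MaxItems': '50'}
        if marker:
            kwargs['Marker'] = marker
        page = client.list_cloud_front_origin_access_identities(**kwargs)
        oai_list = page['CloudFrontOriginAccessIdentityList']
        for oai in oai_list.get('Items', []):
            print(oai['Id'], oai['Comment'])
        if not oai_list['IsTruncated']:
            break
        marker = oai_list['NextMarker']

    # The paginator added alongside this model wraps the same loop:
    # client.get_paginator('list_cloud_front_origin_access_identities')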

" + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + "Comment" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The ID for the origin access identity. For example: E74FTE3AJFJ256A.

" + }, + "S3CanonicalUserId":{ + "shape":"string", + "documentation":"

The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

The comment for this origin access identity, as originally specified when created.

" + } + }, + "documentation":"

Summary of the information about a CloudFront origin access identity.

" + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } + }, + "ContentTypeProfile":{ + "type":"structure", + "required":[ + "Format", + "ContentType" + ], + "members":{ + "Format":{ + "shape":"Format", + "documentation":"

The format for a field-level encryption content type-profile mapping.

" + }, + "ProfileId":{ + "shape":"string", + "documentation":"

The profile ID for a field-level encryption content type-profile mapping.

" + }, + "ContentType":{ + "shape":"string", + "documentation":"

The content type for a field-level encryption content type-profile mapping.

" + } + }, + "documentation":"

A field-level encryption content type profile.

" + }, + "ContentTypeProfileConfig":{ + "type":"structure", + "required":["ForwardWhenContentTypeIsUnknown"], + "members":{ + "ForwardWhenContentTypeIsUnknown":{ + "shape":"boolean", + "documentation":"

The setting in a field-level encryption content type-profile mapping that specifies what to do when an unknown content type is provided for the profile. If true, content is forwarded without being encrypted when the content type is unknown. If false (the default), an error is returned when the content type is unknown.

" + }, + "ContentTypeProfiles":{ + "shape":"ContentTypeProfiles", + "documentation":"

The configuration for a field-level encryption content type-profile.

" + } + }, + "documentation":"

The configuration for a field-level encryption content type-profile mapping.

" + }, + "ContentTypeProfileList":{ + "type":"list", + "member":{ + "shape":"ContentTypeProfile", + "locationName":"ContentTypeProfile" + } + }, + "ContentTypeProfiles":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of field-level encryption content type-profile mappings.

" + }, + "Items":{ + "shape":"ContentTypeProfileList", + "documentation":"

Items in a field-level encryption content type-profile mapping.

" + } + }, + "documentation":"

Field-level encryption content type-profile.

" + }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of different cookies that you want CloudFront to forward to the origin for this cache behavior.

" + }, + "Items":{ + "shape":"CookieNameList", + "documentation":"

A complex type that contains one Name element for each cookie that you want CloudFront to forward to the origin for this cache behavior.

" + } + }, + "documentation":"

A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. For more information about forwarding cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies in the Amazon CloudFront Developer Guide.

" + }, + "CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{ + "shape":"ItemSelection", + "documentation":"

Specifies which cookies to forward to the origin for this cache behavior: all, none, or the list of cookies specified in the WhitelistedNames complex type.

Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an Amazon S3 origin, specify none for the Forward element.

" + }, + "WhitelistedNames":{ + "shape":"CookieNames", + "documentation":"

Required if you specify whitelist for the value of Forward. A complex type that specifies how many different cookies you want CloudFront to forward to the origin for this cache behavior and, if you want to forward selected cookies, the names of those cookies.

If you specify all or none for the value of Forward, omit WhitelistedNames. If you change the value of Forward from whitelist to all or none and you don't delete the WhitelistedNames element and its child elements, CloudFront deletes them automatically.

For the current limit on the number of cookie names that you can whitelist for each cache behavior, see Amazon CloudFront Limits in the AWS General Reference.
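
For illustration, a whitelist-style CookiePreference might be written like this inside a botocore cache behavior (a sketch; the cookie names are placeholders).

    # Sketch: forward only two named cookies to the origin.
    cookie_preference = {
        'Forward': 'whitelist',
        'WhitelistedNames': {
            'Quantity': 2,   # must match the number of Items
            'Items': ['session-id', 'language'],
        },
    }
    # With 'all' or 'none' for Forward, WhitelistedNames is omitted.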

" + } + }, + "documentation":"

A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. For more information about forwarding cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies in the Amazon CloudFront Developer Guide.

" + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "documentation":"

The current configuration information for the identity.

", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "documentation":"

The request to create a new origin access identity.

", + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{ + "shape":"CloudFrontOriginAccessIdentity", + "documentation":"

The origin access identity's information.

" + }, + "Location":{ + "shape":"string", + "documentation":"

The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.

", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the origin access identity created.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "documentation":"

The distribution's configuration information.

", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "documentation":"

The request to create a new distribution.

", + "payload":"DistributionConfig" + }, + "CreateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{ + "shape":"Distribution", + "documentation":"

The distribution's information.

" + }, + "Location":{ + "shape":"string", + "documentation":"

The fully qualified URI of the new distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.

", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the distribution created.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"Distribution" + }, + "CreateDistributionWithTagsRequest":{ + "type":"structure", + "required":["DistributionConfigWithTags"], + "members":{ + "DistributionConfigWithTags":{ + "shape":"DistributionConfigWithTags", + "documentation":"

The distribution's configuration information.

", + "locationName":"DistributionConfigWithTags", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "documentation":"

The request to create a new distribution with tags.

", + "payload":"DistributionConfigWithTags" + }, + "CreateDistributionWithTagsResult":{ + "type":"structure", + "members":{ + "Distribution":{ + "shape":"Distribution", + "documentation":"

The distribution's information.

" + }, + "Location":{ + "shape":"string", + "documentation":"

The fully qualified URI of the new distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.

", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the distribution created.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"Distribution" + }, + "CreateFieldLevelEncryptionConfigRequest":{ + "type":"structure", + "required":["FieldLevelEncryptionConfig"], + "members":{ + "FieldLevelEncryptionConfig":{ + "shape":"FieldLevelEncryptionConfig", + "documentation":"

The request to create a new field-level encryption configuration.

", + "locationName":"FieldLevelEncryptionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "payload":"FieldLevelEncryptionConfig" + }, + "CreateFieldLevelEncryptionConfigResult":{ + "type":"structure", + "members":{ + "FieldLevelEncryption":{ + "shape":"FieldLevelEncryption", + "documentation":"

Returned when you create a new field-level encryption configuration.

" + }, + "Location":{ + "shape":"string", + "documentation":"

The fully qualified URI of the new configuration resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/field-level-encryption-config/EDFDVBD632BHDS5.

", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the field level encryption configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"FieldLevelEncryption" + }, + "CreateFieldLevelEncryptionProfileRequest":{ + "type":"structure", + "required":["FieldLevelEncryptionProfileConfig"], + "members":{ + "FieldLevelEncryptionProfileConfig":{ + "shape":"FieldLevelEncryptionProfileConfig", + "documentation":"

The request to create a field-level encryption profile.

", + "locationName":"FieldLevelEncryptionProfileConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "payload":"FieldLevelEncryptionProfileConfig" + }, + "CreateFieldLevelEncryptionProfileResult":{ + "type":"structure", + "members":{ + "FieldLevelEncryptionProfile":{ + "shape":"FieldLevelEncryptionProfile", + "documentation":"

Returned when you create a new field-level encryption profile.

" + }, + "Location":{ + "shape":"string", + "documentation":"

The fully qualified URI of the new profile resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/field-level-encryption-profile/EDFDVBD632BHDS5.

", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the field level encryption profile. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"FieldLevelEncryptionProfile" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "documentation":"

The distribution's id.

", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "documentation":"

The batch information for the invalidation.

", + "locationName":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "documentation":"

The request to create an invalidation.

", + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "documentation":"

The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.

", + "location":"header", + "locationName":"Location" + }, + "Invalidation":{ + "shape":"Invalidation", + "documentation":"

The invalidation's information.

" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"Invalidation" + }, + "CreatePublicKeyRequest":{ + "type":"structure", + "required":["PublicKeyConfig"], + "members":{ + "PublicKeyConfig":{ + "shape":"PublicKeyConfig", + "documentation":"

The request to add a public key to CloudFront.

", + "locationName":"PublicKeyConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "payload":"PublicKeyConfig" + }, + "CreatePublicKeyResult":{ + "type":"structure", + "members":{ + "PublicKey":{ + "shape":"PublicKey", + "documentation":"

Returned when you add a public key.

" + }, + "Location":{ + "shape":"string", + "documentation":"

The fully qualified URI of the new public key resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/cloudfront-public-key/EDFDVBD632BHDS5.

", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the public key. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"PublicKey" + }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "documentation":"

The streaming distribution's configuration information.

", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "documentation":"

The request to create a new streaming distribution.

", + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{ + "shape":"StreamingDistribution", + "documentation":"

The streaming distribution's information.

" + }, + "Location":{ + "shape":"string", + "documentation":"

The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.

", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the streaming distribution created.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"StreamingDistribution" + }, + "CreateStreamingDistributionWithTagsRequest":{ + "type":"structure", + "required":["StreamingDistributionConfigWithTags"], + "members":{ + "StreamingDistributionConfigWithTags":{ + "shape":"StreamingDistributionConfigWithTags", + "documentation":"

The streaming distribution's configuration information.

", + "locationName":"StreamingDistributionConfigWithTags", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "documentation":"

The request to create a new streaming distribution with tags.

", + "payload":"StreamingDistributionConfigWithTags" + }, + "CreateStreamingDistributionWithTagsResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{ + "shape":"StreamingDistribution", + "documentation":"

The streaming distribution's information.

" + }, + "Location":{ + "shape":"string", + "documentation":"

The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.

", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{ + "shape":"integer", + "documentation":"

The HTTP status code for which you want to specify a custom error page and/or a caching duration.

" + }, + "ResponsePagePath":{ + "shape":"string", + "documentation":"

The path to the custom error page that you want CloudFront to return to a viewer when your origin returns the HTTP status code specified by ErrorCode, for example, /4xx-errors/403-forbidden.html. If you want to store your objects and your custom error pages in different locations, your distribution must include a cache behavior for which the following is true:

If you specify a value for ResponsePagePath, you must also specify a value for ResponseCode. If you don't want to specify a value, include an empty element, <ResponsePagePath>, in the XML document.

We recommend that you store custom error pages in an Amazon S3 bucket. If you store custom error pages on an HTTP server and the server starts to return 5xx errors, CloudFront can't get the files that you want to return to viewers because the origin server is unavailable.

" + }, + "ResponseCode":{ + "shape":"string", + "documentation":"

The HTTP status code that you want CloudFront to return to the viewer along with the custom error page. There are a variety of reasons that you might want CloudFront to return a status code different from the status code that your origin returned to CloudFront, for example:

If you specify a value for ResponseCode, you must also specify a value for ResponsePagePath. If you don't want to specify a value, include an empty element, <ResponseCode>, in the XML document.

" + }, + "ErrorCachingMinTTL":{ + "shape":"long", + "documentation":"

The minimum amount of time, in seconds, that you want CloudFront to cache the HTTP status code specified in ErrorCode. When this time period has elapsed, CloudFront queries your origin to see whether the problem that caused the error has been resolved and the requested object is now available.

If you don't want to specify a value, include an empty element, <ErrorCachingMinTTL>, in the XML document.

For more information, see Customizing Error Responses in the Amazon CloudFront Developer Guide.

" + } + }, + "documentation":"

A complex type that controls:

For more information about custom error pages, see Customizing Error Responses in the Amazon CloudFront Developer Guide.

" + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + "shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of HTTP status codes for which you want to specify a custom error page and/or a caching duration. If Quantity is 0, you can omit Items.

" + }, + "Items":{ + "shape":"CustomErrorResponseList", + "documentation":"

A complex type that contains a CustomErrorResponse element for each HTTP status code for which you want to specify a custom error page and/or a caching duration.

" + } + }, + "documentation":"

A complex type that controls:

For more information about custom error pages, see Customizing Error Responses in the Amazon CloudFront Developer Guide.

" + }, + "CustomHeaders":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of custom headers, if any, for this distribution.

" + }, + "Items":{ + "shape":"OriginCustomHeadersList", + "documentation":"

Optional: A list that contains one OriginCustomHeader element for each custom header that you want CloudFront to forward to the origin. If Quantity is 0, omit Items.

" + } + }, + "documentation":"

A complex type that contains the list of Custom Headers for each origin.

" + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" + ], + "members":{ + "HTTPPort":{ + "shape":"integer", + "documentation":"

The HTTP port the custom origin listens on.

" + }, + "HTTPSPort":{ + "shape":"integer", + "documentation":"

The HTTPS port the custom origin listens on.

" + }, + "OriginProtocolPolicy":{ + "shape":"OriginProtocolPolicy", + "documentation":"

The origin protocol policy to apply to your origin.

" + }, + "OriginSslProtocols":{ + "shape":"OriginSslProtocols", + "documentation":"

The SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS.

" + }, + "OriginReadTimeout":{ + "shape":"integer", + "documentation":"

You can create a custom origin read timeout. All timeout units are in seconds. The default origin read timeout is 30 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 4 seconds; the maximum is 60 seconds.

If you need to increase the maximum time limit, contact the AWS Support Center.

" + }, + "OriginKeepaliveTimeout":{ + "shape":"integer", + "documentation":"

You can create a custom keep-alive timeout. All timeout units are in seconds. The default keep-alive timeout is 5 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 1 second; the maximum is 60 seconds.

If you need to increase the maximum time limit, contact the AWS Support Center.

" + } + }, + "documentation":"

A customer origin or an Amazon S3 bucket configured as a website endpoint.

" + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{ + "shape":"string", + "documentation":"

The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.

" + }, + "ForwardedValues":{ + "shape":"ForwardedValues", + "documentation":"

A complex type that specifies how CloudFront handles query strings and cookies.

" + }, + "TrustedSigners":{ + "shape":"TrustedSigners", + "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" + }, + "ViewerProtocolPolicy":{ + "shape":"ViewerProtocolPolicy", + "documentation":"

The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. You can specify the following options:

For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide.

The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

" + }, + "MinTTL":{ + "shape":"long", + "documentation":"

The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

You must specify 0 for MinTTL if you configure CloudFront to forward all headers to your origin (under Headers, if you specify 1 for Quantity and * for Name).

" + }, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{ + "shape":"boolean", + "documentation":"

Indicates whether you want to distribute media files in the Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false. If you specify true for SmoothStreaming, you can still distribute other content using this cache behavior if the content matches the value of PathPattern.

" + }, + "DefaultTTL":{ + "shape":"long", + "documentation":"

The default amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. For more information, see Specifying How Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

" + }, + "MaxTTL":{"shape":"long"}, + "Compress":{ + "shape":"boolean", + "documentation":"

Whether you want CloudFront to automatically compress certain files for this cache behavior. If so, specify true; if not, specify false. For more information, see Serving Compressed Files in the Amazon CloudFront Developer Guide.

" + }, + "LambdaFunctionAssociations":{ + "shape":"LambdaFunctionAssociations", + "documentation":"

A complex type that contains zero or more Lambda function associations for a cache behavior.

" + }, + "FieldLevelEncryptionId":{ + "shape":"string", + "documentation":"

The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.

" + } + }, + "documentation":"

A complex type that describes the default cache behavior if you don't specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.

" + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The origin access identity's ID.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header you received from a previous GET or PUT request. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + }, + "documentation":"

Deletes an origin access identity.

" + }, + "DeleteDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The distribution ID.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + }, + "documentation":"

This action deletes a web distribution. To delete a web distribution using the CloudFront API, perform the following steps.

To delete a web distribution using the CloudFront API:

  1. Disable the web distribution

  2. Submit a GET Distribution Config request to get the current configuration and the ETag header for the distribution.

  3. Update the XML document that was returned in the response to your GET Distribution Config request to change the value of Enabled to false.

  4. Submit a PUT Distribution Config request to update the configuration for your distribution. In the request body, include the XML document that you updated in Step 3. Set the value of the HTTP If-Match header to the value of the ETag header that CloudFront returned when you submitted the GET Distribution Config request in Step 2.

  5. Review the response to the PUT Distribution Config request to confirm that the distribution was successfully disabled.

  6. Submit a GET Distribution request to confirm that your changes have propagated. When propagation is complete, the value of Status is Deployed.

  7. Submit a DELETE Distribution request. Set the value of the HTTP If-Match header to the value of the ETag header that CloudFront returned when you submitted the GET Distribution Config request in Step 6.

  8. Review the response to your DELETE Distribution request to confirm that the distribution was successfully deleted.

For information about deleting a distribution using the CloudFront console, see Deleting a Distribution in the Amazon CloudFront Developer Guide.

" + }, + "DeleteFieldLevelEncryptionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The ID of the configuration you want to delete from CloudFront.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when retrieving the configuration identity to delete. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteFieldLevelEncryptionProfileRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

Request the ID of the profile you want to delete from CloudFront.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when retrieving the profile to delete. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeletePublicKeyRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The ID of the public key you want to remove from CloudFront.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when retrieving the public key identity to delete. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The distribution ID.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + }, + "documentation":"

The request to delete a streaming distribution.

" + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "ARN", + "Status", + "LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identifier for the distribution. For example: EDFDVBD632BHDS5.

" + }, + "ARN":{ + "shape":"string", + "documentation":"

The ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, where 123456789012 is your AWS account ID.

" + }, + "Status":{ + "shape":"string", + "documentation":"

This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated to all CloudFront edge locations.

" + }, + "LastModifiedTime":{ + "shape":"timestamp", + "documentation":"

The date and time the distribution was last modified.

" + }, + "InProgressInvalidationBatches":{ + "shape":"integer", + "documentation":"

The number of invalidation batches currently in progress.

" + }, + "DomainName":{ + "shape":"string", + "documentation":"

The domain name corresponding to the distribution, for example, d111111abcdef8.cloudfront.net.

" + }, + "ActiveTrustedSigners":{ + "shape":"ActiveTrustedSigners", + "documentation":"

CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.

" + }, + "DistributionConfig":{ + "shape":"DistributionConfig", + "documentation":"

The current configuration information for the distribution. Send a GET request to the /CloudFront API version/distribution ID/config resource.

" + } + }, + "documentation":"

The distribution's information.

" + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The caller reference you attempted to create the distribution with is associated with another distribution.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{ + "shape":"string", + "documentation":"

A unique value (for example, a date-time stamp) that ensures that the request can't be replayed.

If the value of CallerReference is new (regardless of the content of the DistributionConfig object), CloudFront creates a new distribution.

If CallerReference is a value you already sent in a previous request to create a distribution, and if the content of the DistributionConfig is identical to the original request (ignoring white space), CloudFront returns the same response that it returned to the original request.

If CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.

" + }, + "Aliases":{ + "shape":"Aliases", + "documentation":"

A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.

" + }, + "DefaultRootObject":{ + "shape":"string", + "documentation":"

The object that you want CloudFront to request from your origin (for example, index.html) when a viewer requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/product-description.html). Specifying a default root object avoids exposing the contents of your distribution.

Specify only the object name, for example, index.html. Don't add a / before the object name.

If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element.

To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element.

To replace the default root object, update the distribution configuration and specify the new object.

For more information about the default root object, see Creating a Default Root Object in the Amazon CloudFront Developer Guide.

" + }, + "Origins":{ + "shape":"Origins", + "documentation":"

A complex type that contains information about origins for this distribution.

" + }, + "DefaultCacheBehavior":{ + "shape":"DefaultCacheBehavior", + "documentation":"

A complex type that describes the default cache behavior if you don't specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.

" + }, + "CacheBehaviors":{ + "shape":"CacheBehaviors", + "documentation":"

A complex type that contains zero or more CacheBehavior elements.

" + }, + "CustomErrorResponses":{ + "shape":"CustomErrorResponses", + "documentation":"

A complex type that controls the following:

For more information about custom error pages, see Customizing Error Responses in the Amazon CloudFront Developer Guide.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

Any comments you want to include about the distribution.

If you don't want to specify a comment, include an empty Comment element.

To delete an existing comment, update the distribution configuration and include an empty Comment element.

To add or change a comment, update the distribution configuration and specify the new comment.

" + }, + "Logging":{ + "shape":"LoggingConfig", + "documentation":"

A complex type that controls whether access logs are written for the distribution.

For more information about logging, see Access Logs in the Amazon CloudFront Developer Guide.

" + }, + "PriceClass":{ + "shape":"PriceClass", + "documentation":"

The price class that corresponds with the maximum price that you want to pay for CloudFront service. If you specify PriceClass_All, CloudFront responds to requests for your objects from all CloudFront edge locations.

If you specify a price class other than PriceClass_All, CloudFront serves your objects from the CloudFront edge location that has the lowest latency among the edge locations in your price class. Viewers who are in or near regions that are excluded from your specified price class may encounter slower performance.

For more information about price classes, see Choosing the Price Class for a CloudFront Distribution in the Amazon CloudFront Developer Guide. For information about CloudFront pricing, including how price classes (such as Price Class 100) map to CloudFront regions, see Amazon CloudFront Pricing. For price class information, scroll down to see the table at the bottom of the page.

" + }, + "Enabled":{ + "shape":"boolean", + "documentation":"

From this field, you can enable or disable the selected distribution.

" + }, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{ + "shape":"string", + "documentation":"

A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution.

AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF, see the AWS WAF Developer Guide.

" + }, + "HttpVersion":{ + "shape":"HttpVersion", + "documentation":"

(Optional) Specify the maximum HTTP version that you want viewers to use to communicate with CloudFront. The default value for new web distributions is http2. Viewers that don't support HTTP/2 automatically use an earlier HTTP version.

For viewers and CloudFront to use HTTP/2, viewers must support TLS 1.2 or later, and must support Server Name Indication (SNI).

In general, configuring CloudFront to communicate with viewers using HTTP/2 reduces latency. You can improve performance by optimizing for HTTP/2. For more information, do an Internet search for \"http/2 optimization.\"

" + }, + "IsIPV6Enabled":{ + "shape":"boolean", + "documentation":"

If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address for your distribution, specify true. If you specify false, CloudFront responds to IPv6 DNS requests with the DNS response code NOERROR and with no IP addresses. This allows viewers to submit a second request, for an IPv4 address for your distribution.

In general, you should enable IPv6 if you have users on IPv6 networks who want to access your content. However, if you're using signed URLs or signed cookies to restrict access to your content, and if you're using a custom policy that includes the IpAddress parameter to restrict the IP addresses that can access your content, don't enable IPv6. If you want to restrict access to some content by IP address and not restrict access to other content (or restrict access but not by IP address), you can create two distributions. For more information, see Creating a Signed URL Using a Custom Policy in the Amazon CloudFront Developer Guide.

If you're using an Amazon Route 53 alias resource record set to route traffic to your CloudFront distribution, you need to create a second alias resource record set when both of the following are true:

For more information, see Routing Traffic to an Amazon CloudFront Web Distribution by Using Your Domain Name in the Amazon Route 53 Developer Guide.

If you created a CNAME resource record set, either with Amazon Route 53 or with another DNS service, you don't need to make any changes. A CNAME record will route traffic to your distribution regardless of the IP address format of the viewer request.

" + } + }, + "documentation":"

A distribution configuration.

" + }, + "DistributionConfigWithTags":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Tags" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "documentation":"

A distribution configuration.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

A complex type that contains zero or more Tag elements.

" + } + }, + "documentation":"

A distribution configuration and a list of tags to be associated with the distribution.

" + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

The value you provided for the Marker request parameter.

" + }, + "NextMarker":{ + "shape":"string", + "documentation":"

If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where you left off.

" + }, + "MaxItems":{ + "shape":"integer", + "documentation":"

The value you provided for the MaxItems request parameter.

" + }, + "IsTruncated":{ + "shape":"boolean", + "documentation":"

A flag that indicates whether more distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of distributions that were created by the current AWS account.

" + }, + "Items":{ + "shape":"DistributionSummaryList", + "documentation":"

A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account.

" + } + }, + "documentation":"

A distribution list.

" + }, + "DistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "ARN", + "Status", + "LastModifiedTime", + "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions", + "WebACLId", + "HttpVersion", + "IsIPV6Enabled" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identifier for the distribution. For example: EDFDVBD632BHDS5.

" + }, + "ARN":{ + "shape":"string", + "documentation":"

The ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, where 123456789012 is your AWS account ID.

" + }, + "Status":{ + "shape":"string", + "documentation":"

The current status of the distribution. When the status is Deployed, the distribution's information is propagated to all CloudFront edge locations.

" + }, + "LastModifiedTime":{ + "shape":"timestamp", + "documentation":"

The date and time the distribution was last modified.

" + }, + "DomainName":{ + "shape":"string", + "documentation":"

The domain name that corresponds to the distribution, for example, d111111abcdef8.cloudfront.net.

" + }, + "Aliases":{ + "shape":"Aliases", + "documentation":"

A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.

" + }, + "Origins":{ + "shape":"Origins", + "documentation":"

A complex type that contains information about origins for this distribution.

" + }, + "DefaultCacheBehavior":{ + "shape":"DefaultCacheBehavior", + "documentation":"

A complex type that describes the default cache behavior if you don't specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.

" + }, + "CacheBehaviors":{ + "shape":"CacheBehaviors", + "documentation":"

A complex type that contains zero or more CacheBehavior elements.

" + }, + "CustomErrorResponses":{ + "shape":"CustomErrorResponses", + "documentation":"

A complex type that contains zero or more CustomErrorResponses elements.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

The comment originally specified when this distribution was created.

" + }, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{ + "shape":"boolean", + "documentation":"

Whether the distribution is enabled to accept user requests for content.

" + }, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{ + "shape":"string", + "documentation":"

The Web ACL Id (if any) associated with the distribution.

" + }, + "HttpVersion":{ + "shape":"HttpVersion", + "documentation":"

Specify the maximum HTTP version that you want viewers to use to communicate with CloudFront. The default value for new web distributions is http2. Viewers that don't support HTTP/2 will automatically use an earlier version.

" + }, + "IsIPV6Enabled":{ + "shape":"boolean", + "documentation":"

Whether CloudFront responds to IPv6 DNS requests with an IPv6 address for your distribution.

" + } + }, + "documentation":"

A summary of the information about a CloudFront distribution.

" + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "EncryptionEntities":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

Number of field pattern items in a field-level encryption content type-profile mapping.

" + }, + "Items":{ + "shape":"EncryptionEntityList", + "documentation":"

An array of field patterns in a field-level encryption content type-profile mapping.

" + } + }, + "documentation":"

Complex data type for field-level encryption profiles that includes all of the encryption entities.

" + }, + "EncryptionEntity":{ + "type":"structure", + "required":[ + "PublicKeyId", + "ProviderId", + "FieldPatterns" + ], + "members":{ + "PublicKeyId":{ + "shape":"string", + "documentation":"

The public key associated with a set of field-level encryption patterns, to be used when encrypting the fields that match the patterns.

" + }, + "ProviderId":{ + "shape":"string", + "documentation":"

The provider associated with the public key being used for encryption. This value must also be provided with the private key for applications to be able to decrypt data.

" + }, + "FieldPatterns":{ + "shape":"FieldPatterns", + "documentation":"

Field patterns in a field-level encryption content type profile specify the fields that you want to be encrypted. You can provide the full field name, or any beginning characters followed by a wildcard (*). You can't overlap field patterns. For example, you can't have both ABC* and AB*. Note that field patterns are case-sensitive.

" + } + }, + "documentation":"

Complex data type for field-level encryption profiles that includes the encryption key and field pattern specifications.

" + }, + "EncryptionEntityList":{ + "type":"list", + "member":{ + "shape":"EncryptionEntity", + "locationName":"EncryptionEntity" + } + }, + "EventType":{ + "type":"string", + "enum":[ + "viewer-request", + "viewer-response", + "origin-request", + "origin-response" + ] + }, + "FieldLevelEncryption":{ + "type":"structure", + "required":[ + "Id", + "LastModifiedTime", + "FieldLevelEncryptionConfig" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The configuration ID for a field-level encryption configuration which includes a set of profiles that specify certain selected data fields to be encrypted by specific public keys.

" + }, + "LastModifiedTime":{ + "shape":"timestamp", + "documentation":"

The last time the field-level encryption configuration was changed.

" + }, + "FieldLevelEncryptionConfig":{ + "shape":"FieldLevelEncryptionConfig", + "documentation":"

A complex data type that includes the profile configurations specified for field-level encryption.

" + } + }, + "documentation":"

A complex data type that includes the profile configurations and other options specified for field-level encryption.

" + }, + "FieldLevelEncryptionConfig":{ + "type":"structure", + "required":["CallerReference"], + "members":{ + "CallerReference":{ + "shape":"string", + "documentation":"

A unique number that ensures the request can't be replayed.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

An optional comment about the configuration.

" + }, + "QueryArgProfileConfig":{ + "shape":"QueryArgProfileConfig", + "documentation":"

A complex data type that specifies when to forward content if a profile isn't found and the profile that can be provided as a query argument in a request.

" + }, + "ContentTypeProfileConfig":{ + "shape":"ContentTypeProfileConfig", + "documentation":"

A complex data type that specifies when to forward content if a content type isn't recognized and profiles to use by default in a request if a query argument doesn't specify a profile to use.

" + } + }, + "documentation":"

A complex data type that includes the profile configurations specified for field-level encryption.

" + }, + "FieldLevelEncryptionConfigAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified configuration for field-level encryption already exists.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "FieldLevelEncryptionConfigInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified configuration for field-level encryption is in use.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "FieldLevelEncryptionList":{ + "type":"structure", + "required":[ + "MaxItems", + "Quantity" + ], + "members":{ + "NextMarker":{ + "shape":"string", + "documentation":"

If there are more elements to be listed, this element is present and contains the value that you can use for the Marker request parameter to continue listing your configurations where you left off.

" + }, + "MaxItems":{ + "shape":"integer", + "documentation":"

The maximum number of elements you want in the response body.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of field-level encryption items.

" + }, + "Items":{ + "shape":"FieldLevelEncryptionSummaryList", + "documentation":"

An array of field-level encryption items.

" + } + }, + "documentation":"

List of field-level encryption configurations.

" + }, + "FieldLevelEncryptionProfile":{ + "type":"structure", + "required":[ + "Id", + "LastModifiedTime", + "FieldLevelEncryptionProfileConfig" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The ID for a field-level encryption profile configuration which includes a set of profiles that specify certain selected data fields to be encrypted by specific public keys.

" + }, + "LastModifiedTime":{ + "shape":"timestamp", + "documentation":"

The last time the field-level encryption profile was updated.

" + }, + "FieldLevelEncryptionProfileConfig":{ + "shape":"FieldLevelEncryptionProfileConfig", + "documentation":"

A complex data type that includes the profile name and the encryption entities for the field-level encryption profile.

" + } + }, + "documentation":"

A complex data type for field-level encryption profiles.

" + }, + "FieldLevelEncryptionProfileAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified profile for field-level encryption already exists.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "FieldLevelEncryptionProfileConfig":{ + "type":"structure", + "required":[ + "Name", + "CallerReference", + "EncryptionEntities" + ], + "members":{ + "Name":{ + "shape":"string", + "documentation":"

Profile name for the field-level encryption profile.

" + }, + "CallerReference":{ + "shape":"string", + "documentation":"

A unique number that ensures the request can't be replayed.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

An optional comment for the field-level encryption profile.

" + }, + "EncryptionEntities":{ + "shape":"EncryptionEntities", + "documentation":"

A complex data type of encryption entities for the field-level encryption profile that include the public key ID, provider, and field patterns for specifying which fields to encrypt with this key.

" + } + }, + "documentation":"

A complex data type of profiles for the field-level encryption.

" + }, + "FieldLevelEncryptionProfileInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified profile for field-level encryption is in use.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "FieldLevelEncryptionProfileList":{ + "type":"structure", + "required":[ + "MaxItems", + "Quantity" + ], + "members":{ + "NextMarker":{ + "shape":"string", + "documentation":"

If there are more elements to be listed, this element is present and contains the value that you can use for the Marker request parameter to continue listing your profiles where you left off.

" + }, + "MaxItems":{ + "shape":"integer", + "documentation":"

The maximum number of field-level encryption profiles you want in the response body.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of field-level encryption profiles.

" + }, + "Items":{ + "shape":"FieldLevelEncryptionProfileSummaryList", + "documentation":"

The field-level encryption profile items.

" + } + }, + "documentation":"

List of field-level encryption profiles.

" + }, + "FieldLevelEncryptionProfileSizeExceeded":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The maximum size of a profile for field-level encryption was exceeded.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "FieldLevelEncryptionProfileSummary":{ + "type":"structure", + "required":[ + "Id", + "LastModifiedTime", + "Name", + "EncryptionEntities" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

ID for the field-level encryption profile summary.

" + }, + "LastModifiedTime":{ + "shape":"timestamp", + "documentation":"

The time when the field-level encryption profile summary was last updated.

" + }, + "Name":{ + "shape":"string", + "documentation":"

Name for the field-level encryption profile summary.

" + }, + "EncryptionEntities":{ + "shape":"EncryptionEntities", + "documentation":"

A complex data type of encryption entities for the field-level encryption profile that include the public key ID, provider, and field patterns for specifying which fields to encrypt with this key.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

An optional comment for the field-level encryption profile summary.

" + } + }, + "documentation":"

The field-level encryption profile summary.

" + }, + "FieldLevelEncryptionProfileSummaryList":{ + "type":"list", + "member":{ + "shape":"FieldLevelEncryptionProfileSummary", + "locationName":"FieldLevelEncryptionProfileSummary" + } + }, + "FieldLevelEncryptionSummary":{ + "type":"structure", + "required":[ + "Id", + "LastModifiedTime" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The unique ID of a field-level encryption item.

" + }, + "LastModifiedTime":{ + "shape":"timestamp", + "documentation":"

The last time that the summary of field-level encryption items was modified.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

An optional comment about the field-level encryption item.

" + }, + "QueryArgProfileConfig":{ + "shape":"QueryArgProfileConfig", + "documentation":"

A summary of a query argument-profile mapping.

" + }, + "ContentTypeProfileConfig":{ + "shape":"ContentTypeProfileConfig", + "documentation":"

A summary of a content type-profile mapping.

" + } + }, + "documentation":"

A summary of a field-level encryption item.

" + }, + "FieldLevelEncryptionSummaryList":{ + "type":"list", + "member":{ + "shape":"FieldLevelEncryptionSummary", + "locationName":"FieldLevelEncryptionSummary" + } + }, + "FieldPatternList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"FieldPattern" + } + }, + "FieldPatterns":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of field-level encryption field patterns.

" + }, + "Items":{ + "shape":"FieldPatternList", + "documentation":"

An array of the field-level encryption field patterns.

" + } + }, + "documentation":"

A complex data type that includes the field patterns to match for field-level encryption.

" + }, + "Format":{ + "type":"string", + "enum":["URLEncoded"] + }, + "ForwardedValues":{ + "type":"structure", + "required":[ + "QueryString", + "Cookies" + ], + "members":{ + "QueryString":{ + "shape":"boolean", + "documentation":"

Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior and cache based on the query string parameters. CloudFront behavior depends on the value of QueryString and on the values that you specify for QueryStringCacheKeys, if any:

If you specify true for QueryString and you don't specify any values for QueryStringCacheKeys, CloudFront forwards all query string parameters to the origin and caches based on all query string parameters. Depending on how many query string parameters and values you have, this can adversely affect performance because CloudFront must forward more requests to the origin.

If you specify true for QueryString and you specify one or more values for QueryStringCacheKeys, CloudFront forwards all query string parameters to the origin, but it only caches based on the query string parameters that you specify.

If you specify false for QueryString, CloudFront doesn't forward any query string parameters to the origin, and doesn't cache based on query string parameters.

For more information, see Configuring CloudFront to Cache Based on Query String Parameters in the Amazon CloudFront Developer Guide.

" + }, + "Cookies":{ + "shape":"CookiePreference", + "documentation":"

A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. For more information about forwarding cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies in the Amazon CloudFront Developer Guide.

" + }, + "Headers":{ + "shape":"Headers", + "documentation":"

A complex type that specifies the Headers, if any, that you want CloudFront to base caching on for this cache behavior.

" + }, + "QueryStringCacheKeys":{ + "shape":"QueryStringCacheKeys", + "documentation":"

A complex type that contains information about the query string parameters that you want CloudFront to use for caching for this cache behavior.

" + } + }, + "documentation":"

A complex type that specifies how CloudFront handles query strings and cookies.

" + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{ + "shape":"GeoRestrictionType", + "documentation":"

The method that you want to use to restrict distribution of your content by country:

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.

" + }, + "Items":{ + "shape":"LocationList", + "documentation":"

A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist).

The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. Include one Location element for each country.

CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list on the CloudFront console, which includes both country names and codes.

" + } + }, + "documentation":"

A complex type that controls the countries in which your content is distributed. CloudFront determines the location of your users using MaxMind GeoIP databases.

" + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identity's ID.

", + "location":"uri", + "locationName":"Id" + } + }, + "documentation":"

The origin access identity's configuration information. For more information, see CloudFrontOriginAccessIdentityConfigComplexType.

" + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "documentation":"

The origin access identity's configuration information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identity's ID.

", + "location":"uri", + "locationName":"Id" + } + }, + "documentation":"

The request to get an origin access identity's information.

" + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{ + "shape":"CloudFrontOriginAccessIdentity", + "documentation":"

The origin access identity's information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the origin access identity's information. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The distribution's ID.

", + "location":"uri", + "locationName":"Id" + } + }, + "documentation":"

The request to get a distribution configuration.

" + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "documentation":"

The distribution's configuration information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"DistributionConfig" + }, + "GetDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The distribution's ID.

", + "location":"uri", + "locationName":"Id" + } + }, + "documentation":"

The request to get a distribution's information.

" + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{ + "shape":"Distribution", + "documentation":"

The distribution's information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the distribution's information. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"Distribution" + }, + "GetFieldLevelEncryptionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

Request the ID for the field-level encryption configuration information.

", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetFieldLevelEncryptionConfigResult":{ + "type":"structure", + "members":{ + "FieldLevelEncryptionConfig":{ + "shape":"FieldLevelEncryptionConfig", + "documentation":"

Return the field-level encryption configuration information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the field level encryption configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"FieldLevelEncryptionConfig" + }, + "GetFieldLevelEncryptionProfileConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

Get the ID for the field-level encryption profile configuration information.

", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetFieldLevelEncryptionProfileConfigResult":{ + "type":"structure", + "members":{ + "FieldLevelEncryptionProfileConfig":{ + "shape":"FieldLevelEncryptionProfileConfig", + "documentation":"

Return the field-level encryption profile configuration information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the field-level encryption profile configuration result. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"FieldLevelEncryptionProfileConfig" + }, + "GetFieldLevelEncryptionProfileRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

Get the ID for the field-level encryption profile information.

", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetFieldLevelEncryptionProfileResult":{ + "type":"structure", + "members":{ + "FieldLevelEncryptionProfile":{ + "shape":"FieldLevelEncryptionProfile", + "documentation":"

Return the field-level encryption profile information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the field-level encryption profile. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"FieldLevelEncryptionProfile" + }, + "GetFieldLevelEncryptionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

Request the ID for the field-level encryption configuration information.

", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetFieldLevelEncryptionResult":{ + "type":"structure", + "members":{ + "FieldLevelEncryption":{ + "shape":"FieldLevelEncryption", + "documentation":"

Return the field-level encryption configuration information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the field-level encryption configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"FieldLevelEncryption" + }, + "GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "documentation":"

The distribution's ID.

", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "documentation":"

The identifier for the invalidation request, for example, IDFDVBD632BHDS5.

", + "location":"uri", + "locationName":"Id" + } + }, + "documentation":"

The request to get an invalidation's information.

" + }, + "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{ + "shape":"Invalidation", + "documentation":"

The invalidation's information. For more information, see Invalidation Complex Type.

" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"Invalidation" + }, + "GetPublicKeyConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

Request the ID for the public key configuration.

", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetPublicKeyConfigResult":{ + "type":"structure", + "members":{ + "PublicKeyConfig":{ + "shape":"PublicKeyConfig", + "documentation":"

Return the result for the public key configuration.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the public key configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"PublicKeyConfig" + }, + "GetPublicKeyRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

Request the ID for the public key.

", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetPublicKeyResult":{ + "type":"structure", + "members":{ + "PublicKey":{ + "shape":"PublicKey", + "documentation":"

Return the public key.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the public key. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"PublicKey" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The streaming distribution's ID.

", + "location":"uri", + "locationName":"Id" + } + }, + "documentation":"

The request to get a streaming distribution configuration.

" + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "documentation":"

The streaming distribution's configuration information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The streaming distribution's ID.

", + "location":"uri", + "locationName":"Id" + } + }, + "documentation":"

The request to get a streaming distribution's information.

" + }, + "GetStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{ + "shape":"StreamingDistribution", + "documentation":"

The streaming distribution's information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the streaming distribution's information. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of different headers that you want CloudFront to base caching on for this cache behavior. You can configure each cache behavior in a web distribution to do one of the following:

Regardless of which option you choose, CloudFront forwards headers to your origin based on whether the origin is an S3 bucket or a custom origin. See the following documentation:

" + }, + "Items":{ + "shape":"HeaderList", + "documentation":"

A list that contains one Name element for each header that you want CloudFront to use for caching in this cache behavior. If Quantity is 0, omit Items.

" + } + }, + "documentation":"

A complex type that specifies the request headers, if any, that you want CloudFront to base caching on for this cache behavior.

For the headers that you specify, CloudFront caches separate versions of a specified object based on the header values in viewer requests. For example, suppose viewer requests for logo.jpg contain a custom product header that has a value of either acme or apex, and you configure CloudFront to cache your content based on values in the product header. CloudFront forwards the product header to the origin and caches the response from the origin once for each header value. For more information about caching based on header values, see How CloudFront Forwards and Caches Headers in the Amazon CloudFront Developer Guide.
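As a rough illustration, the Headers complex type above maps onto a Python parameter dict such as the following when it is nested inside a cache behavior's ForwardedValues (a structure defined elsewhere in this model); the product header name is the one used in the paragraph above, and the rest of the fragment is a hypothetical example:

    # Hypothetical fragment of a cache behavior's ForwardedValues,
    # caching separate object versions per value of the 'product' header.
    forwarded_values = {
        'QueryString': False,
        'Cookies': {'Forward': 'none'},
        'Headers': {
            'Quantity': 1,
            'Items': ['product'],   # omit Items entirely when Quantity is 0
        },
    }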

" + }, + "HttpVersion":{ + "type":"string", + "enum":[ + "http1.1", + "http2" + ] + }, + "IllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified configuration for field-level encryption can't be associated with the specified cache behavior.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Origin and CallerReference cannot be updated.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The value of Quantity and the size of Items don't match.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The argument is invalid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The default root object file name is too big or contains an invalid character.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Your request contains a forward cookies option that doesn't match the expectation for the whitelisted list of cookie names. Either a list of cookie names has been specified when not allowed, or a list of cookie names is missing when expected.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The If-Match version is missing or not valid for the distribution.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLambdaFunctionAssociation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified Lambda function association is invalid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The origin access identity is not valid or doesn't exist.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginKeepaliveTimeout":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginReadTimeout":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

You cannot specify SSLv3 as the minimum protocol version if you want to support only clients that support Server Name Indication (SNI).

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidQueryStringParameters":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The relative path is too big, is not URL-encoded, or does not begin with a slash (/).

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

This operation requires the HTTPS protocol. Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTagging":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidWebACLId":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identifier for the invalidation request. For example: IDFDVBD632BHDS5.

" + }, + "Status":{ + "shape":"string", + "documentation":"

The status of the invalidation request. When the invalidation batch is finished, the status is Completed.

" + }, + "CreateTime":{ + "shape":"timestamp", + "documentation":"

The date and time the invalidation request was first made.

" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "documentation":"

The current invalidation information for the batch request.

" + } + }, + "documentation":"

An invalidation.

" + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + "CallerReference" + ], + "members":{ + "Paths":{ + "shape":"Paths", + "documentation":"

A complex type that contains information about the objects that you want to invalidate. For more information, see Specifying the Objects to Invalidate in the Amazon CloudFront Developer Guide.

" + }, + "CallerReference":{ + "shape":"string", + "documentation":"

A value that you specify to uniquely identify an invalidation request. CloudFront uses the value to prevent you from accidentally resubmitting an identical request. Whenever you create a new invalidation request, you must specify a new value for CallerReference and change other values in the request as applicable. One way to ensure that the value of CallerReference is unique is to use a timestamp, for example, 20120301090000.

If you make a second invalidation request with the same value for CallerReference, and if the rest of the request is the same, CloudFront doesn't create a new invalidation request. Instead, CloudFront returns information about the invalidation request that you previously created with the same CallerReference.

If CallerReference is a value you already sent in a previous invalidation batch request but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.
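A minimal sketch of submitting an invalidation batch with boto3, using a timestamp for CallerReference as the paragraph above suggests; the distribution ID and paths are placeholders:

    import time
    import boto3

    cloudfront = boto3.client('cloudfront')

    response = cloudfront.create_invalidation(
        DistributionId='EDFDVBD632BHDS5',        # placeholder ID
        InvalidationBatch={
            'Paths': {
                'Quantity': 2,
                'Items': ['/index.html', '/images/*'],
            },
            # A timestamp keeps CallerReference unique; resending the same
            # value with the same paths returns the earlier invalidation.
            'CallerReference': time.strftime('%Y%m%d%H%M%S'),
        },
    )
    print(response['Invalidation']['Id'], response['Invalidation']['Status'])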

" + } + }, + "documentation":"

An invalidation batch.

" + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

The value that you provided for the Marker request parameter.

" + }, + "NextMarker":{ + "shape":"string", + "documentation":"

If IsTruncated is true, this element is present and contains the value that you can use for the Marker request parameter to continue listing your invalidation batches where they left off.

" + }, + "MaxItems":{ + "shape":"integer", + "documentation":"

The value that you provided for the MaxItems request parameter.

" + }, + "IsTruncated":{ + "shape":"boolean", + "documentation":"

A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of invalidation batches that were created by the current AWS account.

" + }, + "Items":{ + "shape":"InvalidationSummaryList", + "documentation":"

A complex type that contains one InvalidationSummary element for each invalidation batch created by the current AWS account.

" + } + }, + "documentation":"

The InvalidationList complex type describes the list of invalidation objects. For more information about invalidation, see Invalidating Objects (Web Distributions Only) in the Amazon CloudFront Developer Guide.
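Because InvalidationList carries IsTruncated and NextMarker, callers can page through invalidation batches with a simple loop. A sketch assuming boto3 and a placeholder distribution ID (note that MaxItems is a string in this model):

    import boto3

    cloudfront = boto3.client('cloudfront')

    kwargs = {'DistributionId': 'EDFDVBD632BHDS5', 'MaxItems': '25'}  # placeholder ID
    while True:
        page = cloudfront.list_invalidations(**kwargs)['InvalidationList']
        for summary in page.get('Items', []):
            print(summary['Id'], summary['Status'])
        if not page['IsTruncated']:
            break
        # NextMarker is the ID of the last batch on this page; feed it back
        # in as Marker to continue the listing where it left off.
        kwargs['Marker'] = page['NextMarker']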

" + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The unique ID for an invalidation request.

" + }, + "CreateTime":{"shape":"timestamp"}, + "Status":{ + "shape":"string", + "documentation":"

The status of an invalidation request.

" + } + }, + "documentation":"

A summary of an invalidation request.

" + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of active CloudFront key pairs for AwsAccountNumber.

For more information, see ActiveTrustedSigners.

" + }, + "Items":{ + "shape":"KeyPairIdList", + "documentation":"

A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.

For more information, see ActiveTrustedSigners.

" + } + }, + "documentation":"

A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.

For more information, see ActiveTrustedSigners.

" + }, + "LambdaFunctionARN":{"type":"string"}, + "LambdaFunctionAssociation":{ + "type":"structure", + "required":[ + "LambdaFunctionARN", + "EventType" + ], + "members":{ + "LambdaFunctionARN":{ + "shape":"LambdaFunctionARN", + "documentation":"

The ARN of the Lambda function. You must specify the ARN of a function version; you can't specify a Lambda alias or $LATEST.

" + }, + "EventType":{ + "shape":"EventType", + "documentation":"

Specifies the event type that triggers a Lambda function invocation. You can specify the following values:

" + }, + "IncludeBody":{ + "shape":"boolean", + "documentation":"

A flag that allows a Lambda function to have read access to the body content. For more information, see Accessing Body Content in the Amazon CloudFront Developer Guide.

" + } + }, + "documentation":"

A complex type that contains a Lambda function association.

" + }, + "LambdaFunctionAssociationList":{ + "type":"list", + "member":{ + "shape":"LambdaFunctionAssociation", + "locationName":"LambdaFunctionAssociation" + } + }, + "LambdaFunctionAssociations":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of Lambda function associations for this cache behavior.

" + }, + "Items":{ + "shape":"LambdaFunctionAssociationList", + "documentation":"

Optional: A complex type that contains LambdaFunctionAssociation items for this cache behavior. If Quantity is 0, you can omit Items.

" + } + }, + "documentation":"

A complex type that specifies a list of Lambda functions associations for a cache behavior.

If you want to invoke one or more Lambda functions triggered by requests that match the PathPattern of the cache behavior, specify the applicable values for Quantity and Items. Note that there can be up to 4 LambdaFunctionAssociation items in this list (one for each possible value of EventType) and each EventType can be associated with the Lambda function only once.

If you don't want to invoke any Lambda functions for the requests that match PathPattern, specify 0 for Quantity and omit Items.
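Expressed as the parameter dict a botocore-based client would send, a cache behavior that invokes one Lambda@Edge function version on viewer requests might look like the following hypothetical fragment; the function ARN is a placeholder, and viewer-request is one of the EventType values defined elsewhere in this model:

    # Hypothetical cache-behavior fragment: at most four associations,
    # one per event type, and the ARN must point at a function *version*.
    lambda_function_associations = {
        'Quantity': 1,
        'Items': [
            {
                'LambdaFunctionARN': 'arn:aws:lambda:us-east-1:123456789012'
                                     ':function:add-security-headers:3',
                'EventType': 'viewer-request',
                'IncludeBody': False,
            },
        ],
    }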

" + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "documentation":"

The maximum number of origin access identities you want in the response body.

", + "location":"querystring", + "locationName":"MaxItems" + } + }, + "documentation":"

The request to list origin access identities.

" + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityList":{ + "shape":"CloudFrontOriginAccessIdentityList", + "documentation":"

The CloudFrontOriginAccessIdentityList type.

" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsByWebACLIdRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "documentation":"

The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.

", + "location":"querystring", + "locationName":"MaxItems" + }, + "WebACLId":{ + "shape":"string", + "documentation":"

The ID of the AWS WAF web ACL for which you want to list the associated distributions. If you specify \"null\" for the ID, the request returns a list of the distributions that aren't associated with a web ACL.

", + "location":"uri", + "locationName":"WebACLId" + } + }, + "documentation":"

The request to list distributions that are associated with a specified AWS WAF web ACL.
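A short sketch of both forms of the call described above, assuming boto3; the web ACL ID is a placeholder:

    import boto3

    cloudfront = boto3.client('cloudfront')

    # Distributions associated with a specific AWS WAF web ACL.
    with_acl = cloudfront.list_distributions_by_web_acl_id(
        WebACLId='473e64fd-f30b-4765-81a0-62ad96dd167a')   # placeholder ID

    # Passing the literal string "null" lists the distributions that are
    # not associated with any web ACL.
    without_acl = cloudfront.list_distributions_by_web_acl_id(WebACLId='null')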

" + }, + "ListDistributionsByWebACLIdResult":{ + "type":"structure", + "members":{ + "DistributionList":{ + "shape":"DistributionList", + "documentation":"

The DistributionList type.

" + } + }, + "documentation":"

The response to a request to list the distributions that are associated with a specified AWS WAF web ACL.

", + "payload":"DistributionList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

Use this when paginating results to indicate where to begin in your list of distributions. The results include distributions in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "documentation":"

The maximum number of distributions you want in the response body.

", + "location":"querystring", + "locationName":"MaxItems" + } + }, + "documentation":"

The request to list your distributions.

" + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{ + "shape":"DistributionList", + "documentation":"

The DistributionList type.

" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"DistributionList" + }, + "ListFieldLevelEncryptionConfigsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

Use this when paginating results to indicate where to begin in your list of configurations. The results include configurations in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last configuration on that page).

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "documentation":"

The maximum number of field-level encryption configurations you want in the response body.

", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListFieldLevelEncryptionConfigsResult":{ + "type":"structure", + "members":{ + "FieldLevelEncryptionList":{ + "shape":"FieldLevelEncryptionList", + "documentation":"

Returns a list of all field-level encryption configurations that have been created in CloudFront for this account.

" + } + }, + "payload":"FieldLevelEncryptionList" + }, + "ListFieldLevelEncryptionProfilesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

Use this when paginating results to indicate where to begin in your list of profiles. The results include profiles in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last profile on that page).

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "documentation":"

The maximum number of field-level encryption profiles you want in the response body.

", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListFieldLevelEncryptionProfilesResult":{ + "type":"structure", + "members":{ + "FieldLevelEncryptionProfileList":{ + "shape":"FieldLevelEncryptionProfileList", + "documentation":"

Returns a list of the field-level encryption profiles that have been created in CloudFront for this account.

" + } + }, + "payload":"FieldLevelEncryptionProfileList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "documentation":"

The distribution's ID.

", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "documentation":"

Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "documentation":"

The maximum number of invalidation batches that you want in the response body.

", + "location":"querystring", + "locationName":"MaxItems" + } + }, + "documentation":"

The request to list invalidations.

" + }, + "ListInvalidationsResult":{ + "type":"structure", + "members":{ + "InvalidationList":{ + "shape":"InvalidationList", + "documentation":"

Information about invalidation batches.

" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"InvalidationList" + }, + "ListPublicKeysRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

Use this when paginating results to indicate where to begin in your list of public keys. The results include public keys in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last public key on that page).

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "documentation":"

The maximum number of public keys you want in the response body.

", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListPublicKeysResult":{ + "type":"structure", + "members":{ + "PublicKeyList":{ + "shape":"PublicKeyList", + "documentation":"

Returns a list of all public keys that have been added to CloudFront for this account.

" + } + }, + "payload":"PublicKeyList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

The value that you provided for the Marker request parameter.

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "documentation":"

The value that you provided for the MaxItems request parameter.

", + "location":"querystring", + "locationName":"MaxItems" + } + }, + "documentation":"

The request to list your streaming distributions.

" + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{ + "shape":"StreamingDistributionList", + "documentation":"

The StreamingDistributionList type.

" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"StreamingDistributionList" + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["Resource"], + "members":{ + "Resource":{ + "shape":"ResourceARN", + "documentation":"

An ARN of a CloudFront resource.

", + "location":"querystring", + "locationName":"Resource" + } + }, + "documentation":"

The request to list tags for a CloudFront resource.

" + }, + "ListTagsForResourceResult":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

A complex type that contains zero or more Tag elements.

" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"Tags" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{ + "shape":"boolean", + "documentation":"

Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, Prefix, and IncludeCookies, the values are automatically deleted.

" + }, + "IncludeCookies":{ + "shape":"boolean", + "documentation":"

Specifies whether you want CloudFront to include cookies in access logs; if you do, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. If you don't want to include cookies when you create a distribution, or if you want to disable cookie logging for an existing distribution, specify false for IncludeCookies.

" + }, + "Bucket":{ + "shape":"string", + "documentation":"

The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.

" + }, + "Prefix":{ + "shape":"string", + "documentation":"

An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. If you want to enable logging, but you don't want to specify a prefix, you still must include an empty Prefix element in the Logging element.

" + } + }, + "documentation":"

A complex type that controls whether access logs are written for the distribution.
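The bucket and prefix examples in the member documentation above translate into parameter dicts like these; a sketch of both the enabled and the disabled form:

    # Access logging turned on, using the example values from the
    # documentation above.
    logging_enabled = {
        'Enabled': True,
        'IncludeCookies': False,
        'Bucket': 'myawslogbucket.s3.amazonaws.com',
        'Prefix': 'myprefix/',
    }

    # Logging turned off: Enabled is False, and Bucket and Prefix are
    # empty strings, as the Enabled documentation requires.
    logging_disabled = {
        'Enabled': False,
        'IncludeCookies': False,
        'Bucket': '',
        'Prefix': '',
    }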

" + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1", + "TLSv1_2016", + "TLSv1.1_2016", + "TLSv1.2_2018" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

This operation requires a body. Ensure that the body is present and the Content-Type header is set.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified origin access identity does not exist.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified distribution does not exist.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchFieldLevelEncryptionConfig":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified configuration for field-level encryption doesn't exist.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchFieldLevelEncryptionProfile":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified profile for field-level encryption doesn't exist.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified invalidation does not exist.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

No origin exists with the specified Origin Id.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchPublicKey":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified public key doesn't exist.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchResource":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified streaming distribution does not exist.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

A unique identifier for the origin. The value of Id must be unique within the distribution.

When you specify the value of TargetOriginId for the default cache behavior or for another cache behavior, you indicate the origin to which you want the cache behavior to route requests by specifying the value of the Id element for that origin. When a request matches the path pattern for that cache behavior, CloudFront routes the request to the specified origin. For more information, see Cache Behavior Settings in the Amazon CloudFront Developer Guide.

" + }, + "DomainName":{ + "shape":"string", + "documentation":"

Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. If you set up your bucket as a website endpoint, enter the Amazon S3 static website hosting endpoint for the bucket.

Constraints for Amazon S3 origins:

Custom Origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.

Constraints for custom origins:

" + }, + "OriginPath":{ + "shape":"string", + "documentation":"

An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. CloudFront appends the directory name to the value of DomainName, for example, example.com/production. Do not include a / at the end of the directory name.

For example, suppose you've specified the following values for your distribution:

When a user enters example.com/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/index.html.

When a user enters example.com/acme/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/acme/index.html.

" + }, + "CustomHeaders":{ + "shape":"CustomHeaders", + "documentation":"

A complex type that contains names and values for the custom headers that you want.

" + }, + "S3OriginConfig":{ + "shape":"S3OriginConfig", + "documentation":"

A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.

" + }, + "CustomOriginConfig":{ + "shape":"CustomOriginConfig", + "documentation":"

A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead.

" + } + }, + "documentation":"

A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files. You must create at least one origin.

For the current limit on the number of origins that you can create for a distribution, see Amazon CloudFront Limits in the AWS General Reference.
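Pulling the DomainName, OriginPath, and S3OriginConfig members together, an Origins value for a single S3 origin might be written as the following hypothetical fragment; the origin Id and bucket name are placeholders, and the empty OriginAccessIdentity leaves objects reachable through both CloudFront and Amazon S3 URLs:

    # Hypothetical Origins block for a DistributionConfig.
    origins = {
        'Quantity': 1,
        'Items': [
            {
                'Id': 'my-s3-origin',                        # referenced by TargetOriginId
                'DomainName': 'myawsbucket.s3.amazonaws.com',
                'OriginPath': '/production',                 # no trailing slash
                'CustomHeaders': {'Quantity': 0},
                'S3OriginConfig': {'OriginAccessIdentity': ''},
            },
        ],
    }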

" + }, + "OriginCustomHeader":{ + "type":"structure", + "required":[ + "HeaderName", + "HeaderValue" + ], + "members":{ + "HeaderName":{ + "shape":"string", + "documentation":"

The name of a header that you want CloudFront to forward to your origin. For more information, see Forwarding Custom Headers to Your Origin (Web Distributions Only) in the Amazon CloudFront Developer Guide.

" + }, + "HeaderValue":{ + "shape":"string", + "documentation":"

The value for the header that you specified in the HeaderName field.

" + } + }, + "documentation":"

A complex type that contains HeaderName and HeaderValue elements, if any, for this distribution.

" + }, + "OriginCustomHeadersList":{ + "type":"list", + "member":{ + "shape":"OriginCustomHeader", + "locationName":"OriginCustomHeader" + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer", + "https-only" + ] + }, + "OriginSslProtocols":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of SSL/TLS protocols that you want to allow CloudFront to use when establishing an HTTPS connection with this origin.

" + }, + "Items":{ + "shape":"SslProtocolsList", + "documentation":"

A list that contains allowed SSL/TLS protocols for this distribution.

" + } + }, + "documentation":"

A complex type that contains information about the SSL/TLS protocols that CloudFront can use when establishing an HTTPS connection with your origin.

" + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of origins for this distribution.

" + }, + "Items":{ + "shape":"OriginList", + "documentation":"

A complex type that contains origins for this distribution.

" + } + }, + "documentation":"

A complex type that contains information about origins for this distribution.

" + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of objects that you want to invalidate.

" + }, + "Items":{ + "shape":"PathList", + "documentation":"

A complex type that contains a list of the paths that you want to invalidate.

" + } + }, + "documentation":"

A complex type that contains information about the objects that you want to invalidate. For more information, see Specifying the Objects to Invalidate in the Amazon CloudFront Developer Guide.

" + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The precondition given in one or more of the request-header fields evaluated to false.

", + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "PublicKey":{ + "type":"structure", + "required":[ + "Id", + "CreatedTime", + "PublicKeyConfig" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

A unique ID assigned to a public key you've added to CloudFront.

" + }, + "CreatedTime":{ + "shape":"timestamp", + "documentation":"

The time you added a public key to CloudFront.

" + }, + "PublicKeyConfig":{ + "shape":"PublicKeyConfig", + "documentation":"

A complex data type for a public key you add to CloudFront to use with features like field-level encryption.

" + } + }, + "documentation":"

A complex data type of public keys you add to CloudFront to use with features like field-level encryption.

" + }, + "PublicKeyAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified public key already exists.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "PublicKeyConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Name", + "EncodedKey" + ], + "members":{ + "CallerReference":{ + "shape":"string", + "documentation":"

A unique number that ensures the request can't be replayed.

" + }, + "Name":{ + "shape":"string", + "documentation":"

The name for a public key you add to CloudFront to use with features like field-level encryption.

" + }, + "EncodedKey":{ + "shape":"string", + "documentation":"

The encoded public key that you want to add to CloudFront to use with features like field-level encryption.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

An optional comment about a public key.

" + } + }, + "documentation":"

Information about a public key you add to CloudFront to use with features like field-level encryption.
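A sketch of registering a public key for field-level encryption with boto3, assuming the CreatePublicKey operation defined earlier in this model and a locally stored PEM-encoded public key; the file name and key name are placeholders:

    import time
    import boto3

    cloudfront = boto3.client('cloudfront')

    with open('public_key.pem') as f:          # placeholder file name
        encoded_key = f.read()

    response = cloudfront.create_public_key(
        PublicKeyConfig={
            'CallerReference': time.strftime('%Y%m%d%H%M%S'),
            'Name': 'fle-demo-key',            # placeholder name
            'EncodedKey': encoded_key,
            'Comment': 'Key used for field-level encryption',
        },
    )
    print(response['PublicKey']['Id'])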

" + }, + "PublicKeyInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified public key is in use.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "PublicKeyList":{ + "type":"structure", + "required":[ + "MaxItems", + "Quantity" + ], + "members":{ + "NextMarker":{ + "shape":"string", + "documentation":"

If there are more elements to be listed, this element is present and contains the value that you can use for the Marker request parameter to continue listing your public keys where you left off.

" + }, + "MaxItems":{ + "shape":"integer", + "documentation":"

The maximum number of public keys you want in the response body.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of public keys you added to CloudFront to use with features like field-level encryption.

" + }, + "Items":{ + "shape":"PublicKeySummaryList", + "documentation":"

An array of information about a public key you add to CloudFront to use with features like field-level encryption.

" + } + }, + "documentation":"

A list of public keys you've added to CloudFront to use with features like field-level encryption.

" + }, + "PublicKeySummary":{ + "type":"structure", + "required":[ + "Id", + "Name", + "CreatedTime", + "EncodedKey" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

ID for public key information summary.

" + }, + "Name":{ + "shape":"string", + "documentation":"

Name for public key information summary.

" + }, + "CreatedTime":{ + "shape":"timestamp", + "documentation":"

Creation time for public key information summary.

" + }, + "EncodedKey":{ + "shape":"string", + "documentation":"

Encoded key for public key information summary.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

Comment for public key information summary.

" + } + }, + "documentation":"

Public key information summary.

" + }, + "PublicKeySummaryList":{ + "type":"list", + "member":{ + "shape":"PublicKeySummary", + "locationName":"PublicKeySummary" + } + }, + "QueryArgProfile":{ + "type":"structure", + "required":[ + "QueryArg", + "ProfileId" + ], + "members":{ + "QueryArg":{ + "shape":"string", + "documentation":"

Query argument for field-level encryption query argument-profile mapping.

" + }, + "ProfileId":{ + "shape":"string", + "documentation":"

ID of the profile to use for field-level encryption query argument-profile mapping.

" + } + }, + "documentation":"

Query argument-profile mapping for field-level encryption.

" + }, + "QueryArgProfileConfig":{ + "type":"structure", + "required":["ForwardWhenQueryArgProfileIsUnknown"], + "members":{ + "ForwardWhenQueryArgProfileIsUnknown":{ + "shape":"boolean", + "documentation":"

Flag to set if you want a request to be forwarded to the origin even if the profile specified by the field-level encryption query argument, fle-profile, is unknown.

" + }, + "QueryArgProfiles":{ + "shape":"QueryArgProfiles", + "documentation":"

Profiles specified for query argument-profile mapping for field-level encryption.

" + } + }, + "documentation":"

Configuration for query argument-profile mapping for field-level encryption.

" + }, + "QueryArgProfileEmpty":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

No profile specified for the field-level encryption query argument.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "QueryArgProfileList":{ + "type":"list", + "member":{ + "shape":"QueryArgProfile", + "locationName":"QueryArgProfile" + } + }, + "QueryArgProfiles":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

Number of profiles for query argument-profile mapping for field-level encryption.

" + }, + "Items":{ + "shape":"QueryArgProfileList", + "documentation":"

The items for query argument-profile mapping for field-level encryption.

" + } + }, + "documentation":"

Query argument-profile mapping for field-level encryption.

" + }, + "QueryStringCacheKeys":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{ + "shape":"integer", + "documentation":"

The number of whitelisted query string parameters for this cache behavior.

" + }, + "Items":{ + "shape":"QueryStringCacheKeysList", + "documentation":"

(Optional) A list that contains the query string parameters that you want CloudFront to use as a basis for caching for this cache behavior. If Quantity is 0, you can omit Items.

" + } + } + }, + "QueryStringCacheKeysList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "ResourceARN":{ + "type":"string", + "pattern":"arn:aws:cloudfront::[0-9]+:.*" + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + }, + "documentation":"

A complex type that identifies ways in which you want to restrict distribution of your content.

" + }, + "S3Origin":{ + "type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{ + "shape":"string", + "documentation":"

The DNS name of the Amazon S3 origin.

" + }, + "OriginAccessIdentity":{ + "shape":"string", + "documentation":"

The CloudFront origin access identity to associate with the RTMP distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront.

If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element.

To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element.

To replace the origin access identity, update the distribution configuration and specify the new origin access identity.

For more information, see Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content in the Amazon CloudFront Developer Guide.

" + } + }, + "documentation":"

A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.

" + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{ + "shape":"string", + "documentation":"

The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront. The format of the value is:

origin-access-identity/cloudfront/ID-of-origin-access-identity

where ID-of-origin-access-identity is the value that CloudFront returned in the ID element when you created the origin access identity.

If you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element.

To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element.

To replace the origin access identity, update the distribution configuration and specify the new origin access identity.

For more information about the origin access identity, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.
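A sketch of creating an origin access identity and formatting its ID into the origin-access-identity/cloudfront/ID value that S3OriginConfig expects, assuming boto3; the comment is a placeholder:

    import time
    import boto3

    cloudfront = boto3.client('cloudfront')

    oai = cloudfront.create_cloud_front_origin_access_identity(
        CloudFrontOriginAccessIdentityConfig={
            'CallerReference': time.strftime('%Y%m%d%H%M%S'),
            'Comment': 'OAI for my-s3-origin',   # placeholder comment
        },
    )
    oai_id = oai['CloudFrontOriginAccessIdentity']['Id']

    # The value CloudFront expects in S3OriginConfig:
    s3_origin_config = {
        'OriginAccessIdentity': 'origin-access-identity/cloudfront/' + oai_id,
    }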

" + } + }, + "documentation":"

A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.

" + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + "type":"structure", + "members":{ + "AwsAccountNumber":{ + "shape":"string", + "documentation":"

An AWS account that is included in the TrustedSigners complex type for this RTMP distribution. Valid values include:

" + }, + "KeyPairIds":{ + "shape":"KeyPairIds", + "documentation":"

A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.

" + } + }, + "documentation":"

A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.

" + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "SslProtocol":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1", + "TLSv1.1", + "TLSv1.2" + ] + }, + "SslProtocolsList":{ + "type":"list", + "member":{ + "shape":"SslProtocol", + "locationName":"SslProtocol" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "ARN", + "Status", + "DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identifier for the RTMP distribution. For example: EGTXBD79EXAMPLE.

" + }, + "ARN":{"shape":"string"}, + "Status":{ + "shape":"string", + "documentation":"

The current status of the RTMP distribution. When the status is Deployed, the distribution's information is propagated to all CloudFront edge locations.

" + }, + "LastModifiedTime":{ + "shape":"timestamp", + "documentation":"

The date and time that the distribution was last modified.

" + }, + "DomainName":{ + "shape":"string", + "documentation":"

The domain name that corresponds to the streaming distribution, for example, s5c39gqb8ow64r.cloudfront.net.

" + }, + "ActiveTrustedSigners":{ + "shape":"ActiveTrustedSigners", + "documentation":"

A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for this distribution. These are the accounts that you want to allow to create signed URLs for private content.

The Signer complex type lists the AWS account number of the trusted signer or self if the signer is the AWS account that created the distribution. The Signer element also includes the IDs of any active CloudFront key pairs that are associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create signed URLs.

For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

" + }, + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "documentation":"

The current configuration information for the RTMP distribution.

" + } + }, + "documentation":"

A streaming distribution.

" + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{ + "shape":"string", + "documentation":"

A unique number that ensures that the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value that you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value that you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.

" + }, + "S3Origin":{ + "shape":"S3Origin", + "documentation":"

A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.

" + }, + "Aliases":{ + "shape":"Aliases", + "documentation":"

A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

Any comments you want to include about the streaming distribution.

" + }, + "Logging":{ + "shape":"StreamingLoggingConfig", + "documentation":"

A complex type that controls whether access logs are written for the streaming distribution.

" + }, + "TrustedSigners":{ + "shape":"TrustedSigners", + "documentation":"

A complex type that specifies any AWS accounts that you want to permit to create signed URLs for private content. If you want the distribution to use signed URLs, include this element; if you want the distribution to use public URLs, remove this element. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

" + }, + "PriceClass":{ + "shape":"PriceClass", + "documentation":"

A complex type that contains information about price class for this streaming distribution.

" + }, + "Enabled":{ + "shape":"boolean", + "documentation":"

Whether the streaming distribution is enabled to accept user requests for content.

" + } + }, + "documentation":"

The RTMP distribution's configuration information.

" + }, + "StreamingDistributionConfigWithTags":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Tags" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "documentation":"

A streaming distribution configuration.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

A complex type that contains zero or more Tag elements.

" + } + }, + "documentation":"

A streaming distribution configuration and a list of tags to be associated with the streaming distribution.

" + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

The value you provided for the Marker request parameter.

" + }, + "NextMarker":{ + "shape":"string", + "documentation":"

If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your RTMP distributions where they left off.

" + }, + "MaxItems":{ + "shape":"integer", + "documentation":"

The value you provided for the MaxItems request parameter.

" + }, + "IsTruncated":{ + "shape":"boolean", + "documentation":"

A flag that indicates whether more streaming distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of streaming distributions that were created by the current AWS account.

" + }, + "Items":{ + "shape":"StreamingDistributionSummaryList", + "documentation":"

A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account.

" + } + }, + "documentation":"

A streaming distribution list.

" + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "ARN", + "Status", + "LastModifiedTime", + "DomainName", + "S3Origin", + "Aliases", + "TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identifier for the distribution, for example, EDFDVBD632BHDS5.

" + }, + "ARN":{ + "shape":"string", + "documentation":"

The ARN (Amazon Resource Name) for the streaming distribution. For example: arn:aws:cloudfront::123456789012:streaming-distribution/EDFDVBD632BHDS5, where 123456789012 is your AWS account ID.

" + }, + "Status":{ + "shape":"string", + "documentation":"

Indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.

" + }, + "LastModifiedTime":{ + "shape":"timestamp", + "documentation":"

The date and time the distribution was last modified.

" + }, + "DomainName":{ + "shape":"string", + "documentation":"

The domain name corresponding to the distribution, for example, d111111abcdef8.cloudfront.net.

" + }, + "S3Origin":{ + "shape":"S3Origin", + "documentation":"

A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.

" + }, + "Aliases":{ + "shape":"Aliases", + "documentation":"

A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.

" + }, + "TrustedSigners":{ + "shape":"TrustedSigners", + "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

The comment originally specified when this distribution was created.

" + }, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{ + "shape":"boolean", + "documentation":"

Whether the distribution is enabled to accept end user requests for content.

" + } + }, + "documentation":"

A summary of the information for an Amazon CloudFront streaming distribution.

" + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + "locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{ + "shape":"boolean", + "documentation":"

Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.

" + }, + "Bucket":{ + "shape":"string", + "documentation":"

The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.

" + }, + "Prefix":{ + "shape":"string", + "documentation":"

An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. If you want to enable logging, but you don't want to specify a prefix, you still must include an empty Prefix element in the Logging element.

" + } + }, + "documentation":"

A complex type that controls whether access logs are written for this streaming distribution.

" + }, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

A string that contains a Tag key.

The string length should be between 1 and 128 characters. Valid characters include a-z, A-Z, 0-9, space, and the special characters _ - . : / = + @.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

A string that contains an optional Tag value.

The string length should be between 0 and 256 characters. Valid characters include a-z, A-Z, 0-9, space, and the special characters _ - . : / = + @.

" + } + }, + "documentation":"

A complex type that contains a Tag key and Tag value.

" + }, + "TagKey":{ + "type":"string", + "documentation":"

A string that contains a Tag key.

The string length should be between 1 and 128 characters. Valid characters include a-z, A-Z, 0-9, space, and the special characters _ - . : / = + @.

", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{ + "shape":"TagKey", + "locationName":"Key" + } + }, + "TagKeys":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"TagKeyList", + "documentation":"

A complex type that contains Tag key elements.

" + } + }, + "documentation":"

A complex type that contains zero or more Tag elements.

" + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "Resource", + "Tags" + ], + "members":{ + "Resource":{ + "shape":"ResourceARN", + "documentation":"

An ARN of a CloudFront resource.

", + "location":"querystring", + "locationName":"Resource" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

A complex type that contains zero or more Tag elements.

", + "locationName":"Tags", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "documentation":"

The request to add tags to a CloudFront resource.
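A minimal sketch of calling TagResource through botocore; the distribution ARN and tag values are placeholders:

import botocore.session

cloudfront = botocore.session.get_session().create_client('cloudfront', region_name='us-east-1')

# Resource is sent as a query-string parameter; Tags is the XML payload.
cloudfront.tag_resource(
    Resource='arn:aws:cloudfront::111122223333:distribution/EDFDVBD632BHDS5',  # placeholder ARN
    Tags={'Items': [{'Key': 'environment', 'Value': 'production'}]},
)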

", + "payload":"Tags" + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "Tags":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"TagList", + "documentation":"

A complex type that contains Tag elements.

" + } + }, + "documentation":"

A complex type that contains zero or more Tag elements.

" + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

You cannot create more cache behaviors for the distribution.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

You cannot create any more custom SSL/TLS certificates.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Processing your request would cause you to exceed the maximum number of origin access identities allowed.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Your request contains more cookie names in the whitelist than are allowed per cache behavior.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Your request contains more CNAMEs than are allowed per distribution.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Processing your request would cause you to exceed the maximum number of distributions allowed.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionsAssociatedToFieldLevelEncryptionConfig":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The maximum number of distributions has been associated with the specified configuration for field-level encryption.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionsWithLambdaAssociations":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Processing your request would cause the maximum number of distributions with Lambda function associations per owner to be exceeded.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyFieldLevelEncryptionConfigs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The maximum number of configurations for field-level encryption has been created.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyFieldLevelEncryptionContentTypeProfiles":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The maximum number of content type profiles for field-level encryption has been created.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyFieldLevelEncryptionEncryptionEntities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The maximum number of encryption entities for field-level encryption has been created.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyFieldLevelEncryptionFieldPatterns":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The maximum number of field patterns for field-level encryption has been created.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyFieldLevelEncryptionProfiles":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The maximum number of profiles for field-level encryption has been created.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyFieldLevelEncryptionQueryArgProfiles":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The maximum number of query arg profiles for field-level encryption has been created.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

You have exceeded the maximum number of allowable InProgress invalidation batch requests or invalidation objects.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyLambdaFunctionAssociations":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Your request contains more Lambda function associations than are allowed per distribution.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOriginCustomHeaders":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

You cannot create more origins for the distribution.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyPublicKeys":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The maximum number of public keys for field-level encryption has been created. To create a new public key, delete one of the existing keys.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyQueryStringParameters":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Processing your request would cause you to exceed the maximum number of streaming distributions allowed.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Your request contains more trusted signers than are allowed per distribution.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

One or more of your trusted signers don't exist.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{ + "shape":"boolean", + "documentation":"

Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of trusted signers for this cache behavior.

" + }, + "Items":{ + "shape":"AwsAccountNumberList", + "documentation":"

Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items.

" + } + }, + "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

For more information about updating the distribution configuration, see DistributionConfig.
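Expressed as the request parameter that botocore serializes, a TrustedSigners element looks roughly like the following sketch (the account number is a placeholder):

# Require signed URLs, allowing one AWS account to sign them.
trusted_signers = {
    'Enabled': True,
    'Quantity': 1,
    'Items': ['111122223333'],  # placeholder AWS account number
}

# Public URLs instead: disable signing and omit Items.
public_urls = {'Enabled': False, 'Quantity': 0}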

" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "Resource", + "TagKeys" + ], + "members":{ + "Resource":{ + "shape":"ResourceARN", + "documentation":"

An ARN of a CloudFront resource.

", + "location":"querystring", + "locationName":"Resource" + }, + "TagKeys":{ + "shape":"TagKeys", + "documentation":"

A complex type that contains zero or more Tag key elements.

", + "locationName":"TagKeys", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + } + }, + "documentation":"

The request to remove tags from a CloudFront resource.

", + "payload":"TagKeys" + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "documentation":"

The identity's configuration information.

", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + }, + "Id":{ + "shape":"string", + "documentation":"

The identity's id.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + }, + "documentation":"

The request to update an origin access identity.

", + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{ + "shape":"CloudFrontOriginAccessIdentity", + "documentation":"

The origin access identity's information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"CloudFrontOriginAccessIdentity" + }, + "UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "documentation":"

The distribution's configuration information.

", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + }, + "Id":{ + "shape":"string", + "documentation":"

The distribution's id.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + }, + "documentation":"

The request to update a distribution.
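A sketch of the usual read-modify-write pattern with the ETag; the distribution ID and comment are placeholders:

import botocore.session

cloudfront = botocore.session.get_session().create_client('cloudfront', region_name='us-east-1')

current = cloudfront.get_distribution_config(Id='EDFDVBD632BHDS5')  # placeholder ID
config = current['DistributionConfig']
config['Comment'] = 'updated via the 2018-06-18 API'

cloudfront.update_distribution(
    Id='EDFDVBD632BHDS5',
    IfMatch=current['ETag'],  # sent as the If-Match header
    DistributionConfig=config,
)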

", + "payload":"DistributionConfig" + }, + "UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{ + "shape":"Distribution", + "documentation":"

The distribution's information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"Distribution" + }, + "UpdateFieldLevelEncryptionConfigRequest":{ + "type":"structure", + "required":[ + "FieldLevelEncryptionConfig", + "Id" + ], + "members":{ + "FieldLevelEncryptionConfig":{ + "shape":"FieldLevelEncryptionConfig", + "documentation":"

Request to update a field-level encryption configuration.

", + "locationName":"FieldLevelEncryptionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + }, + "Id":{ + "shape":"string", + "documentation":"

The ID of the configuration you want to update.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when retrieving the configuration identity to update. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"FieldLevelEncryptionConfig" + }, + "UpdateFieldLevelEncryptionConfigResult":{ + "type":"structure", + "members":{ + "FieldLevelEncryption":{ + "shape":"FieldLevelEncryption", + "documentation":"

Return the results of updating the configuration.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when updating the configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"FieldLevelEncryption" + }, + "UpdateFieldLevelEncryptionProfileRequest":{ + "type":"structure", + "required":[ + "FieldLevelEncryptionProfileConfig", + "Id" + ], + "members":{ + "FieldLevelEncryptionProfileConfig":{ + "shape":"FieldLevelEncryptionProfileConfig", + "documentation":"

Request to update a field-level encryption profile.

", + "locationName":"FieldLevelEncryptionProfileConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + }, + "Id":{ + "shape":"string", + "documentation":"

The ID of the field-level encryption profile request.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when retrieving the profile identity to update. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"FieldLevelEncryptionProfileConfig" + }, + "UpdateFieldLevelEncryptionProfileResult":{ + "type":"structure", + "members":{ + "FieldLevelEncryptionProfile":{ + "shape":"FieldLevelEncryptionProfile", + "documentation":"

Return the results of updating the profile.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The result of the field-level encryption profile request.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"FieldLevelEncryptionProfile" + }, + "UpdatePublicKeyRequest":{ + "type":"structure", + "required":[ + "PublicKeyConfig", + "Id" + ], + "members":{ + "PublicKeyConfig":{ + "shape":"PublicKeyConfig", + "documentation":"

Request to update public key information.

", + "locationName":"PublicKeyConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + }, + "Id":{ + "shape":"string", + "documentation":"

ID of the public key to be updated.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when retrieving the public key to update. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"PublicKeyConfig" + }, + "UpdatePublicKeyResult":{ + "type":"structure", + "members":{ + "PublicKey":{ + "shape":"PublicKey", + "documentation":"

Return the results of updating the public key.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the update public key result. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"PublicKey" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "documentation":"

The streaming distribution's configuration information.

", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2018-06-18/"} + }, + "Id":{ + "shape":"string", + "documentation":"

The streaming distribution's id.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The value of the ETag header that you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"If-Match" + } + }, + "documentation":"

The request to update a streaming distribution.

", + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{ + "shape":"StreamingDistribution", + "documentation":"

The streaming distribution's information.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The current version of the configuration. For example: E2QWRUHAPOMQZL.

", + "location":"header", + "locationName":"ETag" + } + }, + "documentation":"

The returned result of the corresponding request.

", + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "CloudFrontDefaultCertificate":{ + "shape":"boolean", + "documentation":"

For information about how and when to use CloudFrontDefaultCertificate, see ViewerCertificate.

" + }, + "IAMCertificateId":{ + "shape":"string", + "documentation":"

For information about how and when to use IAMCertificateId, see ViewerCertificate.

" + }, + "ACMCertificateArn":{ + "shape":"string", + "documentation":"

For information about how and when to use ACMCertificateArn, see ViewerCertificate.

" + }, + "SSLSupportMethod":{ + "shape":"SSLSupportMethod", + "documentation":"

If you specify a value for ViewerCertificate$ACMCertificateArn or for ViewerCertificate$IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests: using a method that works for all clients or one that works for most clients:

Don't specify a value for SSLSupportMethod if you specified <CloudFrontDefaultCertificate>true</CloudFrontDefaultCertificate>.

For more information, see Using Alternate Domain Names and HTTPS in the Amazon CloudFront Developer Guide.

" + }, + "MinimumProtocolVersion":{ + "shape":"MinimumProtocolVersion", + "documentation":"

Specify the security policy that you want CloudFront to use for HTTPS connections. A security policy determines two settings:

On the CloudFront console, this setting is called Security policy.

We recommend that you specify TLSv1.1_2016 unless your users are using browsers or devices that do not support TLSv1.1 or later.

When both of the following are true, you must specify TLSv1 or later for the security policy:

If you specify true for CloudFrontDefaultCertificate, CloudFront automatically sets the security policy to TLSv1 regardless of the value that you specify for MinimumProtocolVersion.

For information about the relationship between the security policy that you choose and the protocols and ciphers that CloudFront uses to communicate with viewers, see Supported SSL/TLS Protocols and Ciphers for Communication Between Viewers and CloudFront in the Amazon CloudFront Developer Guide.

" + }, + "Certificate":{ + "shape":"string", + "documentation":"

This field has been deprecated. Use one of the following fields instead:

", + "deprecated":true + }, + "CertificateSource":{ + "shape":"CertificateSource", + "documentation":"

This field has been deprecated. Use one of the following fields instead:

", + "deprecated":true + } + }, + "documentation":"

A complex type that specifies the following:

You must specify only one of the following values:

Don't specify false for CloudFrontDefaultCertificate.

If you want viewers to use HTTP instead of HTTPS to request your objects: Specify the following value:

<CloudFrontDefaultCertificate>true</CloudFrontDefaultCertificate>

In addition, specify allow-all for ViewerProtocolPolicy for all of your cache behaviors.

If you want viewers to use HTTPS to request your objects: Choose the type of certificate that you want to use based on whether you're using an alternate domain name for your objects or the CloudFront domain name:

If you want viewers to use HTTPS, you must also specify one of the following values in your cache behaviors:

You can also optionally require that CloudFront use HTTPS to communicate with your origin by specifying one of the following values for the applicable origins:

For more information, see Using Alternate Domain Names and HTTPS in the Amazon CloudFront Developer Guide.
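For illustration, a ViewerCertificate block for an alternate domain name served over SNI might look like the following sketch (the certificate ARN is a placeholder); the commented alternative uses the default *.cloudfront.net certificate:

viewer_certificate = {
    'ACMCertificateArn': 'arn:aws:acm:us-east-1:111122223333:certificate/example',  # placeholder ARN
    'SSLSupportMethod': 'sni-only',           # works for most clients; 'vip' works for all
    'MinimumProtocolVersion': 'TLSv1.1_2016',
}

# Default CloudFront certificate instead (no alternate domain names over HTTPS):
# viewer_certificate = {'CloudFrontDefaultCertificate': True}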

" + }, + "ViewerProtocolPolicy":{ + "type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + }, + "documentation":"Amazon CloudFront

This is the Amazon CloudFront API Reference. This guide is for developers who need detailed information about CloudFront API actions, data types, and errors. For detailed information about CloudFront features, see the Amazon CloudFront Developer Guide.
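With this 2018-06-18 model and the waiters-2.json added later in this patch, the new waiters can be used from botocore roughly as follows (the distribution and invalidation IDs are placeholders):

import botocore.session

cloudfront = botocore.session.get_session().create_client('cloudfront', region_name='us-east-1')

# Polls GetDistribution every 60 seconds, up to 25 attempts,
# until Distribution.Status is 'Deployed'.
cloudfront.get_waiter('distribution_deployed').wait(Id='EDFDVBD632BHDS5')  # placeholder ID

# Polls GetInvalidation every 20 seconds, up to 30 attempts,
# until Invalidation.Status is 'Completed'.
cloudfront.get_waiter('invalidation_completed').wait(
    DistributionId='EDFDVBD632BHDS5',
    Id='I2J0I21PCUYOIK',  # placeholder invalidation ID
)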

" +} diff --git a/botocore/data/cloudfront/2018-06-18/waiters-2.json b/botocore/data/cloudfront/2018-06-18/waiters-2.json new file mode 100644 index 00000000..edd74b2a --- /dev/null +++ b/botocore/data/cloudfront/2018-06-18/waiters-2.json @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Distribution.Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Invalidation.Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + "maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "StreamingDistribution.Status" + } + ] + } + } +} diff --git a/botocore/data/cloudhsmv2/2017-04-28/service-2.json b/botocore/data/cloudhsmv2/2017-04-28/service-2.json index 78c1726e..a73737c2 100644 --- a/botocore/data/cloudhsmv2/2017-04-28/service-2.json +++ b/botocore/data/cloudhsmv2/2017-04-28/service-2.json @@ -14,6 +14,22 @@ "uid":"cloudhsmv2-2017-04-28" }, "operations":{ + "CopyBackupToRegion":{ + "name":"CopyBackupToRegion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyBackupToRegionRequest"}, + "output":{"shape":"CopyBackupToRegionResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ] + }, "CreateCluster":{ "name":"CreateCluster", "http":{ @@ -204,7 +220,11 @@ "CreateTimestamp":{ "shape":"Timestamp", "documentation":"

The date and time when the backup was created.

" - } + }, + "CopyTimestamp":{"shape":"Timestamp"}, + "SourceRegion":{"shape":"Region"}, + "SourceBackup":{"shape":"BackupId"}, + "SourceCluster":{"shape":"ClusterId"} }, "documentation":"

Contains information about a backup of an AWS CloudHSM cluster.
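A sketch of the new CopyBackupToRegion call; the backup ID is a placeholder:

import botocore.session

cloudhsmv2 = botocore.session.get_session().create_client('cloudhsmv2', region_name='us-east-1')

response = cloudhsmv2.copy_backup_to_region(
    DestinationRegion='us-west-2',
    BackupId='backup-1234567890abcdef',  # placeholder backup ID
)
# DestinationBackup records the creation time plus the source region, backup, and cluster.
print(response['DestinationBackup'])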

" }, @@ -228,6 +248,7 @@ "type":"list", "member":{"shape":"Backup"} }, + "Boolean":{"type":"boolean"}, "Cert":{ "type":"string", "max":5000, @@ -380,6 +401,23 @@ "type":"list", "member":{"shape":"Cluster"} }, + "CopyBackupToRegionRequest":{ + "type":"structure", + "required":[ + "DestinationRegion", + "BackupId" + ], + "members":{ + "DestinationRegion":{"shape":"Region"}, + "BackupId":{"shape":"BackupId"} + } + }, + "CopyBackupToRegionResponse":{ + "type":"structure", + "members":{ + "DestinationBackup":{"shape":"DestinationBackup"} + } + }, "CreateClusterRequest":{ "type":"structure", "required":[ @@ -504,7 +542,8 @@ "Filters":{ "shape":"Filters", "documentation":"

One or more filters to limit the items returned in the response.

Use the backupIds filter to return only the specified backups. Specify backups by their backup identifier (ID).

Use the clusterIds filter to return only the backups for the specified clusters. Specify clusters by their cluster identifier (ID).

Use the states filter to return only backups that match the specified state.

" - } + }, + "SortAscending":{"shape":"Boolean"} } }, "DescribeBackupsResponse":{ @@ -550,13 +589,22 @@ } } }, + "DestinationBackup":{ + "type":"structure", + "members":{ + "CreateTimestamp":{"shape":"Timestamp"}, + "SourceRegion":{"shape":"Region"}, + "SourceBackup":{"shape":"BackupId"}, + "SourceCluster":{"shape":"ClusterId"} + } + }, "EniId":{ "type":"string", "pattern":"eni-[0-9a-fA-F]{8,17}" }, "ExternalAz":{ "type":"string", - "pattern":"[a-z]{2}(-(gov|isob|iso))?-(east|west|north|south|central){1,2}-\\d[a-z]" + "pattern":"[a-z]{2}(-(gov))?-(east|west|north|south|central){1,2}-\\d[a-z]" }, "ExternalSubnetMapping":{ "type":"map", @@ -719,6 +767,10 @@ "max":32, "min":7 }, + "Region":{ + "type":"string", + "pattern":"[a-z]{2}(-(gov))?-(east|west|north|south|central){1,2}-\\d" + }, "SecurityGroup":{ "type":"string", "pattern":"sg-[0-9a-fA-F]" diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index bd4bad13..a85376fb 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -415,6 +415,10 @@ "networkInterface":{ "shape":"NetworkInterface", "documentation":"

Describes a network interface.

" + }, + "encryptionKey":{ + "shape":"NonEmptyString", + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

This is expressed either as the CMK's Amazon Resource Name (ARN) or, if specified, the CMK's alias (using the format alias/alias-name).

" } }, "documentation":"

Information about a build.

" @@ -433,6 +437,14 @@ "md5sum":{ "shape":"String", "documentation":"

The MD5 hash of the build artifact.

You can use this hash along with a checksum tool to confirm both file integrity and authenticity.

This value is available only if the build project's packaging value is set to ZIP.

" + }, + "overrideArtifactName":{ + "shape":"WrapperBoolean", + "documentation":"

If this flag is set, a name specified in the buildspec file overrides the artifact name. The name specified in a buildspec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.

" + }, + "encryptionDisabled":{ + "shape":"WrapperBoolean", + "documentation":"

Information that tells you if encryption for build artifacts is disabled.

" } }, "documentation":"

Information about build output artifacts.

" @@ -535,7 +547,8 @@ "name", "source", "artifacts", - "environment" + "environment", + "serviceRole" ], "members":{ "name":{ @@ -1066,6 +1079,14 @@ "packaging":{ "shape":"ArtifactPackaging", "documentation":"

The type of build output artifact to create, as follows:

" + }, + "overrideArtifactName":{ + "shape":"WrapperBoolean", + "documentation":"

If this flag is set, a name specified in the buildspec file overrides the artifact name. The name specified in a buildspec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.

" + }, + "encryptionDisabled":{ + "shape":"WrapperBoolean", + "documentation":"

Set to true if you do not want your output artifacts encrypted. This option is only valid if your artifacts type is Amazon S3. If this is set with another artifacts type, an invalidInputException will be thrown.

" } }, "documentation":"

Information about the build output artifacts for the build project.
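A sketch of how the new artifact options and the now-required serviceRole appear in a CreateProject call; the project name, repository, bucket, image, and role ARN are all placeholders:

import botocore.session

codebuild = botocore.session.get_session().create_client('codebuild', region_name='us-east-1')

codebuild.create_project(
    name='example-project',
    source={
        'type': 'CODECOMMIT',
        'location': 'https://git-codecommit.us-east-1.amazonaws.com/v1/repos/example',  # placeholder repo
    },
    artifacts={
        'type': 'S3',
        'location': 'example-artifact-bucket',  # placeholder bucket
        'overrideArtifactName': True,           # let the buildspec choose the artifact name
        'encryptionDisabled': False,            # keep output artifacts encrypted
    },
    environment={
        'type': 'LINUX_CONTAINER',
        'image': 'aws/codebuild/python:3.6.5',  # placeholder build image
        'computeType': 'BUILD_GENERAL1_SMALL',
    },
    serviceRole='arn:aws:iam::111122223333:role/codebuild-service-role',  # placeholder role ARN
)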

" diff --git a/botocore/data/comprehend/2017-11-27/service-2.json b/botocore/data/comprehend/2017-11-27/service-2.json index a479a369..a31b9459 100644 --- a/botocore/data/comprehend/2017-11-27/service-2.json +++ b/botocore/data/comprehend/2017-11-27/service-2.json @@ -80,6 +80,23 @@ ], "documentation":"

Inspects a batch of documents and returns an inference of the prevailing sentiment, POSITIVE, NEUTRAL, MIXED, or NEGATIVE, in each one.

" }, + "BatchDetectSyntax":{ + "name":"BatchDetectSyntax", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDetectSyntaxRequest"}, + "output":{"shape":"BatchDetectSyntaxResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TextSizeLimitExceededException"}, + {"shape":"UnsupportedLanguageException"}, + {"shape":"BatchSizeLimitExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Inspects the text of a batch of documents for the syntax and part of speech of the words in the document and returns information about them. For more information, see how-syntax.

" + }, "DescribeDominantLanguageDetectionJob":{ "name":"DescribeDominantLanguageDetectionJob", "http":{ @@ -223,6 +240,22 @@ ], "documentation":"

Inspects text and returns an inference of the prevailing sentiment (POSITIVE, NEUTRAL, MIXED, or NEGATIVE).

" }, + "DetectSyntax":{ + "name":"DetectSyntax", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetectSyntaxRequest"}, + "output":{"shape":"DetectSyntaxResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TextSizeLimitExceededException"}, + {"shape":"UnsupportedLanguageException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Inspects text for syntax and the part of speech of words in the document. For more information, see how-syntax.
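For example, a single DetectSyntax call and a walk over the returned tokens (only the sample sentence is made up):

import botocore.session

comprehend = botocore.session.get_session().create_client('comprehend', region_name='us-east-1')

response = comprehend.detect_syntax(
    Text='Amazon Comprehend now labels parts of speech.',
    LanguageCode='en',
)
for token in response['SyntaxTokens']:
    pos = token['PartOfSpeech']
    print(token['Text'], pos['Tag'], round(pos['Score'], 3))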

" + }, "ListDominantLanguageDetectionJobs":{ "name":"ListDominantLanguageDetectionJobs", "http":{ @@ -391,7 +424,7 @@ {"shape":"JobNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Stops a dominant language detection job in progress.

If the job state is IN_PROGRESS the job will be marked for termination and put into the STOPPING state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation will return a 400 Internal Request Exception.

When a job is stopped, any document that has already been processed will be written to the output location.

" + "documentation":"

Stops a dominant language detection job in progress.

If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception.

When a job is stopped, any documents already processed are written to the output location.
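A sketch of stopping a running job and inspecting the returned status; the job ID is a placeholder:

import botocore.session

comprehend = botocore.session.get_session().create_client('comprehend', region_name='us-east-1')

response = comprehend.stop_dominant_language_detection_job(JobId='1234567890abcdef')  # placeholder job ID
# STOP_REQUESTED while the job is being stopped, STOPPED if it had
# already been stopped by an earlier call.
print(response['JobStatus'])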

" }, "StopEntitiesDetectionJob":{ "name":"StopEntitiesDetectionJob", @@ -406,7 +439,7 @@ {"shape":"JobNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Stops an entities detection job in progress.

If the job state is IN_PROGRESS the job will be marked for termination and put into the STOPPING state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation will return a 400 Internal Request Exception.

When a job is stopped, any document that has already been processed will be written to the output location.

" + "documentation":"

Stops an entities detection job in progress.

If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception.

When a job is stopped, any documents already processed are written to the output location.

" }, "StopKeyPhrasesDetectionJob":{ "name":"StopKeyPhrasesDetectionJob", @@ -421,7 +454,7 @@ {"shape":"JobNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Stops a key phrases detection job in progress.

If the job state is IN_PROGRESS the job will be marked for termination and put into the STOPPING state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation will return a 400 Internal Request Exception.

When a job is stopped, any document that has already been processed will be written to the output location.

" + "documentation":"

Stops a key phrases detection job in progress.

If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception.

When a job is stopped, any documents already processed are written to the output location.

" }, "StopSentimentDetectionJob":{ "name":"StopSentimentDetectionJob", @@ -436,7 +469,7 @@ {"shape":"JobNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Stops a sentiment detection job in progress.

If the job state is IN_PROGRESS the job will be marked for termination and put into the STOPPING state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation will return a 400 Internal Request Exception.

When a job is stopped, any document that has already been processed will be written to the output location.

" + "documentation":"

Stops a sentiment detection job in progress.

If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception.

When a job is stopped, any documents already processed are written to the output location.

" } }, "shapes":{ @@ -630,6 +663,54 @@ } } }, + "BatchDetectSyntaxItemResult":{ + "type":"structure", + "members":{ + "Index":{ + "shape":"Integer", + "documentation":"

The zero-based index of the document in the input list.

" + }, + "SyntaxTokens":{ + "shape":"ListOfSyntaxTokens", + "documentation":"

The syntax tokens for the words in the document, one token for each word.

" + } + }, + "documentation":"

The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.

" + }, + "BatchDetectSyntaxRequest":{ + "type":"structure", + "required":[ + "TextList", + "LanguageCode" + ], + "members":{ + "TextList":{ + "shape":"StringList", + "documentation":"

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" + }, + "LanguageCode":{ + "shape":"SyntaxLanguageCode", + "documentation":"

The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.

" + } + } + }, + "BatchDetectSyntaxResponse":{ + "type":"structure", + "required":[ + "ResultList", + "ErrorList" + ], + "members":{ + "ResultList":{ + "shape":"ListOfDetectSyntaxResult", + "documentation":"

A list of objects containing the results of the operation. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If all of the documents contain an error, the ResultList is empty.

" + }, + "ErrorList":{ + "shape":"BatchItemErrorList", + "documentation":"

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

" + } + } + }, "BatchItemError":{ "type":"structure", "members":{ @@ -862,6 +943,32 @@ } } }, + "DetectSyntaxRequest":{ + "type":"structure", + "required":[ + "Text", + "LanguageCode" + ], + "members":{ + "Text":{ + "shape":"String", + "documentation":"

A UTF-8 string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" + }, + "LanguageCode":{ + "shape":"SyntaxLanguageCode", + "documentation":"

The language code of the input documents. You can specify English (\"en\") or Spanish (\"es\").

" + } + } + }, + "DetectSyntaxResponse":{ + "type":"structure", + "members":{ + "SyntaxTokens":{ + "shape":"ListOfSyntaxTokens", + "documentation":"

A collection of syntax tokens describing the text. For each token, the response provides the text, the token type, where the text begins and ends, and the level of confidence that Amazon Comprehend has that the token is correct. For a list of token types, see how-syntax.

" + } + } + }, "DominantLanguage":{ "type":"structure", "members":{ @@ -932,6 +1039,10 @@ "OutputDataConfig":{ "shape":"OutputDataConfig", "documentation":"

The output data configuration that you supplied when you created the dominant language detection job.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

" } }, "documentation":"

Provides information about a dominant language detection job.

" @@ -1000,6 +1111,10 @@ "LanguageCode":{ "shape":"LanguageCode", "documentation":"

The language code of the input documents.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

" } }, "documentation":"

Provides information about an entities detection job.

" @@ -1213,6 +1328,10 @@ "LanguageCode":{ "shape":"LanguageCode", "documentation":"

The language code of the input documents.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

" } }, "documentation":"

Provides information about a key phrases detection job.

" @@ -1334,6 +1453,10 @@ "type":"list", "member":{"shape":"BatchDetectSentimentItemResult"} }, + "ListOfDetectSyntaxResult":{ + "type":"list", + "member":{"shape":"BatchDetectSyntaxItemResult"} + }, "ListOfDominantLanguages":{ "type":"list", "member":{"shape":"DominantLanguage"} @@ -1346,6 +1469,10 @@ "type":"list", "member":{"shape":"KeyPhrase"} }, + "ListOfSyntaxTokens":{ + "type":"list", + "member":{"shape":"SyntaxToken"} + }, "ListSentimentDetectionJobsRequest":{ "type":"structure", "members":{ @@ -1427,6 +1554,42 @@ }, "documentation":"

Provides configuration parameters for the output of topic detection jobs.

" }, + "PartOfSpeechTag":{ + "type":"structure", + "members":{ + "Tag":{ + "shape":"PartOfSpeechTagType", + "documentation":"

Identifies the part of speech that the token represents.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The confidence that Amazon Comprehend has that the part of speech was correctly identified.

" + } + }, + "documentation":"

Identifies the part of speech represented by the token and gives the confidence that Amazon Comprehend has that the part of speech was correctly identified. For more information about the parts of speech that Amazon Comprehend can identify, see how-syntax.

" + }, + "PartOfSpeechTagType":{ + "type":"string", + "enum":[ + "ADJ", + "ADP", + "ADV", + "AUX", + "CONJ", + "DET", + "INTJ", + "NOUN", + "NUM", + "O", + "PART", + "PRON", + "PROPN", + "PUNCT", + "SCONJ", + "SYM", + "VERB" + ] + }, "S3Uri":{ "type":"string", "max":1024, @@ -1492,6 +1655,10 @@ "LanguageCode":{ "shape":"LanguageCode", "documentation":"

The language code of the input documents.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

" } }, "documentation":"

Provides information about a sentiment detection job.

" @@ -1549,7 +1716,7 @@ }, "DataAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.

" }, "JobName":{ "shape":"JobName", @@ -1594,7 +1761,7 @@ }, "DataAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.

" }, "JobName":{ "shape":"JobName", @@ -1643,7 +1810,7 @@ }, "DataAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.

" }, "JobName":{ "shape":"JobName", @@ -1692,7 +1859,7 @@ }, "DataAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.

" }, "JobName":{ "shape":"JobName", @@ -1740,7 +1907,7 @@ }, "DataAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.

" }, "JobName":{ "shape":"JobName", @@ -1789,7 +1956,7 @@ }, "JobStatus":{ "shape":"JobStatus", - "documentation":"

Either STOPPING if the job is currently running, or STOPPED if the job was previously stopped with the StopDominantLanguageDetectionJob operation.

" + "documentation":"

Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopDominantLanguageDetectionJob operation.

" } } }, @@ -1812,7 +1979,7 @@ }, "JobStatus":{ "shape":"JobStatus", - "documentation":"

Either STOPPING if the job is currently running, or STOPPED if the job was previously stopped with the StopEntitiesDetectionJob operation.

" + "documentation":"

Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopEntitiesDetectionJob operation.

" } } }, @@ -1835,7 +2002,7 @@ }, "JobStatus":{ "shape":"JobStatus", - "documentation":"

Either STOPPING if the job is currently running, or STOPPED if the job was previously stopped with the StopKeyPhrasesDetectionJob operation.

" + "documentation":"

Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopKeyPhrasesDetectionJob operation.

" } } }, @@ -1858,7 +2025,7 @@ }, "JobStatus":{ "shape":"JobStatus", - "documentation":"

Either STOPPING if the job is currently running, or STOPPED if the job was previously stopped with the StopSentimentDetectionJob operation.

" + "documentation":"

Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopSentimentDetectionJob operation.

" } } }, @@ -1870,6 +2037,36 @@ "type":"list", "member":{"shape":"String"} }, + "SyntaxLanguageCode":{ + "type":"string", + "enum":["en"] + }, + "SyntaxToken":{ + "type":"structure", + "members":{ + "TokenId":{ + "shape":"Integer", + "documentation":"

A unique identifier for a token.

" + }, + "Text":{ + "shape":"String", + "documentation":"

The word that was recognized in the source text.

" + }, + "BeginOffset":{ + "shape":"Integer", + "documentation":"

The zero-based offset from the beginning of the source text to the first character in the word.

" + }, + "EndOffset":{ + "shape":"Integer", + "documentation":"

The zero-based offset from the beginning of the source text to the last character in the word.

" + }, + "PartOfSpeech":{ + "shape":"PartOfSpeechTag", + "documentation":"

Provides the part of speech label and the confidence level that Amazon Comprehend has that the part of speech was correctly identified. For more information, see how-syntax.

" + } + }, + "documentation":"

Represents a word in the input text that was recognized and assigned a part of speech. There is one syntax token record for each word in the source text.

" + }, "TextSizeLimitExceededException":{ "type":"structure", "members":{ diff --git a/botocore/data/config/2014-11-12/service-2.json b/botocore/data/config/2014-11-12/service-2.json index cefa015f..c8e4a5b4 100644 --- a/botocore/data/config/2014-11-12/service-2.json +++ b/botocore/data/config/2014-11-12/service-2.json @@ -3069,7 +3069,11 @@ }, "ResourceCreationTime":{"type":"timestamp"}, "ResourceDeletionTime":{"type":"timestamp"}, - "ResourceId":{"type":"string"}, + "ResourceId":{ + "type":"string", + "max":768, + "min":1 + }, "ResourceIdList":{ "type":"list", "member":{"shape":"ResourceId"} diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index 6649a766..3834d251 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -13,6 +13,178 @@ "uid":"connect-2017-08-08" }, "operations":{ + "CreateUser":{ + "name":"CreateUser", + "http":{ + "method":"PUT", + "requestUri":"/users/{InstanceId}" + }, + "input":{"shape":"CreateUserRequest"}, + "output":{"shape":"CreateUserResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"DuplicateResourceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Creates a new user account in your Amazon Connect instance.

" + }, + "DeleteUser":{ + "name":"DeleteUser", + "http":{ + "method":"DELETE", + "requestUri":"/users/{InstanceId}/{UserId}" + }, + "input":{"shape":"DeleteUserRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Deletes a user account from Amazon Connect.

" + }, + "DescribeUser":{ + "name":"DescribeUser", + "http":{ + "method":"GET", + "requestUri":"/users/{InstanceId}/{UserId}" + }, + "input":{"shape":"DescribeUserRequest"}, + "output":{"shape":"DescribeUserResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns a User object that contains information about the user account specified by the UserId.

" + }, + "DescribeUserHierarchyGroup":{ + "name":"DescribeUserHierarchyGroup", + "http":{ + "method":"GET", + "requestUri":"/user-hierarchy-groups/{InstanceId}/{HierarchyGroupId}" + }, + "input":{"shape":"DescribeUserHierarchyGroupRequest"}, + "output":{"shape":"DescribeUserHierarchyGroupResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns a HierarchyGroup object that includes information about a hierarchy group in your instance.

" + }, + "DescribeUserHierarchyStructure":{ + "name":"DescribeUserHierarchyStructure", + "http":{ + "method":"GET", + "requestUri":"/user-hierarchy-structure/{InstanceId}" + }, + "input":{"shape":"DescribeUserHierarchyStructureRequest"}, + "output":{"shape":"DescribeUserHierarchyStructureResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns a HierarchyStructure object, which contains data about the levels in the agent hierarchy.

" + }, + "GetFederationToken":{ + "name":"GetFederationToken", + "http":{ + "method":"GET", + "requestUri":"/user/federate/{InstanceId}" + }, + "input":{"shape":"GetFederationTokenRequest"}, + "output":{"shape":"GetFederationTokenResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UserNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"DuplicateResourceException"} + ], + "documentation":"

Retrieves a token for federation.
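A rough sketch of retrieving federation credentials with botocore; the instance ID is a hypothetical placeholder, and the returned tokens are marked sensitive, so only expirations are printed here.

    import botocore.session

    connect = botocore.session.get_session().create_client('connect', region_name='us-east-1')

    response = connect.get_federation_token(InstanceId='example-instance-id')
    credentials = response.get('Credentials', {})
    # AccessToken and RefreshToken are sensitive; do not log them in real code.
    print(credentials.get('AccessTokenExpiration'), credentials.get('RefreshTokenExpiration'))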

" + }, + "ListRoutingProfiles":{ + "name":"ListRoutingProfiles", + "http":{ + "method":"GET", + "requestUri":"/routing-profiles-summary/{InstanceId}" + }, + "input":{"shape":"ListRoutingProfilesRequest"}, + "output":{"shape":"ListRoutingProfilesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns an array of RoutingProfileSummary objects that includes information about the routing profiles in your instance.

" + }, + "ListSecurityProfiles":{ + "name":"ListSecurityProfiles", + "http":{ + "method":"GET", + "requestUri":"/security-profiles-summary/{InstanceId}" + }, + "input":{"shape":"ListSecurityProfilesRequest"}, + "output":{"shape":"ListSecurityProfilesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns an array of SecurityProfileSummary objects that contain information about the security profiles in your instance, including the ARN, Id, and Name of the security profile.

" + }, + "ListUserHierarchyGroups":{ + "name":"ListUserHierarchyGroups", + "http":{ + "method":"GET", + "requestUri":"/user-hierarchy-groups-summary/{InstanceId}" + }, + "input":{"shape":"ListUserHierarchyGroupsRequest"}, + "output":{"shape":"ListUserHierarchyGroupsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns a UserHierarchyGroupSummaryList, which is an array of HierarchyGroupSummary objects that contain information about the hierarchy groups in your instance.

" + }, + "ListUsers":{ + "name":"ListUsers", + "http":{ + "method":"GET", + "requestUri":"/users-summary/{InstanceId}" + }, + "input":{"shape":"ListUsersRequest"}, + "output":{"shape":"ListUsersResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns a UserSummaryList, which is an array of UserSummary objects.
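A sketch of paging through the results with the NextToken and MaxResults parameters defined later in this file; the instance ID is a hypothetical placeholder.

    import botocore.session

    connect = botocore.session.get_session().create_client('connect', region_name='us-east-1')

    kwargs = {'InstanceId': 'example-instance-id', 'MaxResults': 100}
    while True:
        page = connect.list_users(**kwargs)
        for user in page.get('UserSummaryList', []):
            print(user['Id'], user['Username'])
        token = page.get('NextToken')
        if not token:
            break
        # Pass the returned token back to retrieve the next set of results.
        kwargs['NextToken'] = token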

" + }, "StartOutboundVoiceContact":{ "name":"StartOutboundVoiceContact", "http":{ @@ -30,7 +202,7 @@ {"shape":"DestinationNotAllowedException"}, {"shape":"OutboundContactNotPermittedException"} ], - "documentation":"

The StartOutboundVoiceContact operation initiates a contact flow to place an outbound call to a customer.

There is a throttling limit placed on usage of the API that includes a RateLimit of 2 per second, and a BurstLimit of 5 per second.

If you are using an IAM account, it must have permissions to the connect:StartOutboundVoiceContact action.

" + "documentation":"

The StartOutboundVoiceContact operation initiates a contact flow to place an outbound call to a customer.

There is a throttling limit placed on usage of the API that includes a RateLimit of 2 per second, and a BurstLimit of 5 per second.

If you are using an IAM account, it must have permission to the connect:StartOutboundVoiceContact action.
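A sketch of placing and then ending an outbound call through botocore's client interface; the phone number and identifiers are hypothetical placeholders, and callers remain subject to the RateLimit and BurstLimit described above.

    import botocore.session

    connect = botocore.session.get_session().create_client('connect', region_name='us-east-1')

    # Place the outbound call; the response carries the ContactId of the new contact.
    start = connect.start_outbound_voice_contact(
        DestinationPhoneNumber='+12065550100',
        ContactFlowId='example-contact-flow-id',
        InstanceId='example-instance-id',
    )

    # Later, end the same contact with StopContact.
    connect.stop_contact(
        ContactId=start['ContactId'],
        InstanceId='example-instance-id',
    )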

" }, "StopContact":{ "name":"StopContact", @@ -47,10 +219,111 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Ends the contact initiated by the StartOutboundVoiceContact operation.

If you are using an IAM account, it must have permissions to the connect:StopContact operation.

" + "documentation":"

Ends the contact initiated by the StartOutboundVoiceContact operation.

If you are using an IAM account, it must have permission to the connect:StopContact action.

" + }, + "UpdateUserHierarchy":{ + "name":"UpdateUserHierarchy", + "http":{ + "method":"POST", + "requestUri":"/users/{InstanceId}/{UserId}/hierarchy" + }, + "input":{"shape":"UpdateUserHierarchyRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Assigns the specified hierarchy group to the user.

" + }, + "UpdateUserIdentityInfo":{ + "name":"UpdateUserIdentityInfo", + "http":{ + "method":"POST", + "requestUri":"/users/{InstanceId}/{UserId}/identity-info" + }, + "input":{"shape":"UpdateUserIdentityInfoRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Updates the identity information for the specified user in a UserIdentityInfo object, including email, first name, and last name.

" + }, + "UpdateUserPhoneConfig":{ + "name":"UpdateUserPhoneConfig", + "http":{ + "method":"POST", + "requestUri":"/users/{InstanceId}/{UserId}/phone-config" + }, + "input":{"shape":"UpdateUserPhoneConfigRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Updates the phone configuration settings in the UserPhoneConfig object for the specified user.

" + }, + "UpdateUserRoutingProfile":{ + "name":"UpdateUserRoutingProfile", + "http":{ + "method":"POST", + "requestUri":"/users/{InstanceId}/{UserId}/routing-profile" + }, + "input":{"shape":"UpdateUserRoutingProfileRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Assigns the specified routing profile to a user.

" + }, + "UpdateUserSecurityProfiles":{ + "name":"UpdateUserSecurityProfiles", + "http":{ + "method":"POST", + "requestUri":"/users/{InstanceId}/{UserId}/security-profiles" + }, + "input":{"shape":"UpdateUserSecurityProfilesRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Updates the security profiles assigned to the user.

" } }, "shapes":{ + "ARN":{"type":"string"}, + "AfterContactWorkTimeLimit":{ + "type":"integer", + "min":0 + }, + "AgentFirstName":{ + "type":"string", + "max":100, + "min":1 + }, + "AgentLastName":{ + "type":"string", + "max":100, + "min":1 + }, + "AgentUsername":{ + "type":"string", + "max":20, + "min":1, + "pattern":"[a-zA-Z0-9\\_\\-\\.]+" + }, "AttributeName":{ "type":"string", "documentation":"Key for the key value pair to be used for additional attributes.", @@ -69,6 +342,7 @@ "value":{"shape":"AttributeValue"}, "documentation":"Additional attributes can be provided in the request using this field. This will be passed to the contact flow execution. Client can make use of this additional info in their contact flow." }, + "AutoAccept":{"type":"boolean"}, "ClientToken":{ "type":"string", "documentation":"Dedupe token to be provided by the client. This token is used to avoid duplicate calls to the customer.", @@ -97,6 +371,193 @@ "error":{"httpStatusCode":410}, "exception":true }, + "CreateUserRequest":{ + "type":"structure", + "required":[ + "Username", + "PhoneConfig", + "SecurityProfileIds", + "RoutingProfileId", + "InstanceId" + ], + "members":{ + "Username":{ + "shape":"AgentUsername", + "documentation":"

The user name in Amazon Connect for the user to create.

" + }, + "Password":{ + "shape":"Password", + "documentation":"

The password for the user account to create. This is required if you are using Amazon Connect for identity management. If you are using SAML for identity management and include this parameter, an InvalidRequestException is returned.

" + }, + "IdentityInfo":{ + "shape":"UserIdentityInfo", + "documentation":"

Information about the user, including email address, first name, and last name.

" + }, + "PhoneConfig":{ + "shape":"UserPhoneConfig", + "documentation":"

Specifies the phone settings for the user, including AfterContactWorkTimeLimit, AutoAccept, DeskPhoneNumber, and PhoneType.

" + }, + "DirectoryUserId":{ + "shape":"DirectoryUserId", + "documentation":"

The unique identifier for the user account in the directory service directory used for identity management. If Amazon Connect is unable to access the existing directory, you can use the DirectoryUserId to authenticate users. If you include the parameter, it is assumed that Amazon Connect cannot access the directory. If the parameter is not included, the UserIdentityInfo is used to authenticate users from your existing directory.

This parameter is required if you are using an existing directory for identity management in Amazon Connect when Amazon Connect cannot access your directory to authenticate users. If you are using SAML for identity management and include this parameter, an InvalidRequestException is returned.

" + }, + "SecurityProfileIds":{ + "shape":"SecurityProfileIds", + "documentation":"

The unique identifier of the security profile to assign to the user created.

" + }, + "RoutingProfileId":{ + "shape":"RoutingProfileId", + "documentation":"

The unique identifier for the routing profile to assign to the user created.

" + }, + "HierarchyGroupId":{ + "shape":"HierarchyGroupId", + "documentation":"

The unique identifier for the hierarchy group to assign to the user created.

" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "CreateUserResponse":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"UserId", + "documentation":"

The unique identifier for the user account in Amazon Connect.

" + }, + "UserArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the user account created.

" + } + } + }, + "Credentials":{ + "type":"structure", + "members":{ + "AccessToken":{ + "shape":"SecurityToken", + "documentation":"

An access token generated for a federated user to access Amazon Connect.

" + }, + "AccessTokenExpiration":{ + "shape":"timestamp", + "documentation":"

A token generated with an expiration time for the session a user is logged in to Amazon Connect.

" + }, + "RefreshToken":{ + "shape":"SecurityToken", + "documentation":"

Renews a token generated for a user to access the Amazon Connect instance.

" + }, + "RefreshTokenExpiration":{ + "shape":"timestamp", + "documentation":"

Renews the expiration timer for a generated token.

" + } + }, + "documentation":"

The credentials to use for federation.

" + }, + "DeleteUserRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "UserId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

The unique identifier of the user to delete.

", + "location":"uri", + "locationName":"UserId" + } + } + }, + "DescribeUserHierarchyGroupRequest":{ + "type":"structure", + "required":[ + "HierarchyGroupId", + "InstanceId" + ], + "members":{ + "HierarchyGroupId":{ + "shape":"HierarchyGroupId", + "documentation":"

The identifier for the hierarchy group to return.

", + "location":"uri", + "locationName":"HierarchyGroupId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "DescribeUserHierarchyGroupResponse":{ + "type":"structure", + "members":{ + "HierarchyGroup":{ + "shape":"HierarchyGroup", + "documentation":"

Returns a HierarchyGroup object.

" + } + } + }, + "DescribeUserHierarchyStructureRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "DescribeUserHierarchyStructureResponse":{ + "type":"structure", + "members":{ + "HierarchyStructure":{ + "shape":"HierarchyStructure", + "documentation":"

A HierarchyStructure object.

" + } + } + }, + "DescribeUserRequest":{ + "type":"structure", + "required":[ + "UserId", + "InstanceId" + ], + "members":{ + "UserId":{ + "shape":"UserId", + "documentation":"

Unique identifier for the user account to return.

", + "location":"uri", + "locationName":"UserId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "DescribeUserResponse":{ + "type":"structure", + "members":{ + "User":{ + "shape":"User", + "documentation":"

A User object that contains information about the user account and configuration settings.

" + } + } + }, "DestinationNotAllowedException":{ "type":"structure", "members":{ @@ -105,13 +566,169 @@ "documentation":"

The message.

" } }, - "documentation":"

Outbound calls to the destination number are not allowed for your instance. You can request that the country be included in the allowed countries for your instance by submitting a Service Limit Increase.

", + "documentation":"

Outbound calls to the destination number are not allowed.

", "error":{"httpStatusCode":403}, "exception":true }, + "DirectoryUserId":{"type":"string"}, + "DuplicateResourceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

A resource with that name already exists.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "Email":{"type":"string"}, + "GetFederationTokenRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "GetFederationTokenResponse":{ + "type":"structure", + "members":{ + "Credentials":{ + "shape":"Credentials", + "documentation":"

The credentials to use for federation.

" + } + } + }, + "HierarchyGroup":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"HierarchyGroupId", + "documentation":"

The identifier for the hierarchy group.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the hierarchy group.

" + }, + "Name":{ + "shape":"HierarchyGroupName", + "documentation":"

The name of the hierarchy group in your instance.

" + }, + "LevelId":{ + "shape":"HierarchyLevelId", + "documentation":"

The identifier for the level in the hierarchy group.

" + }, + "HierarchyPath":{ + "shape":"HierarchyPath", + "documentation":"

A HierarchyPath object that contains information about the levels in the hierarchy group.

" + } + }, + "documentation":"

A HierarchyGroup object that contains information about a hierarchy group in your Amazon Connect instance.

" + }, + "HierarchyGroupId":{"type":"string"}, + "HierarchyGroupName":{"type":"string"}, + "HierarchyGroupSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"HierarchyGroupId", + "documentation":"

The identifier of the hierarchy group.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The ARN for the hierarchy group.

" + }, + "Name":{ + "shape":"HierarchyGroupName", + "documentation":"

The name of the hierarchy group.

" + } + }, + "documentation":"

A HierarchyGroupSummary object that contains information about the hierarchy group, including ARN, Id, and Name.

" + }, + "HierarchyGroupSummaryList":{ + "type":"list", + "member":{"shape":"HierarchyGroupSummary"} + }, + "HierarchyLevel":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"HierarchyLevelId", + "documentation":"

The identifier for the hierarchy group level.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The ARN for the hierarchy group level.

" + }, + "Name":{ + "shape":"HierarchyLevelName", + "documentation":"

The name of the hierarchy group level.

" + } + }, + "documentation":"

A HierarchyLevel object that contains information about the levels in a hierarchy group, including ARN, Id, and Name.

" + }, + "HierarchyLevelId":{"type":"string"}, + "HierarchyLevelName":{"type":"string"}, + "HierarchyPath":{ + "type":"structure", + "members":{ + "LevelOne":{ + "shape":"HierarchyGroupSummary", + "documentation":"

A HierarchyGroupSummary object that contains information about the level of the hierarchy group, including ARN, Id, and Name.

" + }, + "LevelTwo":{ + "shape":"HierarchyGroupSummary", + "documentation":"

A HierarchyGroupSummary object that contains information about the level of the hierarchy group, including ARN, Id, and Name.

" + }, + "LevelThree":{ + "shape":"HierarchyGroupSummary", + "documentation":"

A HierarchyGroupSummary object that contains information about the level of the hierarchy group, including ARN, Id, and Name.

" + }, + "LevelFour":{ + "shape":"HierarchyGroupSummary", + "documentation":"

A HierarchyGroupSummary object that contains information about the level of the hierarchy group, including ARN, Id, and Name.

" + }, + "LevelFive":{ + "shape":"HierarchyGroupSummary", + "documentation":"

A HierarchyGroupSummary object that contains information about the level of the hierarchy group, including ARN, Id, and Name.

" + } + }, + "documentation":"

A HierarchyPath object that contains information about the levels of the hierarchy group.

" + }, + "HierarchyStructure":{ + "type":"structure", + "members":{ + "LevelOne":{ + "shape":"HierarchyLevel", + "documentation":"

A HierarchyLevel object that contains information about the hierarchy group level.

" + }, + "LevelTwo":{ + "shape":"HierarchyLevel", + "documentation":"

A HierarchyLevel object that contains information about the hierarchy group level.

" + }, + "LevelThree":{ + "shape":"HierarchyLevel", + "documentation":"

A HierarchyLevel object that contains information about the hierarchy group level.

" + }, + "LevelFour":{ + "shape":"HierarchyLevel", + "documentation":"

A HierarchyLevel object that contains information about the hierarchy group level.

" + }, + "LevelFive":{ + "shape":"HierarchyLevel", + "documentation":"

A HierarchyLevel object that contains information about the hierarchy group level.

" + } + }, + "documentation":"

A HierarchyStructure object that contains information about the hierarchy group structure.

" + }, "InstanceId":{ "type":"string", - "documentation":"Amazon Connect Organization ARN. A client must provide its organization ARN in order to place a call. This defines the call from organization." + "documentation":"Amazon Connect Organization ARN. A client must provide its organization ARN in order to place a call. This defines the call from organization.", + "max":100, + "min":1 }, "InternalServiceException":{ "type":"structure", @@ -161,7 +778,165 @@ "error":{"httpStatusCode":429}, "exception":true }, + "ListRoutingProfilesRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult1000", + "documentation":"

The maximum number of routing profiles to return in the response.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListRoutingProfilesResponse":{ + "type":"structure", + "members":{ + "RoutingProfileSummaryList":{ + "shape":"RoutingProfileSummaryList", + "documentation":"

An array of RoutingProfileSummary objects that include the ARN, Id, and Name of the routing profile.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string returned in the response. Use the value returned in the response as the value of the NextToken in a subsequent request to retrieve the next set of results.

" + } + } + }, + "ListSecurityProfilesRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult1000", + "documentation":"

The maximum number of security profiles to return.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListSecurityProfilesResponse":{ + "type":"structure", + "members":{ + "SecurityProfileSummaryList":{ + "shape":"SecurityProfileSummaryList", + "documentation":"

An array of SecurityProfileSummary objects.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string returned in the response. Use the value returned in the response as the value of the NextToken in a subsequent request to retrieve the next set of results.

" + } + } + }, + "ListUserHierarchyGroupsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult1000", + "documentation":"

The maximum number of hierarchy groups to return.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListUserHierarchyGroupsResponse":{ + "type":"structure", + "members":{ + "UserHierarchyGroupSummaryList":{ + "shape":"HierarchyGroupSummaryList", + "documentation":"

An array of HierarchyGroupSummary objects.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string returned in the response. Use the value returned in the response as the value of the NextToken in a subsequent request to retrieve the next set of results.

" + } + } + }, + "ListUsersRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult1000", + "documentation":"

The maximum number of results to return in the response.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListUsersResponse":{ + "type":"structure", + "members":{ + "UserSummaryList":{ + "shape":"UserSummaryList", + "documentation":"

An array of UserSummary objects that contain information about the users in your instance.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string returned in the response. Use the value returned in the response as the value of the NextToken in a subsequent request to retrieve the next set of results.

" + } + } + }, + "MaxResult1000":{ + "type":"integer", + "max":1000, + "min":1 + }, "Message":{"type":"string"}, + "NextToken":{"type":"string"}, "OutboundContactNotPermittedException":{ "type":"structure", "members":{ @@ -170,14 +945,25 @@ "documentation":"

The message.

" } }, - "documentation":"

The contact is not permitted because outbound calling is not enabled for the instance.

", + "documentation":"

The contact is not permitted.

", "error":{"httpStatusCode":403}, "exception":true }, + "Password":{ + "type":"string", + "pattern":"/^(?=.*[a-z])(?=.*[A-Z])(?=.*\\d)[a-zA-Z\\d\\S]{8,}$/" + }, "PhoneNumber":{ "type":"string", "documentation":"End customer's phone number to call." }, + "PhoneType":{ + "type":"string", + "enum":[ + "SOFT_PHONE", + "DESK_PHONE" + ] + }, "QueueId":{ "type":"string", "documentation":"Identifier of the queue to be used for the contact routing." @@ -194,6 +980,68 @@ "error":{"httpStatusCode":404}, "exception":true }, + "RoutingProfileId":{"type":"string"}, + "RoutingProfileName":{ + "type":"string", + "max":100, + "min":1 + }, + "RoutingProfileSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"RoutingProfileId", + "documentation":"

The identifier of the routing profile.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The ARN of the routing profile.

" + }, + "Name":{ + "shape":"RoutingProfileName", + "documentation":"

The name of the routing profile.

" + } + }, + "documentation":"

A RoutingProfileSummary object that contains information about a routing profile, including ARN, Id, and Name.

" + }, + "RoutingProfileSummaryList":{ + "type":"list", + "member":{"shape":"RoutingProfileSummary"} + }, + "SecurityProfileId":{"type":"string"}, + "SecurityProfileIds":{ + "type":"list", + "member":{"shape":"SecurityProfileId"}, + "max":10, + "min":1 + }, + "SecurityProfileName":{"type":"string"}, + "SecurityProfileSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"SecurityProfileId", + "documentation":"

The identifier of the security profile.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The ARN of the security profile.

" + }, + "Name":{ + "shape":"SecurityProfileName", + "documentation":"

The name of the security profile.

" + } + }, + "documentation":"

A SecurityProfileSummary object that contains information about a security profile, including ARN, Id, and Name.

" + }, + "SecurityProfileSummaryList":{ + "type":"list", + "member":{"shape":"SecurityProfileSummary"} + }, + "SecurityToken":{ + "type":"string", + "sensitive":true + }, "StartOutboundVoiceContactRequest":{ "type":"structure", "required":[ @@ -204,15 +1052,15 @@ "members":{ "DestinationPhoneNumber":{ "shape":"PhoneNumber", - "documentation":"

The phone number, in E.164 format, of the customer to call with the outbound contact.

" + "documentation":"

The phone number of the customer in E.164 format.

" }, "ContactFlowId":{ "shape":"ContactFlowId", - "documentation":"

The identifier for the contact flow to execute for the outbound call. This is a GUID value only. Amazon Resource Name (ARN) values are not supported.

To find the ContactFlowId, open the contact flow to use in the Amazon Connect contact flow designer. The ID for the contact flow is displayed in the address bar as part of the URL. For example, an address displayed when you open a contact flow is similar to the following: https://myconnectinstance.awsapps.com/connect/contact-flows/edit?id=arn:aws:connect:us-east-1:361814831152:instance/2fb42df9-78a2-4b99-b484-f5cf80dc300c/contact-flow/b0b8f2dd-ed1b-4c44-af36-ce189a178181 . At the end of the URL, you see contact-flow/b0b8f2dd-ed1b-4c44-af36-ce189a178181. The ContactFlowID for this contact flow is b0b8f2dd-ed1b-4c44-af36-ce189a178181 . Make sure to include only the GUID after the \"contact-flow/\" in your requests.

" + "documentation":"

The identifier for the contact flow to connect the outbound call to.

To find the ContactFlowId, open the contact flow you want to use in the Amazon Connect contact flow editor. The ID for the contact flow is displayed in the address bar as part of the URL. For example, the contact flow ID is the set of characters at the end of the URL, after 'contact-flow/' such as 78ea8fd5-2659-4f2b-b528-699760ccfc1b.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The identifier for your Amazon Connect instance. To find the InstanceId value for your Amazon Connect instance, open the Amazon Connect console. Select the instance alias of the instance and view the instance ID in the Overview section. For example, the instance ID is the set of characters at the end of the instance ARN, after \"instance/\", such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

" + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

" }, "ClientToken":{ "shape":"ClientToken", @@ -221,15 +1069,15 @@ }, "SourcePhoneNumber":{ "shape":"PhoneNumber", - "documentation":"

The phone number, in E.164 format, associated with your Amazon Connect instance to use to place the outbound call.

" + "documentation":"

The phone number, in E.164 format, associated with your Amazon Connect instance to use for the outbound call.

" }, "QueueId":{ "shape":"QueueId", - "documentation":"

The queue to which to add the call. If you specify a queue, the phone displayed for caller ID is the phone number defined for the queue. If you do not specify a queue, the queue used is the queue defined in the contact flow specified by ContactFlowId.

To find the QueueId, open the queue to use in the Amazon Connect queue editor. The ID for the queue is displayed in the address bar as part of the URL. For example, the QueueId value is the set of characters at the end of the URL, after \"queue/\", such as aeg40574-2d01-51c3-73d6-bf8624d2168c.

" + "documentation":"

The queue to add the call to. If you specify a queue, the phone displayed for caller ID is the phone number specified in the queue. If you do not specify a queue, the queue used will be the queue defined in the contact flow.

To find the QueueId, open the queue you want to use in the Amazon Connect Queue editor. The ID for the queue is displayed in the address bar as part of the URL. For example, the queue ID is the set of characters at the end of the URL, after 'queue/' such as queue/aeg40574-2d01-51c3-73d6-bf8624d2168c.

" }, "Attributes":{ "shape":"Attributes", - "documentation":"

Specify a custom key-value pair using an attribute map. The attributes are standard Amazon Connect attributes, and can be accessed in contact flows just like any other contact attributes.

There can be up to 32,768 UTF-8 bytes across all key-value pairs. Attribute keys can include only alphanumeric, dash, and underscore characters.

For example, to play a greeting when the customer answers the call, you can pass the customer name in attributes similar to the following:

" + "documentation":"

Specify a custom key-value pair using an attribute map. The attributes are standard Amazon Connect attributes, and can be accessed in contact flows just like any other contact attributes.

There can be up to 32,768 UTF-8 bytes across all key-value pairs. Attribute keys can include only alphanumeric, dash, and underscore characters.

For example, if you want to play a greeting when the customer answers the call, you can pass the customer name in attributes similar to the following:
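One possible shape for such an attribute map, using a hypothetical CustomerName key; the other values are placeholders as well.

    import botocore.session

    connect = botocore.session.get_session().create_client('connect', region_name='us-east-1')

    # Attribute keys may contain only alphanumeric, dash, and underscore characters.
    connect.start_outbound_voice_contact(
        DestinationPhoneNumber='+12065550100',
        ContactFlowId='example-contact-flow-id',
        InstanceId='example-instance-id',
        Attributes={'CustomerName': 'Jane Doe'},
    )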

" } } }, @@ -251,11 +1099,11 @@ "members":{ "ContactId":{ "shape":"ContactId", - "documentation":"

The unique identifier of the contact to end. This is the ContactId value returned from the StartOutboundVoiceContact operation.

" + "documentation":"

The unique identifier of the contact to end.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The identifier of the Amazon Connect instance in which the contact is active.

" + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

" } } }, @@ -263,7 +1111,261 @@ "type":"structure", "members":{ } - } + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The throttling limit has been exceeded.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "UpdateUserHierarchyRequest":{ + "type":"structure", + "required":[ + "UserId", + "InstanceId" + ], + "members":{ + "HierarchyGroupId":{ + "shape":"HierarchyGroupId", + "documentation":"

The identifier for the hierarchy group to assign to the user.

" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

The identifier of the user account to assign the hierarchy group to.

", + "location":"uri", + "locationName":"UserId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "UpdateUserIdentityInfoRequest":{ + "type":"structure", + "required":[ + "IdentityInfo", + "UserId", + "InstanceId" + ], + "members":{ + "IdentityInfo":{ + "shape":"UserIdentityInfo", + "documentation":"

A UserIdentityInfo object.

" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

The identifier for the user account to update identity information for.

", + "location":"uri", + "locationName":"UserId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "UpdateUserPhoneConfigRequest":{ + "type":"structure", + "required":[ + "PhoneConfig", + "UserId", + "InstanceId" + ], + "members":{ + "PhoneConfig":{ + "shape":"UserPhoneConfig", + "documentation":"

A UserPhoneConfig object that contains settings for AfterContactWorkTimeLimit, AutoAccept, DeskPhoneNumber, and PhoneType to assign to the user.

" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

The identifier for the user account to change phone settings for.

", + "location":"uri", + "locationName":"UserId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "UpdateUserRoutingProfileRequest":{ + "type":"structure", + "required":[ + "RoutingProfileId", + "UserId", + "InstanceId" + ], + "members":{ + "RoutingProfileId":{ + "shape":"RoutingProfileId", + "documentation":"

The identifier of the routing profile to assign to the user.

" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

The identifier for the user account to assign the routing profile to.

", + "location":"uri", + "locationName":"UserId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "UpdateUserSecurityProfilesRequest":{ + "type":"structure", + "required":[ + "SecurityProfileIds", + "UserId", + "InstanceId" + ], + "members":{ + "SecurityProfileIds":{ + "shape":"SecurityProfileIds", + "documentation":"

The identifiers for the security profiles to assign to the user.

" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

The identifier of the user account to assign the security profiles to.

", + "location":"uri", + "locationName":"UserId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "User":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"UserId", + "documentation":"

The identifier of the user account.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The ARN of the user account.

" + }, + "Username":{ + "shape":"AgentUsername", + "documentation":"

The user name assigned to the user account.

" + }, + "IdentityInfo":{ + "shape":"UserIdentityInfo", + "documentation":"

A UserIdentityInfo object.

" + }, + "PhoneConfig":{ + "shape":"UserPhoneConfig", + "documentation":"

A UserPhoneConfig object.

" + }, + "DirectoryUserId":{ + "shape":"DirectoryUserId", + "documentation":"

The directory Id for the user account in the existing directory used for identity management.

" + }, + "SecurityProfileIds":{ + "shape":"SecurityProfileIds", + "documentation":"

The identifiers for the security profiles assigned to the user.

" + }, + "RoutingProfileId":{ + "shape":"RoutingProfileId", + "documentation":"

The identifier of the routing profile assigned to the user.

" + }, + "HierarchyGroupId":{ + "shape":"HierarchyGroupId", + "documentation":"

The identifier for the hierarchy group assigned to the user.

" + } + }, + "documentation":"

A User object that contains information about a user account in your Amazon Connect instance, including configuration settings.

" + }, + "UserId":{"type":"string"}, + "UserIdentityInfo":{ + "type":"structure", + "members":{ + "FirstName":{ + "shape":"AgentFirstName", + "documentation":"

The first name used in the user account. This is required if you are using Amazon Connect or SAML for identity management.

" + }, + "LastName":{ + "shape":"AgentLastName", + "documentation":"

The last name used in the user account. This is required if you are using Amazon Connect or SAML for identity management.

" + }, + "Email":{ + "shape":"Email", + "documentation":"

The email address added to the user account. If you are using SAML for identity management and include this parameter, an InvalidRequestException is returned.

" + } + }, + "documentation":"

A UserIdentityInfo object that contains information about the user's identity, including email address, first name, and last name.

" + }, + "UserNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

No user with the specified credentials was found in the Amazon Connect instance.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "UserPhoneConfig":{ + "type":"structure", + "required":["PhoneType"], + "members":{ + "PhoneType":{ + "shape":"PhoneType", + "documentation":"

The phone type selected for the user, either Soft phone or Desk phone.

" + }, + "AutoAccept":{ + "shape":"AutoAccept", + "documentation":"

The Auto accept setting for the user, Yes or No.

" + }, + "AfterContactWorkTimeLimit":{ + "shape":"AfterContactWorkTimeLimit", + "documentation":"

The After Call Work (ACW) timeout setting, in seconds, for the user.

" + }, + "DeskPhoneNumber":{ + "shape":"PhoneNumber", + "documentation":"

The phone number for the user's desk phone.

" + } + }, + "documentation":"

A UserPhoneConfig object that contains information about the user phone configuration settings.
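A sketch of applying such a configuration through UpdateUserPhoneConfig; the identifiers and desk phone number are hypothetical placeholders.

    import botocore.session

    connect = botocore.session.get_session().create_client('connect', region_name='us-east-1')

    connect.update_user_phone_config(
        PhoneConfig={
            'PhoneType': 'DESK_PHONE',
            'AutoAccept': False,
            'AfterContactWorkTimeLimit': 120,   # After Call Work timeout, in seconds
            'DeskPhoneNumber': '+12065550101',
        },
        UserId='example-user-id',
        InstanceId='example-instance-id',
    )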

" + }, + "UserSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"UserId", + "documentation":"

The identifier for the user account.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The ARN for the user account.

" + }, + "Username":{ + "shape":"AgentUsername", + "documentation":"

The Amazon Connect user name for the user account.

" + } + }, + "documentation":"

A UserSummary object that contains information about a user, including ARN, Id, and user name.

" + }, + "UserSummaryList":{ + "type":"list", + "member":{"shape":"UserSummary"} + }, + "timestamp":{"type":"timestamp"} }, "documentation":"

The Amazon Connect API Reference provides descriptions, syntax, and usage examples for each of the Amazon Connect actions, data types, parameters, and errors. Amazon Connect is a cloud-based contact center solution that makes it easy to set up and manage a customer contact center and provide reliable customer engagement at any scale.

" } diff --git a/botocore/data/dax/2017-04-19/service-2.json b/botocore/data/dax/2017-04-19/service-2.json index 1c2cf5fc..b24a4576 100644 --- a/botocore/data/dax/2017-04-19/service-2.json +++ b/botocore/data/dax/2017-04-19/service-2.json @@ -33,6 +33,7 @@ {"shape":"NodeQuotaForCustomerExceededFault"}, {"shape":"InvalidVPCNetworkStateFault"}, {"shape":"TagQuotaPerResourceExceeded"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -50,6 +51,7 @@ {"shape":"ParameterGroupQuotaExceededFault"}, {"shape":"ParameterGroupAlreadyExistsFault"}, {"shape":"InvalidParameterGroupStateFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -67,7 +69,8 @@ {"shape":"SubnetGroupAlreadyExistsFault"}, {"shape":"SubnetGroupQuotaExceededFault"}, {"shape":"SubnetQuotaExceededFault"}, - {"shape":"InvalidSubnet"} + {"shape":"InvalidSubnet"}, + {"shape":"ServiceLinkedRoleNotFoundFault"} ], "documentation":"

Creates a new subnet group.

" }, @@ -83,6 +86,7 @@ {"shape":"ClusterNotFoundFault"}, {"shape":"NodeNotFoundFault"}, {"shape":"InvalidClusterStateFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -99,6 +103,7 @@ "errors":[ {"shape":"ClusterNotFoundFault"}, {"shape":"InvalidClusterStateFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -115,6 +120,7 @@ "errors":[ {"shape":"InvalidParameterGroupStateFault"}, {"shape":"ParameterGroupNotFoundFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -130,7 +136,8 @@ "output":{"shape":"DeleteSubnetGroupResponse"}, "errors":[ {"shape":"SubnetGroupInUseFault"}, - {"shape":"SubnetGroupNotFoundFault"} + {"shape":"SubnetGroupNotFoundFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"} ], "documentation":"

Deletes a subnet group.

You cannot delete a subnet group if it is associated with any DAX clusters.

" }, @@ -144,6 +151,7 @@ "output":{"shape":"DescribeClustersResponse"}, "errors":[ {"shape":"ClusterNotFoundFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -158,6 +166,7 @@ "input":{"shape":"DescribeDefaultParametersRequest"}, "output":{"shape":"DescribeDefaultParametersResponse"}, "errors":[ + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -172,6 +181,7 @@ "input":{"shape":"DescribeEventsRequest"}, "output":{"shape":"DescribeEventsResponse"}, "errors":[ + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -187,6 +197,7 @@ "output":{"shape":"DescribeParameterGroupsResponse"}, "errors":[ {"shape":"ParameterGroupNotFoundFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -202,6 +213,7 @@ "output":{"shape":"DescribeParametersResponse"}, "errors":[ {"shape":"ParameterGroupNotFoundFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -216,7 +228,8 @@ "input":{"shape":"DescribeSubnetGroupsRequest"}, "output":{"shape":"DescribeSubnetGroupsResponse"}, "errors":[ - {"shape":"SubnetGroupNotFoundFault"} + {"shape":"SubnetGroupNotFoundFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"} ], "documentation":"

Returns a list of subnet group descriptions. If a subnet group name is specified, the list will contain only the description of that group.

" }, @@ -235,6 +248,7 @@ {"shape":"InvalidVPCNetworkStateFault"}, {"shape":"NodeQuotaForClusterExceededFault"}, {"shape":"NodeQuotaForCustomerExceededFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -252,6 +266,7 @@ {"shape":"ClusterNotFoundFault"}, {"shape":"InvalidARNFault"}, {"shape":"InvalidClusterStateFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -269,6 +284,7 @@ {"shape":"ClusterNotFoundFault"}, {"shape":"NodeNotFoundFault"}, {"shape":"InvalidClusterStateFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -287,6 +303,7 @@ {"shape":"TagQuotaPerResourceExceeded"}, {"shape":"InvalidARNFault"}, {"shape":"InvalidClusterStateFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -305,6 +322,7 @@ {"shape":"InvalidARNFault"}, {"shape":"TagNotFoundFault"}, {"shape":"InvalidClusterStateFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -323,6 +341,7 @@ {"shape":"ClusterNotFoundFault"}, {"shape":"InvalidParameterGroupStateFault"}, {"shape":"ParameterGroupNotFoundFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -339,6 +358,7 @@ "errors":[ {"shape":"InvalidParameterGroupStateFault"}, {"shape":"ParameterGroupNotFoundFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -356,7 +376,8 @@ {"shape":"SubnetGroupNotFoundFault"}, {"shape":"SubnetQuotaExceededFault"}, {"shape":"SubnetInUse"}, - {"shape":"InvalidSubnet"} + {"shape":"InvalidSubnet"}, + {"shape":"ServiceLinkedRoleNotFoundFault"} ], "documentation":"

Modifies an existing subnet group.

" } @@ -440,6 +461,10 @@ "ParameterGroup":{ "shape":"ParameterGroupStatus", "documentation":"

The parameter group being used by nodes in the cluster.

" + }, + "SSEDescription":{ + "shape":"SSEDescription", + "documentation":"

The description of the server-side encryption status on the specified DAX cluster.

" } }, "documentation":"

Contains all of the attributes of a specific DAX cluster.

" @@ -478,8 +503,7 @@ "required":[ "ClusterName", "NodeType", - "ReplicationFactor", - "IamRoleArn" + "ReplicationFactor" ], "members":{ "ClusterName":{ @@ -529,6 +553,10 @@ "Tags":{ "shape":"TagList", "documentation":"

A set of tags to associate with the DAX cluster.

" + }, + "SSESpecification":{ + "shape":"SSESpecification", + "documentation":"

Represents the settings used to enable server-side encryption on the cluster.

" } } }, @@ -981,7 +1009,8 @@ "message":{"shape":"AwsQueryErrorMessage"} }, "documentation":"

Two or more incompatible parameters were specified.

", - "exception":true + "exception":true, + "synthetic":true }, "InvalidParameterGroupStateFault":{ "type":"structure", @@ -996,7 +1025,8 @@ "message":{"shape":"AwsQueryErrorMessage"} }, "documentation":"

The value for a parameter is invalid.

", - "exception":true + "exception":true, + "synthetic":true }, "InvalidSubnet":{ "type":"structure", @@ -1304,6 +1334,37 @@ } } }, + "SSEDescription":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"SSEStatus", + "documentation":"

The current state of server-side encryption: ENABLING, ENABLED, DISABLING, or DISABLED.

" + } + }, + "documentation":"

The description of the server-side encryption status on the specified DAX cluster.

" + }, + "SSEEnabled":{"type":"boolean"}, + "SSESpecification":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"SSEEnabled", + "documentation":"

Indicates whether server-side encryption is enabled (true) or disabled (false) on the cluster.

" + } + }, + "documentation":"

Represents the settings used to enable server-side encryption.
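A sketch of enabling server-side encryption at cluster creation time through botocore's DAX client; the cluster name, node type, and role ARN are hypothetical placeholders.

    import botocore.session

    dax = botocore.session.get_session().create_client('dax', region_name='us-east-1')

    response = dax.create_cluster(
        ClusterName='example-cluster',
        NodeType='dax.r4.large',
        ReplicationFactor=3,
        IamRoleArn='arn:aws:iam::123456789012:role/ExampleDAXServiceRole',
        SSESpecification={'Enabled': True},
    )
    # SSEDescription.Status reports ENABLING, ENABLED, DISABLING, or DISABLED.
    print(response['Cluster']['SSEDescription']['Status'])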

" + }, + "SSEStatus":{ + "type":"string", + "enum":[ + "ENABLING", + "ENABLED", + "DISABLING", + "DISABLED" + ] + }, "SecurityGroupIdentifierList":{ "type":"list", "member":{"shape":"String"} @@ -1326,6 +1387,12 @@ "type":"list", "member":{"shape":"SecurityGroupMembership"} }, + "ServiceLinkedRoleNotFoundFault":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "SourceType":{ "type":"string", "enum":[ diff --git a/botocore/data/devicefarm/2015-06-23/service-2.json b/botocore/data/devicefarm/2015-06-23/service-2.json index b81bc1dc..6eeb0028 100644 --- a/botocore/data/devicefarm/2015-06-23/service-2.json +++ b/botocore/data/devicefarm/2015-06-23/service-2.json @@ -879,6 +879,22 @@ ], "documentation":"

Schedules a run.

" }, + "StopJob":{ + "name":"StopJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopJobRequest"}, + "output":{"shape":"StopJobResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ], + "documentation":"

Initiates a stop request for the current job. AWS Device Farm will immediately stop the job on the device where tests have not started executing, and you will not be billed for this device. On the device where tests have started executing, Setup Suite and Teardown Suite tests will run to completion before stopping execution on the device. You will be billed for Setup, Teardown, and any tests that were in progress or already completed.
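A sketch of issuing the stop request with botocore's Device Farm client; the job ARN and the region are assumptions.

    import botocore.session

    devicefarm = botocore.session.get_session().create_client('devicefarm', region_name='us-west-2')

    # Note the request member is the lowercase "arn".
    response = devicefarm.stop_job(
        arn='arn:aws:devicefarm:us-west-2:123456789012:job:EXAMPLE-JOB-ID'
    )
    print(response)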

" + }, "StopRemoteAccessSession":{ "name":"StopRemoteAccessSession", "http":{ @@ -991,6 +1007,22 @@ ], "documentation":"

Modifies the specified project name, given the project ARN and a new name.

" }, + "UpdateUpload":{ + "name":"UpdateUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateUploadRequest"}, + "output":{"shape":"UpdateUploadResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ], + "documentation":"

Update an uploaded test specification (test spec).
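
A short botocore sketch of this operation (the upload ARN and file name are placeholders):

    import botocore.session

    # Rename an uploaded test spec and mark its YAML content for replacement.
    # The ARN and file name are placeholders.
    session = botocore.session.get_session()
    devicefarm = session.create_client("devicefarm", region_name="us-west-2")

    updated = devicefarm.update_upload(
        arn="arn:aws:devicefarm:us-west-2:123456789012:upload:EXAMPLE-UPLOAD-ID",
        name="custom-testspec.yml",
        contentType="application/x-yaml",
        editContent=True,
    )
    print(updated["upload"])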

" + }, "UpdateVPCEConfiguration":{ "name":"UpdateVPCEConfiguration", "http":{ @@ -1140,7 +1172,8 @@ "XCTEST_LOG", "VIDEO", "CUSTOMER_ARTIFACT", - "CUSTOMER_ARTIFACT_LOG" + "CUSTOMER_ARTIFACT_LOG", + "TESTSPEC_OUTPUT" ] }, "Artifacts":{ @@ -1480,7 +1513,7 @@ }, "type":{ "shape":"UploadType", - "documentation":"

The upload's upload type.

Must be one of the following values:

Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.

" + "documentation":"

The upload's upload type.

Must be one of the following values:

Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.

" }, "contentType":{ "shape":"ContentType", @@ -1937,6 +1970,10 @@ "shape":"AppPackagesCleanup", "documentation":"

True if app package cleanup is enabled at the beginning of the test; otherwise, false.

" }, + "videoCapture":{ + "shape":"VideoCapture", + "documentation":"

Set to true to enable video capture; otherwise, set to false. The default is true.

" + }, "skipAppResign":{ "shape":"SkipAppResign", "documentation":"

When set to true, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.

For more information about how Device Farm re-signs your app(s), see Do you modify my app? in the AWS Device Farm FAQs.

" @@ -2517,6 +2554,14 @@ "deviceMinutes":{ "shape":"DeviceMinutes", "documentation":"

Represents the total (metered or unmetered) minutes used by the job.

" + }, + "videoEndpoint":{ + "shape":"String", + "documentation":"

The endpoint for streaming device video.

" + }, + "videoCapture":{ + "shape":"VideoCapture", + "documentation":"

This value is set to true if video capture is enabled; otherwise, it is set to false.

" } }, "documentation":"

Represents a device.

" @@ -3026,6 +3071,10 @@ "shape":"AmazonResourceName", "documentation":"

The Amazon Resource Name (ARN) of the project for which you want to list uploads.

" }, + "type":{ + "shape":"UploadType", + "documentation":"

The type of upload.

Must be one of the following values:

" + }, "nextToken":{ "shape":"PaginationToken", "documentation":"

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

" @@ -3790,6 +3839,10 @@ "skipAppResign":{ "shape":"SkipAppResign", "documentation":"

When set to true, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.

For more information about how Device Farm re-signs your app(s), see Do you modify my app? in the AWS Device Farm FAQs.

" + }, + "testSpecArn":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN of the YAML-formatted test specification for the run.

" } }, "documentation":"

Represents a test run on a set of devices with a given app package, test parameters, etc.

" @@ -3945,6 +3998,10 @@ "shape":"AmazonResourceName", "documentation":"

The ARN of the uploaded test that will be run.

" }, + "testSpecArn":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN of the YAML-formatted test specification.
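
A hedged sketch of passing a custom test spec to ScheduleRun with video capture enabled; the ARNs are placeholders, and the members that do not appear in this hunk (appArn, projectArn, devicePoolArn, the test type, executionConfiguration) are assumed from the full Device Farm model:

    import botocore.session

    # Schedule a run that uses a custom YAML test spec and records video.
    # ARNs are placeholders; members outside this hunk are assumed from the
    # full Device Farm API model.
    session = botocore.session.get_session()
    devicefarm = session.create_client("devicefarm", region_name="us-west-2")

    run = devicefarm.schedule_run(
        projectArn="arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE",
        appArn="arn:aws:devicefarm:us-west-2:123456789012:upload:APP",
        devicePoolArn="arn:aws:devicefarm:us-west-2:123456789012:devicepool:EXAMPLE",
        name="appium-python-with-custom-testspec",
        test={
            "type": "APPIUM_PYTHON",
            "testPackageArn": "arn:aws:devicefarm:us-west-2:123456789012:upload:PKG",
            "testSpecArn": "arn:aws:devicefarm:us-west-2:123456789012:upload:SPEC",
        },
        executionConfiguration={"videoCapture": True},
    )
    print(run)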

" + }, "filter":{ "shape":"Filter", "documentation":"

The test's filter.

" @@ -3978,6 +4035,25 @@ "max":8192, "min":0 }, + "StopJobRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"AmazonResourceName", + "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm job you wish to stop.

" + } + } + }, + "StopJobResult":{ + "type":"structure", + "members":{ + "job":{ + "shape":"Job", + "documentation":"

The job that was stopped.

" + } + } + }, "StopRemoteAccessSessionRequest":{ "type":"structure", "required":["arn"], @@ -4393,6 +4469,37 @@ }, "documentation":"

Represents the result of an update project request.

" }, + "UpdateUploadRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the uploaded test spec.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The upload's test spec file name. The name should not contain the '/' character. The test spec file name must end with the .yaml or .yml file extension.

" + }, + "contentType":{ + "shape":"ContentType", + "documentation":"

The upload's content type (for example, \"application/x-yaml\").

" + }, + "editContent":{ + "shape":"Boolean", + "documentation":"

Set to true if the YAML file has changed and needs to be updated; otherwise, set to false.

" + } + } + }, + "UpdateUploadResult":{ + "type":"structure", + "members":{ + "upload":{ + "shape":"Upload", + "documentation":"

A test spec uploaded to Device Farm.

" + } + } + }, "UpdateVPCEConfigurationRequest":{ "type":"structure", "required":["arn"], @@ -4466,10 +4573,21 @@ "message":{ "shape":"Message", "documentation":"

A message about the upload's result.

" + }, + "category":{ + "shape":"UploadCategory", + "documentation":"

The upload's category. Allowed values include CURATED and PRIVATE.

" } }, "documentation":"

An app or a set of one or more tests to upload or that have been uploaded.

" }, + "UploadCategory":{ + "type":"string", + "enum":[ + "CURATED", + "PRIVATE" + ] + }, "UploadStatus":{ "type":"string", "enum":[ @@ -4497,7 +4615,15 @@ "UIAUTOMATION_TEST_PACKAGE", "UIAUTOMATOR_TEST_PACKAGE", "XCTEST_TEST_PACKAGE", - "XCTEST_UI_TEST_PACKAGE" + "XCTEST_UI_TEST_PACKAGE", + "APPIUM_JAVA_JUNIT_TEST_SPEC", + "APPIUM_JAVA_TESTNG_TEST_SPEC", + "APPIUM_PYTHON_TEST_SPEC", + "APPIUM_WEB_JAVA_JUNIT_TEST_SPEC", + "APPIUM_WEB_JAVA_TESTNG_TEST_SPEC", + "APPIUM_WEB_PYTHON_TEST_SPEC", + "INSTRUMENTATION_TEST_SPEC", + "XCTEST_UI_TEST_SPEC" ] }, "Uploads":{ @@ -4548,7 +4674,8 @@ "type":"string", "max":2048, "min":0 - } + }, + "VideoCapture":{"type":"boolean"} }, "documentation":"

AWS Device Farm is a service that enables mobile app developers to test Android, iOS, and Fire OS apps on physical phones, tablets, and other devices in the cloud.

" } diff --git a/botocore/data/directconnect/2012-10-25/service-2.json b/botocore/data/directconnect/2012-10-25/service-2.json index 15f3a185..39979feb 100644 --- a/botocore/data/directconnect/2012-10-25/service-2.json +++ b/botocore/data/directconnect/2012-10-25/service-2.json @@ -818,8 +818,10 @@ }, "AwsDevice":{ "type":"string", - "documentation":"

An abstract ID for the physical Direct Connect endpoint.

Example: EQC50-abcdef123456

" + "documentation":"

An abstract ID for the physical Direct Connect endpoint.

Example: EQC50-abcdef123456

", + "deprecated":true }, + "AwsDeviceV2":{"type":"string"}, "BGPAuthKey":{ "type":"string", "documentation":"

The authentication key for BGP configuration.

Example: asdf34example

" @@ -833,7 +835,11 @@ "amazonAddress":{"shape":"AmazonAddress"}, "customerAddress":{"shape":"CustomerAddress"}, "bgpPeerState":{"shape":"BGPPeerState"}, - "bgpStatus":{"shape":"BGPStatus"} + "bgpStatus":{"shape":"BGPStatus"}, + "awsDeviceV2":{ + "shape":"AwsDeviceV2", + "documentation":"

The Direct Connect endpoint that the BGP peer terminates on.

" + } }, "documentation":"

A structure containing information about a BGP peer.

" }, @@ -855,7 +861,7 @@ }, "BGPStatus":{ "type":"string", - "documentation":"

The Up/Down state of the BGP peer.

", + "documentation":"

The Up/Down state of the BGP peer.

", "enum":[ "up", "down" @@ -948,6 +954,10 @@ "lagId":{"shape":"LagId"}, "awsDevice":{ "shape":"AwsDevice", + "documentation":"

Deprecated in favor of awsDeviceV2.

The Direct Connect endpoint that the physical connection terminates on.

" + }, + "awsDeviceV2":{ + "shape":"AwsDeviceV2", "documentation":"

The Direct Connect endpoint that the physical connection terminates on.

" } }, @@ -1681,6 +1691,10 @@ "lagId":{"shape":"LagId"}, "awsDevice":{ "shape":"AwsDevice", + "documentation":"

Deprecated in favor of awsDeviceV2.

The Direct Connect endpoint that the physical connection terminates on.

" + }, + "awsDeviceV2":{ + "shape":"AwsDeviceV2", "documentation":"

The Direct Connect endpoint that the physical connection terminates on.

" } }, @@ -1750,6 +1764,10 @@ }, "awsDevice":{ "shape":"AwsDevice", + "documentation":"

Deprecated in favor of awsDeviceV2.

The AWS Direct Connect endpoint that hosts the LAG.

" + }, + "awsDeviceV2":{ + "shape":"AwsDeviceV2", "documentation":"

The AWS Direct Connect endpoint that hosts the LAG.

" }, "connections":{ @@ -1823,6 +1841,10 @@ "locationName":{ "shape":"LocationName", "documentation":"

The name of the AWS Direct Connect location. The name includes the colocation partner name and the physical site of the lit building.

" + }, + "region":{ + "shape":"Region", + "documentation":"

The AWS region where the AWS Direct Connect location is located.

Example: us-east-1

Default: None

" } }, "documentation":"

An AWS Direct Connect location where connections and interconnects can be requested.

" @@ -2172,7 +2194,15 @@ "virtualGatewayId":{"shape":"VirtualGatewayId"}, "directConnectGatewayId":{"shape":"DirectConnectGatewayId"}, "routeFilterPrefixes":{"shape":"RouteFilterPrefixList"}, - "bgpPeers":{"shape":"BGPPeerList"} + "bgpPeers":{"shape":"BGPPeerList"}, + "region":{ + "shape":"Region", + "documentation":"

The AWS region where the virtual interface is located.

Example: us-east-1

Default: None

" + }, + "awsDeviceV2":{ + "shape":"AwsDeviceV2", + "documentation":"

The Direct Connect endpoint that the virtual interface terminates on.

" + } }, "documentation":"

A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect location and the customer.

" }, diff --git a/botocore/data/dlm/2018-01-12/paginators-1.json b/botocore/data/dlm/2018-01-12/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/dlm/2018-01-12/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/dlm/2018-01-12/service-2.json b/botocore/data/dlm/2018-01-12/service-2.json new file mode 100644 index 00000000..658e5448 --- /dev/null +++ b/botocore/data/dlm/2018-01-12/service-2.json @@ -0,0 +1,558 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-01-12", + "endpointPrefix":"dlm", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon DLM", + "serviceFullName":"Amazon Data Lifecycle Manager", + "serviceId":"DLM", + "signatureVersion":"v4", + "signingName":"dlm", + "uid":"dlm-2018-01-12" + }, + "operations":{ + "CreateLifecyclePolicy":{ + "name":"CreateLifecyclePolicy", + "http":{ + "method":"POST", + "requestUri":"/policies" + }, + "input":{"shape":"CreateLifecyclePolicyRequest"}, + "output":{"shape":"CreateLifecyclePolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a policy to manage the lifecycle of the specified AWS resources. You can create up to 100 lifecycle policies.

" + }, + "DeleteLifecyclePolicy":{ + "name":"DeleteLifecyclePolicy", + "http":{ + "method":"DELETE", + "requestUri":"/policies/{policyId}/" + }, + "input":{"shape":"DeleteLifecyclePolicyRequest"}, + "output":{"shape":"DeleteLifecyclePolicyResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Deletes the specified lifecycle policy and halts the automated operations that the policy specified.

" + }, + "GetLifecyclePolicies":{ + "name":"GetLifecyclePolicies", + "http":{ + "method":"GET", + "requestUri":"/policies" + }, + "input":{"shape":"GetLifecyclePoliciesRequest"}, + "output":{"shape":"GetLifecyclePoliciesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Gets summary information about all or the specified data lifecycle policies.

To get complete information about a policy, use GetLifecyclePolicy.

" + }, + "GetLifecyclePolicy":{ + "name":"GetLifecyclePolicy", + "http":{ + "method":"GET", + "requestUri":"/policies/{policyId}/" + }, + "input":{"shape":"GetLifecyclePolicyRequest"}, + "output":{"shape":"GetLifecyclePolicyResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Gets detailed information about the specified lifecycle policy.

" + }, + "UpdateLifecyclePolicy":{ + "name":"UpdateLifecyclePolicy", + "http":{ + "method":"PATCH", + "requestUri":"/policies/{policyId}" + }, + "input":{"shape":"UpdateLifecyclePolicyRequest"}, + "output":{"shape":"UpdateLifecyclePolicyResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates the specified lifecycle policy.

" + } + }, + "shapes":{ + "Count":{ + "type":"integer", + "max":1000, + "min":1 + }, + "CreateLifecyclePolicyRequest":{ + "type":"structure", + "required":[ + "ExecutionRoleArn", + "Description", + "State", + "PolicyDetails" + ], + "members":{ + "ExecutionRoleArn":{ + "shape":"ExecutionRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role used to run the operations specified by the lifecycle policy.

" + }, + "Description":{ + "shape":"PolicyDescription", + "documentation":"

A description of the lifecycle policy. The characters ^[0-9A-Za-z _-]+$ are supported.

" + }, + "State":{ + "shape":"SettablePolicyStateValues", + "documentation":"

The desired activation state of the lifecycle policy after creation.

" + }, + "PolicyDetails":{ + "shape":"PolicyDetails", + "documentation":"

The configuration of the lifecycle policy.

Target tags cannot be re-used across lifecycle policies.

" + } + } + }, + "CreateLifecyclePolicyResponse":{ + "type":"structure", + "members":{ + "PolicyId":{ + "shape":"PolicyId", + "documentation":"

The identifier of the lifecycle policy.

" + } + } + }, + "CreateRule":{ + "type":"structure", + "required":[ + "Interval", + "IntervalUnit" + ], + "members":{ + "Interval":{ + "shape":"Interval", + "documentation":"

The interval. The supported values are 12 and 24.

" + }, + "IntervalUnit":{ + "shape":"IntervalUnitValues", + "documentation":"

The interval unit.

" + }, + "Times":{ + "shape":"TimesList", + "documentation":"

The time, in UTC, to start the operation.

The operation occurs within a one-hour window following the specified time.

" + } + }, + "documentation":"

Specifies when to create snapshots of EBS volumes.

" + }, + "DeleteLifecyclePolicyRequest":{ + "type":"structure", + "required":["PolicyId"], + "members":{ + "PolicyId":{ + "shape":"PolicyId", + "documentation":"

The identifier of the lifecycle policy.

", + "location":"uri", + "locationName":"policyId" + } + } + }, + "DeleteLifecyclePolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "ExecutionRoleArn":{"type":"string"}, + "GetLifecyclePoliciesRequest":{ + "type":"structure", + "members":{ + "PolicyIds":{ + "shape":"PolicyIdList", + "documentation":"

The identifiers of the data lifecycle policies.

", + "location":"querystring", + "locationName":"policyIds" + }, + "State":{ + "shape":"GettablePolicyStateValues", + "documentation":"

The activation state.

", + "location":"querystring", + "locationName":"state" + }, + "ResourceTypes":{ + "shape":"ResourceTypeValuesList", + "documentation":"

The resource type.

", + "location":"querystring", + "locationName":"resourceTypes" + }, + "TargetTags":{ + "shape":"TargetTagsFilterList", + "documentation":"

The target tags.

Tags are strings in the format key:value.

", + "location":"querystring", + "locationName":"targetTags" + }, + "TagsToAdd":{ + "shape":"TagsToAddFilterList", + "documentation":"

The tags to add to the resources.

Tags are strings in the format key:value.

These tags are added in addition to the AWS-added lifecycle tags.

", + "location":"querystring", + "locationName":"tagsToAdd" + } + } + }, + "GetLifecyclePoliciesResponse":{ + "type":"structure", + "members":{ + "Policies":{ + "shape":"LifecyclePolicySummaryList", + "documentation":"

Summary information about the lifecycle policies.

" + } + } + }, + "GetLifecyclePolicyRequest":{ + "type":"structure", + "required":["PolicyId"], + "members":{ + "PolicyId":{ + "shape":"PolicyId", + "documentation":"

The identifier of the lifecycle policy.

", + "location":"uri", + "locationName":"policyId" + } + } + }, + "GetLifecyclePolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"LifecyclePolicy", + "documentation":"

Detailed information about the lifecycle policy.

" + } + } + }, + "GettablePolicyStateValues":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED", + "ERROR" + ] + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Code":{"shape":"ErrorCode"} + }, + "documentation":"

The service failed in an unexpected way.

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "Interval":{ + "type":"integer", + "min":1 + }, + "IntervalUnitValues":{ + "type":"string", + "enum":["HOURS"] + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Code":{"shape":"ErrorCode"}, + "RequiredParameters":{ + "shape":"ParameterList", + "documentation":"

The request omitted one or more required parameters.

" + }, + "MutuallyExclusiveParameters":{ + "shape":"ParameterList", + "documentation":"

The request included parameters that cannot be provided together.

" + } + }, + "documentation":"

Bad request. The request is missing required parameters or has invalid parameters.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LifecyclePolicy":{ + "type":"structure", + "members":{ + "PolicyId":{ + "shape":"PolicyId", + "documentation":"

The identifier of the lifecycle policy.

" + }, + "Description":{ + "shape":"PolicyDescription", + "documentation":"

The description of the lifecycle policy.

" + }, + "State":{ + "shape":"GettablePolicyStateValues", + "documentation":"

The activation state of the lifecycle policy.

" + }, + "ExecutionRoleArn":{ + "shape":"ExecutionRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role used to run the operations specified by the lifecycle policy.

" + }, + "DateCreated":{ + "shape":"Timestamp", + "documentation":"

The local date and time when the lifecycle policy was created.

" + }, + "DateModified":{ + "shape":"Timestamp", + "documentation":"

The local date and time when the lifecycle policy was last modified.

" + }, + "PolicyDetails":{ + "shape":"PolicyDetails", + "documentation":"

The configuration of the lifecycle policy.

" + } + }, + "documentation":"

Detailed information about a lifecycle policy.

" + }, + "LifecyclePolicySummary":{ + "type":"structure", + "members":{ + "PolicyId":{ + "shape":"PolicyId", + "documentation":"

The identifier of the lifecycle policy.

" + }, + "Description":{ + "shape":"PolicyDescription", + "documentation":"

The description of the lifecycle policy.

" + }, + "State":{ + "shape":"GettablePolicyStateValues", + "documentation":"

The activation state of the lifecycle policy.

" + } + }, + "documentation":"

Summary information about a lifecycle policy.

" + }, + "LifecyclePolicySummaryList":{ + "type":"list", + "member":{"shape":"LifecyclePolicySummary"} + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Code":{"shape":"ErrorCode"}, + "ResourceType":{ + "shape":"String", + "documentation":"

Value is the type of resource for which a limit was exceeded.

" + } + }, + "documentation":"

The request failed because a limit was exceeded.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Parameter":{"type":"string"}, + "ParameterList":{ + "type":"list", + "member":{"shape":"Parameter"} + }, + "PolicyDescription":{ + "type":"string", + "max":500, + "min":0 + }, + "PolicyDetails":{ + "type":"structure", + "members":{ + "ResourceTypes":{ + "shape":"ResourceTypeValuesList", + "documentation":"

The resource type.

" + }, + "TargetTags":{ + "shape":"TargetTagList", + "documentation":"

The target tags.

" + }, + "Schedules":{ + "shape":"ScheduleList", + "documentation":"

The schedule.

" + } + }, + "documentation":"

Specifies the configuration of a lifecycle policy.

" + }, + "PolicyId":{"type":"string"}, + "PolicyIdList":{ + "type":"list", + "member":{"shape":"PolicyId"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Code":{"shape":"ErrorCode"}, + "ResourceType":{ + "shape":"String", + "documentation":"

Value is the type of resource that was not found.

" + }, + "ResourceIds":{ + "shape":"PolicyIdList", + "documentation":"

Value is a list of resource IDs that were not found.

" + } + }, + "documentation":"

A requested resource was not found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceTypeValues":{ + "type":"string", + "enum":["VOLUME"] + }, + "ResourceTypeValuesList":{ + "type":"list", + "member":{"shape":"ResourceTypeValues"}, + "max":1, + "min":1 + }, + "RetainRule":{ + "type":"structure", + "required":["Count"], + "members":{ + "Count":{ + "shape":"Count", + "documentation":"

The number of snapshots to keep for each volume, up to a maximum of 1000.

" + } + }, + "documentation":"

Specifies the number of snapshots to keep for each EBS volume.

" + }, + "Schedule":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

The name of the schedule.

" + }, + "TagsToAdd":{ + "shape":"TagsToAddList", + "documentation":"

The tags to add to policy-created resources. These tags are added in addition to the default lifecycle tags.

" + }, + "CreateRule":{ + "shape":"CreateRule", + "documentation":"

The create rule.

" + }, + "RetainRule":{ + "shape":"RetainRule", + "documentation":"

The retain rule.

" + } + }, + "documentation":"

Specifies a schedule.

" + }, + "ScheduleList":{ + "type":"list", + "member":{"shape":"Schedule"}, + "max":1, + "min":1 + }, + "ScheduleName":{ + "type":"string", + "max":500, + "min":0 + }, + "SettablePolicyStateValues":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"String", + "documentation":"

The tag key.

" + }, + "Value":{ + "shape":"String", + "documentation":"

The tag value.

" + } + }, + "documentation":"

Specifies a tag for a resource.

" + }, + "TagFilter":{"type":"string"}, + "TagsToAddFilterList":{ + "type":"list", + "member":{"shape":"TagFilter"}, + "max":50, + "min":0 + }, + "TagsToAddList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":0 + }, + "TargetTagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":1 + }, + "TargetTagsFilterList":{ + "type":"list", + "member":{"shape":"TagFilter"}, + "max":50, + "min":1 + }, + "Time":{ + "type":"string", + "pattern":"^([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$" + }, + "TimesList":{ + "type":"list", + "member":{"shape":"Time"}, + "max":1 + }, + "Timestamp":{"type":"timestamp"}, + "UpdateLifecyclePolicyRequest":{ + "type":"structure", + "required":["PolicyId"], + "members":{ + "PolicyId":{ + "shape":"PolicyId", + "documentation":"

The identifier of the lifecycle policy.

", + "location":"uri", + "locationName":"policyId" + }, + "ExecutionRoleArn":{ + "shape":"ExecutionRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role used to run the operations specified by the lifecycle policy.

" + }, + "State":{ + "shape":"SettablePolicyStateValues", + "documentation":"

The desired activation state of the lifecycle policy after creation.

" + }, + "Description":{ + "shape":"PolicyDescription", + "documentation":"

A description of the lifecycle policy.

" + }, + "PolicyDetails":{ + "shape":"PolicyDetails", + "documentation":"

The configuration of the lifecycle policy.

Target tags cannot be re-used across policies.

" + } + } + }, + "UpdateLifecyclePolicyResponse":{ + "type":"structure", + "members":{ + } + } + }, + "documentation":"Amazon Data Lifecycle Manager

With Amazon Data Lifecycle Manager, you can manage the lifecycle of your AWS resources. You create lifecycle policies, which are used to automate operations on the specified resources.

Amazon DLM supports Amazon EBS volumes and snapshots. For information about using Amazon DLM with Amazon EBS, see Automating the Amazon EBS Snapshot Lifecycle in the Amazon EC2 User Guide.
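
As a rough botocore sketch of the client generated from this model, the following creates a simple daily-snapshot policy for tagged EBS volumes (the role ARN and tag values are placeholders):

    import botocore.session

    # Create a DLM lifecycle policy: daily snapshots of volumes tagged
    # Backup=true, retaining the last 7 snapshots. The role ARN and tag
    # values are placeholders.
    session = botocore.session.get_session()
    dlm = session.create_client("dlm", region_name="us-east-1")

    policy = dlm.create_lifecycle_policy(
        ExecutionRoleArn="arn:aws:iam::123456789012:role/ExampleDLMRole",
        Description="Daily snapshots of tagged volumes",
        State="ENABLED",
        PolicyDetails={
            "ResourceTypes": ["VOLUME"],
            "TargetTags": [{"Key": "Backup", "Value": "true"}],
            "Schedules": [
                {
                    "Name": "DailySnapshots",
                    "CreateRule": {
                        "Interval": 24,
                        "IntervalUnit": "HOURS",
                        "Times": ["03:00"],
                    },
                    "RetainRule": {"Count": 7},
                }
            ],
        },
    )
    print(policy["PolicyId"])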

" +} diff --git a/botocore/data/dynamodb/2012-08-10/service-2.json b/botocore/data/dynamodb/2012-08-10/service-2.json index 182b0c6b..575000b1 100644 --- a/botocore/data/dynamodb/2012-08-10/service-2.json +++ b/botocore/data/dynamodb/2012-08-10/service-2.json @@ -635,6 +635,149 @@ }, "documentation":"

For the UpdateItem operation, represents the attributes to be modified, the action to perform on each, and the new value for each.

You cannot use UpdateItem to update any primary key attributes. Instead, you will need to delete the item, and then use PutItem to create a new item with new attributes.

Attribute values cannot be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception.

" }, + "AutoScalingPolicyDescription":{ + "type":"structure", + "members":{ + "PolicyName":{ + "shape":"AutoScalingPolicyName", + "documentation":"

The name of the scaling policy.

" + }, + "TargetTrackingScalingPolicyConfiguration":{ + "shape":"AutoScalingTargetTrackingScalingPolicyConfigurationDescription", + "documentation":"

Represents a target tracking scaling policy configuration.

" + } + }, + "documentation":"

Represents the properties of the scaling policy.

" + }, + "AutoScalingPolicyDescriptionList":{ + "type":"list", + "member":{"shape":"AutoScalingPolicyDescription"} + }, + "AutoScalingPolicyName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\p{Print}+" + }, + "AutoScalingPolicyUpdate":{ + "type":"structure", + "required":["TargetTrackingScalingPolicyConfiguration"], + "members":{ + "PolicyName":{ + "shape":"AutoScalingPolicyName", + "documentation":"

The name of the scaling policy.

" + }, + "TargetTrackingScalingPolicyConfiguration":{ + "shape":"AutoScalingTargetTrackingScalingPolicyConfigurationUpdate", + "documentation":"

Represents a target tracking scaling policy configuration.

" + } + }, + "documentation":"

Represents the autoscaling policy to be modified.

" + }, + "AutoScalingRoleArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "AutoScalingSettingsDescription":{ + "type":"structure", + "members":{ + "MinimumUnits":{ + "shape":"PositiveLongObject", + "documentation":"

The minimum capacity units that a global table or global secondary index should be scaled down to.

" + }, + "MaximumUnits":{ + "shape":"PositiveLongObject", + "documentation":"

The maximum capacity units that a global table or global secondary index should be scaled up to.

" + }, + "AutoScalingDisabled":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether autoscaling is disabled for this global table or global secondary index.

" + }, + "AutoScalingRoleArn":{ + "shape":"String", + "documentation":"

The role ARN used for configuring the autoscaling policy.

" + }, + "ScalingPolicies":{ + "shape":"AutoScalingPolicyDescriptionList", + "documentation":"

Information about the scaling policies.

" + } + }, + "documentation":"

Represents the autoscaling settings for a global table or global secondary index.

" + }, + "AutoScalingSettingsUpdate":{ + "type":"structure", + "members":{ + "MinimumUnits":{ + "shape":"PositiveLongObject", + "documentation":"

The minimum capacity units that a global table or global secondary index should be scaled down to.

" + }, + "MaximumUnits":{ + "shape":"PositiveLongObject", + "documentation":"

The maximum capacity units that a global table or global secondary index should be scaled up to.

" + }, + "AutoScalingDisabled":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether autoscaling is disabled for this global table or global secondary index.

" + }, + "AutoScalingRoleArn":{ + "shape":"AutoScalingRoleArn", + "documentation":"

The role ARN used for configuring the autoscaling policy.

" + }, + "ScalingPolicyUpdate":{ + "shape":"AutoScalingPolicyUpdate", + "documentation":"

The scaling policy to apply for scaling target global table or global secondary index capacity units.

" + } + }, + "documentation":"

Represents the autoscaling settings to be modified for a global table or global secondary index.
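
A hedged sketch of applying such an update to a global table's write capacity; the table name and limits are placeholders, and the UpdateGlobalTableSettings operation and its GlobalTableName member are assumed from the full DynamoDB model rather than this hunk:

    import botocore.session

    # Attach a target-tracking auto scaling policy to a global table's write
    # capacity. The table name and limits are placeholders; the operation and
    # GlobalTableName member are assumed from the full DynamoDB model.
    session = botocore.session.get_session()
    dynamodb = session.create_client("dynamodb", region_name="us-east-1")

    auto_scaling_update = {
        "MinimumUnits": 5,
        "MaximumUnits": 500,
        "AutoScalingDisabled": False,
        "ScalingPolicyUpdate": {
            "PolicyName": "WriteCapacityUtilization",
            "TargetTrackingScalingPolicyConfiguration": {
                "TargetValue": 70.0,
                "ScaleInCooldown": 60,
                "ScaleOutCooldown": 60,
                "DisableScaleIn": False,
            },
        },
    }

    dynamodb.update_global_table_settings(
        GlobalTableName="example-global-table",
        GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate=auto_scaling_update,
    )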

" + }, + "AutoScalingTargetTrackingScalingPolicyConfigurationDescription":{ + "type":"structure", + "required":["TargetValue"], + "members":{ + "DisableScaleIn":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false.

" + }, + "ScaleInCooldown":{ + "shape":"IntegerObject", + "documentation":"

The amount of time, in seconds, after a scale in activity completes before another scale in activity can start. The cooldown period is used to block subsequent scale in requests until it has expired. You should scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, application autoscaling scales out your scalable target immediately.

" + }, + "ScaleOutCooldown":{ + "shape":"IntegerObject", + "documentation":"

The amount of time, in seconds, after a scale out activity completes before another scale out activity can start. While the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. You should continuously (but not excessively) scale out.

" + }, + "TargetValue":{ + "shape":"Double", + "documentation":"

The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2^-360 to 2^360 (Base 2).

" + } + }, + "documentation":"

Represents the properties of a target tracking scaling policy.

" + }, + "AutoScalingTargetTrackingScalingPolicyConfigurationUpdate":{ + "type":"structure", + "required":["TargetValue"], + "members":{ + "DisableScaleIn":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false.

" + }, + "ScaleInCooldown":{ + "shape":"IntegerObject", + "documentation":"

The amount of time, in seconds, after a scale in activity completes before another scale in activity can start. The cooldown period is used to block subsequent scale in requests until it has expired. You should scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, application autoscaling scales out your scalable target immediately.

" + }, + "ScaleOutCooldown":{ + "shape":"IntegerObject", + "documentation":"

The amount of time, in seconds, after a scale out activity completes before another scale out activity can start. While the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. You should continuously (but not excessively) scale out.

" + }, + "TargetValue":{ + "shape":"Double", + "documentation":"

The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2^-360 to 2^360 (Base 2).

" + } + }, + "documentation":"

Represents the settings of a target tracking scaling policy that will be modified.

" + }, "Backfilling":{"type":"boolean"}, "BackupArn":{ "type":"string", @@ -666,6 +809,7 @@ "BackupArn", "BackupName", "BackupStatus", + "BackupType", "BackupCreationDateTime" ], "members":{ @@ -685,9 +829,17 @@ "shape":"BackupStatus", "documentation":"

Backup can be in one of the following states: CREATING, ACTIVE, DELETED.

" }, + "BackupType":{ + "shape":"BackupType", + "documentation":"

The type of the backup: USER (an on-demand backup created by you) or SYSTEM (an automatic backup created by DynamoDB).

" + }, "BackupCreationDateTime":{ "shape":"BackupCreationDateTime", "documentation":"

Time at which the backup was created. This is the request time of the backup.

" + }, + "BackupExpiryDateTime":{ + "shape":"Date", + "documentation":"

Time at which the automatic on demand backup created by DynamoDB will expire. This SYSTEM on demand backup expires automatically 35 days after its creation.

" } }, "documentation":"

Contains the details of the backup created for the table.

" @@ -757,10 +909,18 @@ "shape":"BackupCreationDateTime", "documentation":"

Time at which the backup was created.

" }, + "BackupExpiryDateTime":{ + "shape":"Date", + "documentation":"

Time at which the automatic on demand backup created by DynamoDB will expire. This SYSTEM on demand backup expires automatically 35 days after its creation.

" + }, "BackupStatus":{ "shape":"BackupStatus", "documentation":"

Backup can be in one of the following states: CREATING, ACTIVE, DELETED.

" }, + "BackupType":{ + "shape":"BackupType", + "documentation":"

The type of the backup: USER (an on-demand backup created by you) or SYSTEM (an automatic backup created by DynamoDB).

" + }, "BackupSizeBytes":{ "shape":"BackupSizeBytes", "documentation":"

Size of the backup in bytes.

" @@ -768,6 +928,21 @@ }, "documentation":"

Contains details for the backup.

" }, + "BackupType":{ + "type":"string", + "enum":[ + "USER", + "SYSTEM" + ] + }, + "BackupTypeFilter":{ + "type":"string", + "enum":[ + "USER", + "SYSTEM", + "ALL" + ] + }, "BackupsInputLimit":{ "type":"integer", "max":100, @@ -1419,6 +1594,7 @@ } } }, + "Double":{"type":"double"}, "ErrorMessage":{"type":"string"}, "ExpectedAttributeMap":{ "type":"map", @@ -1699,6 +1875,10 @@ "ProvisionedWriteCapacityUnits":{ "shape":"PositiveLongObject", "documentation":"

The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException.

" + }, + "ProvisionedWriteCapacityAutoScalingSettingsUpdate":{ + "shape":"AutoScalingSettingsUpdate", + "documentation":"

AutoScaling settings for managing a global secondary index's write capacity units.

" } }, "documentation":"

Represents the settings of a global secondary index for a global table that will be modified.

" @@ -1754,6 +1934,7 @@ ] }, "Integer":{"type":"integer"}, + "IntegerObject":{"type":"integer"}, "InternalServerError":{ "type":"structure", "members":{ @@ -1944,6 +2125,10 @@ "ExclusiveStartBackupArn":{ "shape":"BackupArn", "documentation":"

LastEvaluatedBackupArn is the ARN of the backup last evaluated when the current page of results was returned, inclusive of the current page of results. This value may be specified as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the next page of results.

" + }, + "BackupType":{ + "shape":"BackupTypeFilter", + "documentation":"

The backups from the table specified by BackupType are listed.

Where BackupType can be USER, SYSTEM, or ALL.
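
For example, with botocore (the table name is a placeholder; TableName and the BackupSummaries response key come from the wider DynamoDB model rather than this hunk):

    import botocore.session

    # List only the automatic (SYSTEM) backups of a table. The table name is
    # a placeholder; TableName and BackupSummaries are assumed from the full
    # DynamoDB model.
    session = botocore.session.get_session()
    dynamodb = session.create_client("dynamodb", region_name="us-east-1")

    backups = dynamodb.list_backups(TableName="example-table", BackupType="SYSTEM")
    for summary in backups.get("BackupSummaries", []):
        print(summary["BackupArn"], summary.get("BackupExpiryDateTime"))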

" } } }, @@ -2515,9 +2700,17 @@ "shape":"PositiveLongObject", "documentation":"

The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException.

" }, + "ProvisionedReadCapacityAutoScalingSettings":{ + "shape":"AutoScalingSettingsDescription", + "documentation":"

Autoscaling settings for a global secondary index replica's read capacity units.

" + }, "ProvisionedWriteCapacityUnits":{ "shape":"PositiveLongObject", "documentation":"

The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException.

" + }, + "ProvisionedWriteCapacityAutoScalingSettings":{ + "shape":"AutoScalingSettingsDescription", + "documentation":"

AutoScaling settings for a global secondary index replica's write capacity units.

" } }, "documentation":"

Represents the properties of a global secondary index.

" @@ -2537,6 +2730,10 @@ "ProvisionedReadCapacityUnits":{ "shape":"PositiveLongObject", "documentation":"

The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException.

" + }, + "ProvisionedReadCapacityAutoScalingSettingsUpdate":{ + "shape":"AutoScalingSettingsUpdate", + "documentation":"

Autoscaling settings for managing a global secondary index replica's read capacity units.

" } }, "documentation":"

Represents the settings of a global secondary index for a global table that will be modified.

" @@ -2575,10 +2772,18 @@ "shape":"PositiveLongObject", "documentation":"

The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

" }, + "ReplicaProvisionedReadCapacityAutoScalingSettings":{ + "shape":"AutoScalingSettingsDescription", + "documentation":"

Autoscaling settings for a global table replica's read capacity units.

" + }, "ReplicaProvisionedWriteCapacityUnits":{ "shape":"PositiveLongObject", "documentation":"

The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

" }, + "ReplicaProvisionedWriteCapacityAutoScalingSettings":{ + "shape":"AutoScalingSettingsDescription", + "documentation":"

AutoScaling settings for a global table replica's write capacity units.

" + }, "ReplicaGlobalSecondaryIndexSettings":{ "shape":"ReplicaGlobalSecondaryIndexSettingsDescriptionList", "documentation":"

Replica global secondary index settings for the global table.

" @@ -2602,6 +2807,10 @@ "shape":"PositiveLongObject", "documentation":"

The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

" }, + "ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate":{ + "shape":"AutoScalingSettingsUpdate", + "documentation":"

Autoscaling settings for managing a global table replica's read capacity units.

" + }, "ReplicaGlobalSecondaryIndexSettingsUpdate":{ "shape":"ReplicaGlobalSecondaryIndexSettingsUpdateList", "documentation":"

Represents the settings of a global secondary index for a global table that will be modified.

" @@ -3385,6 +3594,10 @@ "shape":"PositiveLongObject", "documentation":"

The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException.

" }, + "GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate":{ + "shape":"AutoScalingSettingsUpdate", + "documentation":"

AutoScaling settings for managing provisioned write capacity for the global table.

" + }, "GlobalTableGlobalSecondaryIndexSettingsUpdate":{ "shape":"GlobalTableGlobalSecondaryIndexSettingsUpdateList", "documentation":"

Represents the settings of a global secondary index for a global table that will be modified.

" diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 9383bbc9..df945c9d 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -50,7 +50,7 @@ }, "input":{"shape":"AllocateAddressRequest"}, "output":{"shape":"AllocateAddressResult"}, - "documentation":"

Allocates an Elastic IP address.

An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic per region and 5 Elastic IP addresses for EC2-VPC per region.

If you release an Elastic IP address for use in a VPC, you might be able to recover it. To recover an Elastic IP address that you released, specify it in the Address parameter. Note that you cannot recover an Elastic IP address that you released after it is allocated to another AWS account.

For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Allocates an Elastic IP address to your AWS account. After you allocate the Elastic IP address, you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different AWS account.

[EC2-VPC] If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another AWS account. You cannot recover an Elastic IP address for EC2-Classic. To attempt to recover an Elastic IP address that you released, specify it in this operation.

An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic per region and 5 Elastic IP addresses for EC2-VPC per region.

For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.
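
A brief botocore sketch of the flow described above (the instance ID and recovered address are placeholders; AllocationId and InstanceId follow the wider EC2 model):

    import botocore.session

    # Allocate a VPC Elastic IP address and associate it with an instance.
    # The instance ID is a placeholder.
    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    allocation = ec2.allocate_address(Domain="vpc")
    ec2.associate_address(
        AllocationId=allocation["AllocationId"],
        InstanceId="i-0123456789abcdef0",
    )

    # To attempt to recover a previously released address (EC2-VPC only),
    # pass it explicitly; the address below is a placeholder.
    # ec2.allocate_address(Domain="vpc", Address="203.0.113.25")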

" }, "AllocateHosts":{ "name":"AllocateHosts", @@ -89,7 +89,7 @@ }, "input":{"shape":"AssociateAddressRequest"}, "output":{"shape":"AssociateAddressResult"}, - "documentation":"

Associates an Elastic IP address with an instance or a network interface.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

[EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

[VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation. You cannot associate an Elastic IP address with an instance or network interface that has an existing Elastic IP address.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

" + "documentation":"

Associates an Elastic IP address with an instance or a network interface. Before you can use an Elastic IP address, you must allocate it to your account.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

[EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

[VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation. You cannot associate an Elastic IP address with an instance or network interface that has an existing Elastic IP address.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

" }, "AssociateDhcpOptions":{ "name":"AssociateDhcpOptions", @@ -118,7 +118,7 @@ }, "input":{"shape":"AssociateRouteTableRequest"}, "output":{"shape":"AssociateRouteTableResult"}, - "documentation":"

Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "AssociateSubnetCidrBlock":{ "name":"AssociateSubnetCidrBlock", @@ -157,7 +157,7 @@ "requestUri":"/" }, "input":{"shape":"AttachInternetGatewayRequest"}, - "documentation":"

Attaches an Internet gateway to a VPC, enabling connectivity between the Internet and the VPC. For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Attaches an internet gateway to a VPC, enabling connectivity between the internet and the VPC. For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

" }, "AttachNetworkInterface":{ "name":"AttachNetworkInterface", @@ -215,7 +215,7 @@ }, "input":{"shape":"BundleInstanceRequest"}, "output":{"shape":"BundleInstanceResult"}, - "documentation":"

Bundles an Amazon instance store-backed Windows instance.

During bundling, only the root device volume (C:\\) is bundled. Data on other instance store volumes is not preserved.

This action is not applicable for Linux/Unix instances or Windows instances that are backed by Amazon EBS.

For more information, see Creating an Instance Store-Backed Windows AMI.

" + "documentation":"

Bundles an Amazon instance store-backed Windows instance.

During bundling, only the root device volume (C:\\) is bundled. Data on other instance store volumes is not preserved.

This action is not applicable for Linux/Unix instances or Windows instances that are backed by Amazon EBS.

" }, "CancelBundleTask":{ "name":"CancelBundleTask", @@ -353,7 +353,7 @@ }, "input":{"shape":"CreateDefaultVpcRequest"}, "output":{"shape":"CreateDefaultVpcResult"}, - "documentation":"

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

You can create a default VPC if you deleted your previous default VPC. You cannot have more than one default VPC per region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a region that supports EC2-Classic. If you want a default VPC in a region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.

" + "documentation":"

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.

" }, "CreateDhcpOptions":{ "name":"CreateDhcpOptions", @@ -363,7 +363,7 @@ }, "input":{"shape":"CreateDhcpOptionsRequest"}, "output":{"shape":"CreateDhcpOptionsResult"}, - "documentation":"

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an Internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" }, "CreateEgressOnlyInternetGateway":{ "name":"CreateEgressOnlyInternetGateway", @@ -373,7 +373,7 @@ }, "input":{"shape":"CreateEgressOnlyInternetGatewayRequest"}, "output":{"shape":"CreateEgressOnlyInternetGatewayResult"}, - "documentation":"

[IPv6 only] Creates an egress-only Internet gateway for your VPC. An egress-only Internet gateway is used to enable outbound communication over IPv6 from instances in your VPC to the Internet, and prevents hosts outside of your VPC from initiating an IPv6 connection with your instance.

" + "documentation":"

[IPv6 only] Creates an egress-only internet gateway for your VPC. An egress-only internet gateway is used to enable outbound communication over IPv6 from instances in your VPC to the internet, and prevents hosts outside of your VPC from initiating an IPv6 connection with your instance.

" }, "CreateFleet":{ "name":"CreateFleet", @@ -393,7 +393,7 @@ }, "input":{"shape":"CreateFlowLogsRequest"}, "output":{"shape":"CreateFlowLogsResult"}, - "documentation":"

Creates one or more flow logs to capture IP traffic for a specific network interface, subnet, or VPC. Flow logs are delivered to a specified log group in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, a log stream is created in CloudWatch Logs for each network interface in the subnet or VPC. Log streams can include information about accepted and rejected traffic to a network interface. You can view the data in your log streams using Amazon CloudWatch Logs.

In your request, you must also specify an IAM role that has permission to publish logs to CloudWatch Logs.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow Log Records in the Amazon Virtual Private Cloud User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.
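
A hedged sketch of the CloudWatch Logs delivery path described above; the resource ID, log group, and role ARN are placeholders, and the parameter names are taken from the wider EC2 model rather than this hunk (S3 delivery would instead use LogDestinationType and LogDestination):

    import botocore.session

    # Publish flow logs for one subnet to a CloudWatch Logs log group.
    # The resource ID, log group name, and role ARN are placeholders; the
    # parameter names come from the full EC2 model, not this hunk.
    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    ec2.create_flow_logs(
        ResourceIds=["subnet-0123456789abcdef0"],
        ResourceType="Subnet",
        TrafficType="ALL",
        LogGroupName="example-flow-logs",
        DeliverLogsPermissionArn="arn:aws:iam::123456789012:role/ExampleFlowLogsRole",
    )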

" }, "CreateFpgaImage":{ "name":"CreateFpgaImage", @@ -433,7 +433,7 @@ }, "input":{"shape":"CreateInternetGatewayRequest"}, "output":{"shape":"CreateInternetGatewayResult"}, - "documentation":"

Creates an Internet gateway for use with a VPC. After creating the Internet gateway, you attach it to a VPC using AttachInternetGateway.

For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates an internet gateway for use with a VPC. After creating the internet gateway, you attach it to a VPC using AttachInternetGateway.

For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

" }, "CreateKeyPair":{ "name":"CreateKeyPair", @@ -483,7 +483,7 @@ }, "input":{"shape":"CreateNetworkAclRequest"}, "output":{"shape":"CreateNetworkAclResult"}, - "documentation":"

Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" }, "CreateNetworkAclEntry":{ "name":"CreateNetworkAclEntry", @@ -541,7 +541,7 @@ }, "input":{"shape":"CreateRouteRequest"}, "output":{"shape":"CreateRouteResult"}, - "documentation":"

Creates a route in a route table within a VPC.

You must specify one of the following targets: Internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only Internet gateway.

When determining how to route traffic, we use the route with the most specific match. For example, traffic is destined for the IPv4 address 192.0.2.3, and the route table includes the following two IPv4 routes:

Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a route in a route table within a VPC.

You must specify one of the following targets: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

When determining how to route traffic, we use the route with the most specific match. For example, traffic is destined for the IPv4 address 192.0.2.3, and the route table includes the following two IPv4 routes:

Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "CreateRouteTable":{ "name":"CreateRouteTable", @@ -551,7 +551,7 @@ }, "input":{"shape":"CreateRouteTableRequest"}, "output":{"shape":"CreateRouteTableResult"}, - "documentation":"

Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "CreateSecurityGroup":{ "name":"CreateSecurityGroup", @@ -591,7 +591,7 @@ }, "input":{"shape":"CreateSubnetRequest"}, "output":{"shape":"CreateSubnetResult"}, - "documentation":"

Creates a subnet in an existing VPC.

When you create each subnet, you provide the VPC ID and the IPv4 CIDR block you want for the subnet. After you create a subnet, you can't change its CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses).

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a subnet in an existing VPC.

When you create each subnet, you provide the VPC ID and IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses).

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.
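As a sketch of the CIDR sizing rules above, the following botocore snippet creates a /16 VPC and carves a /28 subnet out of it; the region is a placeholder.

    import botocore.session

    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    vpc_id = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]["VpcId"]

    # A /28 subnet has 16 addresses; AWS reserves the first four and the last,
    # so 11 remain usable.
    subnet = ec2.create_subnet(VpcId=vpc_id, CidrBlock="10.0.0.0/28")["Subnet"]
    print(subnet["SubnetId"], subnet["AvailableIpAddressCount"])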

" }, "CreateTags":{ "name":"CreateTags", @@ -620,7 +620,7 @@ }, "input":{"shape":"CreateVpcRequest"}, "output":{"shape":"CreateVpcResult"}, - "documentation":"

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). To help you decide how big to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC.

By default, each instance you launch in the VPC has the default DHCP options, which includes only a default DNS server that we provide (AmazonProvidedDNS). For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC.

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.
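A minimal botocore sketch covering the options above: an IPv4 CIDR, an Amazon-provided IPv6 block, and the tenancy fixed at creation time. The region is a placeholder.

    import botocore.session

    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    # The IPv6 block is a /56 chosen by Amazon; the tenancy set here can later
    # be changed only back to default (see ModifyVpcTenancy).
    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16",
                         AmazonProvidedIpv6CidrBlock=True,
                         InstanceTenancy="default")
    print(vpc["Vpc"]["VpcId"])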

" }, "CreateVpcEndpoint":{ "name":"CreateVpcEndpoint", @@ -660,7 +660,7 @@ }, "input":{"shape":"CreateVpcPeeringConnectionRequest"}, "output":{"shape":"CreateVpcPeeringConnectionResult"}, - "documentation":"

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

" + "documentation":"

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different Region from the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.
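A sketch of the request/accept handshake described above, assuming a requester VPC in the caller's account and an accepter VPC in another (hypothetical) account and Region.

    import botocore.session

    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    resp = ec2.create_vpc_peering_connection(
        VpcId="vpc-0requester0",          # requester VPC (yours)
        PeerVpcId="vpc-0accepter00",      # accepter VPC
        PeerOwnerId="123456789012",       # hypothetical accepter account
        PeerRegion="us-west-2",
    )
    pcx_id = resp["VpcPeeringConnection"]["VpcPeeringConnectionId"]

    # The accepter's owner then has 7 days to run, from their own account/Region:
    #   ec2.accept_vpc_peering_connection(VpcPeeringConnectionId=pcx_id)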

" }, "CreateVpnConnection":{ "name":"CreateVpnConnection", @@ -717,7 +717,7 @@ }, "input":{"shape":"DeleteEgressOnlyInternetGatewayRequest"}, "output":{"shape":"DeleteEgressOnlyInternetGatewayResult"}, - "documentation":"

Deletes an egress-only Internet gateway.

" + "documentation":"

Deletes an egress-only internet gateway.

" }, "DeleteFleets":{ "name":"DeleteFleets", @@ -756,7 +756,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteInternetGatewayRequest"}, - "documentation":"

Deletes the specified Internet gateway. You must detach the Internet gateway from the VPC before you can delete it.

" + "documentation":"

Deletes the specified internet gateway. You must detach the internet gateway from the VPC before you can delete it.

" }, "DeleteKeyPair":{ "name":"DeleteKeyPair", @@ -1058,7 +1058,7 @@ }, "input":{"shape":"DescribeClassicLinkInstancesRequest"}, "output":{"shape":"DescribeClassicLinkInstancesResult"}, - "documentation":"

Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink; you cannot use this request to return information about other instances.

" + "documentation":"

Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink. You cannot use this request to return information about other instances.

" }, "DescribeConversionTasks":{ "name":"DescribeConversionTasks", @@ -1088,7 +1088,7 @@ }, "input":{"shape":"DescribeDhcpOptionsRequest"}, "output":{"shape":"DescribeDhcpOptionsResult"}, - "documentation":"

Describes one or more of your DHCP options sets.

For more information about DHCP options sets, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your DHCP options sets.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeEgressOnlyInternetGateways":{ "name":"DescribeEgressOnlyInternetGateways", @@ -1098,7 +1098,7 @@ }, "input":{"shape":"DescribeEgressOnlyInternetGatewaysRequest"}, "output":{"shape":"DescribeEgressOnlyInternetGatewaysResult"}, - "documentation":"

Describes one or more of your egress-only Internet gateways.

" + "documentation":"

Describes one or more of your egress-only internet gateways.

" }, "DescribeElasticGpus":{ "name":"DescribeElasticGpus", @@ -1188,7 +1188,7 @@ }, "input":{"shape":"DescribeHostReservationOfferingsRequest"}, "output":{"shape":"DescribeHostReservationOfferingsResult"}, - "documentation":"

Describes the Dedicated Host Reservations that are available to purchase.

The results describe all the Dedicated Host Reservation offerings, including offerings that may not match the instance family and region of your Dedicated Hosts. When purchasing an offering, ensure that the the instance family and region of the offering matches that of the Dedicated Host/s it will be associated with. For an overview of supported instance types, see Dedicated Hosts Overview in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the Dedicated Host reservations that are available to purchase.

The results describe all the Dedicated Host reservation offerings, including offerings that may not match the instance family and Region of your Dedicated Hosts. When purchasing an offering, ensure that the instance family and Region of the offering match those of the Dedicated Hosts with which it is to be associated. For more information about supported instance types, see Dedicated Hosts Overview in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeHostReservations":{ "name":"DescribeHostReservations", @@ -1198,7 +1198,7 @@ }, "input":{"shape":"DescribeHostReservationsRequest"}, "output":{"shape":"DescribeHostReservationsResult"}, - "documentation":"

Describes Dedicated Host Reservations which are associated with Dedicated Hosts in your account.

" + "documentation":"

Describes reservations that are associated with Dedicated Hosts in your account.

" }, "DescribeHosts":{ "name":"DescribeHosts", @@ -1208,7 +1208,7 @@ }, "input":{"shape":"DescribeHostsRequest"}, "output":{"shape":"DescribeHostsResult"}, - "documentation":"

Describes one or more of your Dedicated Hosts.

The results describe only the Dedicated Hosts in the region you're currently using. All listed instances consume capacity on your Dedicated Host. Dedicated Hosts that have recently been released will be listed with the state released.

" + "documentation":"

Describes one or more of your Dedicated Hosts.

The results describe only the Dedicated Hosts in the region you're currently using. All listed instances consume capacity on your Dedicated Host. Dedicated Hosts that have recently been released are listed with the state released.

" }, "DescribeIamInstanceProfileAssociations":{ "name":"DescribeIamInstanceProfileAssociations", @@ -1328,7 +1328,7 @@ }, "input":{"shape":"DescribeInternetGatewaysRequest"}, "output":{"shape":"DescribeInternetGatewaysResult"}, - "documentation":"

Describes one or more of your Internet gateways.

" + "documentation":"

Describes one or more of your internet gateways.

" }, "DescribeKeyPairs":{ "name":"DescribeKeyPairs", @@ -1378,7 +1378,7 @@ }, "input":{"shape":"DescribeNatGatewaysRequest"}, "output":{"shape":"DescribeNatGatewaysResult"}, - "documentation":"

Describes one or more of the your NAT gateways.

" + "documentation":"

Describes one or more of your NAT gateways.

" }, "DescribeNetworkAcls":{ "name":"DescribeNetworkAcls", @@ -1388,7 +1388,7 @@ }, "input":{"shape":"DescribeNetworkAclsRequest"}, "output":{"shape":"DescribeNetworkAclsResult"}, - "documentation":"

Describes one or more of your network ACLs.

For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your network ACLs.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeNetworkInterfaceAttribute":{ "name":"DescribeNetworkInterfaceAttribute", @@ -1508,7 +1508,7 @@ }, "input":{"shape":"DescribeRouteTablesRequest"}, "output":{"shape":"DescribeRouteTablesResult"}, - "documentation":"

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeScheduledInstanceAvailability":{ "name":"DescribeScheduledInstanceAvailability", @@ -1648,7 +1648,7 @@ }, "input":{"shape":"DescribeSubnetsRequest"}, "output":{"shape":"DescribeSubnetsResult"}, - "documentation":"

Describes one or more of your subnets.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your subnets.

For more information, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeTags":{ "name":"DescribeTags", @@ -1847,7 +1847,7 @@ "requestUri":"/" }, "input":{"shape":"DetachInternetGatewayRequest"}, - "documentation":"

Detaches an Internet gateway from a VPC, disabling connectivity between the Internet and the VPC. The VPC must not contain any running instances with Elastic IP addresses or public IPv4 addresses.

" + "documentation":"

Detaches an internet gateway from a VPC, disabling connectivity between the internet and the VPC. The VPC must not contain any running instances with Elastic IP addresses or public IPv4 addresses.

" }, "DetachNetworkInterface":{ "name":"DetachNetworkInterface", @@ -1904,7 +1904,7 @@ }, "input":{"shape":"DisableVpcClassicLinkDnsSupportRequest"}, "output":{"shape":"DisableVpcClassicLinkDnsSupportResult"}, - "documentation":"

Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP addresses when addressed between a linked EC2-Classic instance and instances in the VPC to which it's linked. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP addresses when addressed between a linked EC2-Classic instance and instances in the VPC to which it's linked. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" }, "DisassociateAddress":{ "name":"DisassociateAddress", @@ -1980,7 +1980,7 @@ }, "input":{"shape":"EnableVpcClassicLinkRequest"}, "output":{"shape":"EnableVpcClassicLinkResult"}, - "documentation":"

Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. You cannot enable your VPC for ClassicLink if any of your VPC's route tables have existing routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. You cannot enable your VPC for ClassicLink if any of your VPC route tables have existing routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" }, "EnableVpcClassicLinkDnsSupport":{ "name":"EnableVpcClassicLinkDnsSupport", @@ -1990,7 +1990,7 @@ }, "input":{"shape":"EnableVpcClassicLinkDnsSupportRequest"}, "output":{"shape":"EnableVpcClassicLinkDnsSupportResult"}, - "documentation":"

Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" }, "GetConsoleOutput":{ "name":"GetConsoleOutput", @@ -2000,7 +2000,7 @@ }, "input":{"shape":"GetConsoleOutputRequest"}, "output":{"shape":"GetConsoleOutputResult"}, - "documentation":"

Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes output from the EC2Config service.

GetConsoleOutput returns up to 64 KB of console output shortly after it's generated by the instance.

By default, the console output returns buffered information that was posted shortly after an instance transition state (start, stop, reboot, or terminate). This information is available for at least one hour after the most recent post.

You can optionally retrieve the latest serial console output at any time during the instance lifecycle. This option is only supported on C5, M5, and i3.metal instances.

" + "documentation":"

Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors.

By default, the console output returns buffered information that was posted shortly after an instance transitions state (start, stop, reboot, or terminate). This information is available for at least one hour after the most recent post. Only the most recent 64 KB of console output is available.

You can optionally retrieve the latest serial console output at any time during the instance lifecycle. This option is supported on instance types that use the Nitro hypervisor.

For more information, see Instance Console Output in the Amazon Elastic Compute Cloud User Guide.
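A rough botocore sketch of the two retrieval modes described above, assuming the Latest request flag that accompanies the serial-console option; the instance ID is a placeholder. The returned Output field is base64-encoded.

    import base64
    import botocore.session

    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    # Buffered output posted around the last state transition ...
    buffered = ec2.get_console_output(InstanceId="i-0abc1234def567890")

    # ... or, on supported (Nitro-based) instance types, the latest serial output.
    latest = ec2.get_console_output(InstanceId="i-0abc1234def567890", Latest=True)

    print(base64.b64decode(latest.get("Output", "")).decode(errors="replace"))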

" }, "GetConsoleScreenshot":{ "name":"GetConsoleScreenshot", @@ -2130,7 +2130,7 @@ }, "input":{"shape":"ModifyHostsRequest"}, "output":{"shape":"ModifyHostsResult"}, - "documentation":"

Modify the auto-placement setting of a Dedicated Host. When auto-placement is enabled, AWS will place instances that you launch with a tenancy of host, but without targeting a specific host ID, onto any available Dedicated Host in your account which has auto-placement enabled. When auto-placement is disabled, you need to provide a host ID if you want the instance to launch onto a specific host. If no host ID is provided, the instance will be launched onto a suitable host which has auto-placement enabled.

" + "documentation":"

Modifies the auto-placement setting of a Dedicated Host. When auto-placement is enabled, any instances that you launch with a tenancy of host but without a specific host ID are placed onto any available Dedicated Host in your account that has auto-placement enabled. When auto-placement is disabled, you need to provide a host ID to have the instance launch onto a specific host. If no host ID is provided, the instance is launched onto a suitable host with auto-placement enabled.
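For example, a short botocore sketch that disables auto-placement on a (hypothetical) Dedicated Host, so instances with tenancy host must name it explicitly:

    import botocore.session

    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    # Turn auto-placement off for a hypothetical host ID.
    ec2.modify_hosts(HostIds=["h-0123456789abcdef0"], AutoPlacement="off")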

" }, "ModifyIdFormat":{ "name":"ModifyIdFormat", @@ -2311,7 +2311,7 @@ }, "input":{"shape":"ModifyVpcEndpointServicePermissionsRequest"}, "output":{"shape":"ModifyVpcEndpointServicePermissionsResult"}, - "documentation":"

Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, IAM roles, and AWS accounts) to connect to your endpoint service.

" + "documentation":"

Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, IAM roles, and AWS accounts) to connect to your endpoint service.

If you grant permissions to all principals, the service is public. Any users who know the name of a public service can send a request to attach an endpoint. If the service does not require manual approval, attachments are automatically approved.
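A minimal sketch of granting a single consumer access to an endpoint service; the service ID and account are placeholders, and the wildcard principal mentioned in the comment is an assumption about how "all principals" is expressed.

    import botocore.session

    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    # Allow one (hypothetical) AWS account to connect to the endpoint service.
    ec2.modify_vpc_endpoint_service_permissions(
        ServiceId="vpce-svc-0123456789abcdef0",
        AddAllowedPrincipals=["arn:aws:iam::123456789012:root"],
    )
    # Granting "*" as a principal would make the service public, as described above.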

" }, "ModifyVpcPeeringConnectionOptions":{ "name":"ModifyVpcPeeringConnectionOptions", @@ -2331,7 +2331,7 @@ }, "input":{"shape":"ModifyVpcTenancyRequest"}, "output":{"shape":"ModifyVpcTenancyResult"}, - "documentation":"

Modifies the instance tenancy attribute of the specified VPC. You can change the instance tenancy attribute of a VPC to default only. You cannot change the instance tenancy attribute to dedicated.

After you modify the tenancy of the VPC, any new instances that you launch into the VPC have a tenancy of default, unless you specify otherwise during launch. The tenancy of any existing instances in the VPC is not affected.

For more information about Dedicated Instances, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Modifies the instance tenancy attribute of the specified VPC. You can change the instance tenancy attribute of a VPC to default only. You cannot change the instance tenancy attribute to dedicated.

After you modify the tenancy of the VPC, any new instances that you launch into the VPC have a tenancy of default, unless you specify otherwise during launch. The tenancy of any existing instances in the VPC is not affected.

For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "MonitorInstances":{ "name":"MonitorInstances", @@ -2400,7 +2400,7 @@ }, "input":{"shape":"RegisterImageRequest"}, "output":{"shape":"RegisterImageResult"}, - "documentation":"

Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using the block device mapping. For more information, see Launching a Linux Instance from a Backup in the Amazon Elastic Compute Cloud User Guide.

You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to verify the subscription status for package updates. Creating an AMI from an EBS snapshot does not maintain this billing code, and subsequent instances launched from such an AMI will not be able to connect to package update infrastructure. To create an AMI that must retain billing codes, see CreateImage.

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

" + "documentation":"

Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using the block device mapping. For more information, see Launching a Linux Instance from a Backup in the Amazon Elastic Compute Cloud User Guide.

You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to verify the subscription status for package updates. Creating an AMI from an EBS snapshot does not maintain this billing code, and instances launched from such an AMI are not able to connect to package update infrastructure. If you purchase a Reserved Instance offering for one of these Linux distributions and launch instances using an AMI that does not contain the required billing code, your Reserved Instance is not applied to these instances.

To create an AMI for operating systems that require a billing code, see CreateImage.

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.
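A sketch of registering an EBS-backed Linux AMI from a root-volume snapshot, as described above; the name, snapshot ID, and device names are placeholders.

    import botocore.session

    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    # Register an AMI whose root device comes from a hypothetical snapshot.
    resp = ec2.register_image(
        Name="my-restored-image",
        Architecture="x86_64",
        VirtualizationType="hvm",
        RootDeviceName="/dev/xvda",
        BlockDeviceMappings=[{
            "DeviceName": "/dev/xvda",
            "Ebs": {"SnapshotId": "snap-0123456789abcdef0",
                    "DeleteOnTermination": True},
        }],
    )
    print(resp["ImageId"])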

" }, "RejectVpcEndpointConnections":{ "name":"RejectVpcEndpointConnections", @@ -2439,7 +2439,7 @@ }, "input":{"shape":"ReleaseHostsRequest"}, "output":{"shape":"ReleaseHostsResult"}, - "documentation":"

When you no longer want to use an On-Demand Dedicated Host it can be released. On-Demand billing is stopped and the host goes into released state. The host ID of Dedicated Hosts that have been released can no longer be specified in another request, e.g., ModifyHosts. You must stop or terminate all instances on a host before it can be released.

When Dedicated Hosts are released, it make take some time for them to stop counting toward your limit and you may receive capacity errors when trying to allocate new Dedicated hosts. Try waiting a few minutes, and then try again.

Released hosts will still appear in a DescribeHosts response.

" + "documentation":"

When you no longer want to use an On-Demand Dedicated Host, it can be released. On-Demand billing is stopped and the host goes into released state. The host ID of Dedicated Hosts that have been released can no longer be specified in another request, for example, ModifyHosts. You must stop or terminate all instances on a host before it can be released.

When Dedicated Hosts are released, it may take some time for them to stop counting toward your limit and you may receive capacity errors when trying to allocate new Dedicated Hosts. Wait a few minutes and then try again.

Released hosts still appear in a DescribeHosts response.

" }, "ReplaceIamInstanceProfileAssociation":{ "name":"ReplaceIamInstanceProfileAssociation", @@ -2459,7 +2459,7 @@ }, "input":{"shape":"ReplaceNetworkAclAssociationRequest"}, "output":{"shape":"ReplaceNetworkAclAssociationResult"}, - "documentation":"

Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

This is an idempotent operation.

" + "documentation":"

Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

This is an idempotent operation.

" }, "ReplaceNetworkAclEntry":{ "name":"ReplaceNetworkAclEntry", @@ -2468,7 +2468,7 @@ "requestUri":"/" }, "input":{"shape":"ReplaceNetworkAclEntryRequest"}, - "documentation":"

Replaces an entry (rule) in a network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Replaces an entry (rule) in a network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" }, "ReplaceRoute":{ "name":"ReplaceRoute", @@ -2477,7 +2477,7 @@ "requestUri":"/" }, "input":{"shape":"ReplaceRouteRequest"}, - "documentation":"

Replaces an existing route within a route table in a VPC. You must provide only one of the following: Internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only Internet gateway.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Replaces an existing route within a route table in a VPC. You must provide only one of the following: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "ReplaceRouteTableAssociation":{ "name":"ReplaceRouteTableAssociation", @@ -3279,7 +3279,7 @@ "members":{ "AssociationId":{ "shape":"String", - "documentation":"

The route table association ID (needed to disassociate the route table).

", + "documentation":"

The route table association ID. This ID is required for disassociating the route table.

", "locationName":"associationId" } }, @@ -3422,7 +3422,7 @@ }, "InternetGatewayId":{ "shape":"String", - "documentation":"

The ID of the Internet gateway.

", + "documentation":"

The ID of the internet gateway.

", "locationName":"internetGatewayId" }, "VpcId":{ @@ -3739,7 +3739,7 @@ "members":{ "AvailableInstanceCapacity":{ "shape":"AvailableInstanceCapacityList", - "documentation":"

The total number of instances that the Dedicated Host supports.

", + "documentation":"

The total number of instances supported by the Dedicated Host.

", "locationName":"availableInstanceCapacity" }, "AvailableVCpus":{ @@ -3796,7 +3796,7 @@ }, "VirtualName":{ "shape":"String", - "documentation":"

The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0 and ephemeral1.The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.

", + "documentation":"

The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0 and ephemeral1. The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

NVMe instance store volumes are automatically enumerated and assigned a device name. Including them in your block device mapping has no effect.

Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.
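As an illustration of the M3 constraint above, a botocore sketch that maps both available instance store volumes in the instance's own block device mapping; the AMI ID and device names are placeholders.

    import botocore.session

    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    # For M3, instance store volumes must be declared in the *instance* block
    # device mapping; ephemeral0/ephemeral1 are the two available volumes.
    ec2.run_instances(
        ImageId="ami-0123456789abcdef0",   # hypothetical AMI
        InstanceType="m3.xlarge",
        MinCount=1,
        MaxCount=1,
        BlockDeviceMappings=[
            {"DeviceName": "/dev/sdb", "VirtualName": "ephemeral0"},
            {"DeviceName": "/dev/sdc", "VirtualName": "ephemeral1"},
        ],
    )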

", "locationName":"virtualName" }, "Ebs":{ @@ -4858,7 +4858,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" }, "DryRun":{ "shape":"Boolean", @@ -4866,7 +4866,7 @@ }, "VpcId":{ "shape":"String", - "documentation":"

The ID of the VPC for which to create the egress-only Internet gateway.

" + "documentation":"

The ID of the VPC for which to create the egress-only internet gateway.

" } } }, @@ -4875,12 +4875,12 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "locationName":"clientToken" }, "EgressOnlyInternetGateway":{ "shape":"EgressOnlyInternetGateway", - "documentation":"

Information about the egress-only Internet gateway.

", + "documentation":"

Information about the egress-only internet gateway.

", "locationName":"egressOnlyInternetGateway" } } @@ -4902,7 +4902,11 @@ }, "SpotOptions":{ "shape":"SpotOptionsRequest", - "documentation":"

Includes SpotAllocationStrategy and SpotInstanceInterruptionBehavior inside this structure.

" + "documentation":"

Describes the configuration of Spot Instances in an EC2 Fleet.

" + }, + "OnDemandOptions":{ + "shape":"OnDemandOptionsRequest", + "documentation":"

The allocation strategy of On-Demand Instances in an EC2 Fleet.

" }, "ExcessCapacityTerminationPolicy":{ "shape":"FleetExcessCapacityTerminationPolicy", @@ -4956,24 +4960,26 @@ "CreateFlowLogsRequest":{ "type":"structure", "required":[ - "DeliverLogsPermissionArn", - "LogGroupName", "ResourceIds", "ResourceType", "TrafficType" ], "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" }, "DeliverLogsPermissionArn":{ "shape":"String", - "documentation":"

The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group.

" + "documentation":"

The ARN for the IAM role that's used to post flow logs to a log group.

" }, "LogGroupName":{ "shape":"String", - "documentation":"

The name of the CloudWatch log group.

" + "documentation":"

The name of the log group.

" }, "ResourceIds":{ "shape":"ValueStringList", @@ -4987,6 +4993,14 @@ "TrafficType":{ "shape":"TrafficType", "documentation":"

The type of traffic to log.

" + }, + "LogDestinationType":{ + "shape":"LogDestinationType", + "documentation":"

Specifies the type of destination to which the flow log data is to be published. Flow log data can be published to CloudWatch Logs or Amazon S3. To publish flow log data to CloudWatch Logs, specify cloud-watch-logs. To publish flow log data to Amazon S3, specify s3.

Default: cloud-watch-logs

" + }, + "LogDestination":{ + "shape":"String", + "documentation":"

Specifies the destination to which the flow log data is to be published. Flow log data can be published to a CloudWatch Logs log group or an Amazon S3 bucket. The value specified for this parameter depends on the value specified for LogDestinationType.

If LogDestinationType is not specified or cloud-watch-logs, specify the Amazon Resource Name (ARN) of the CloudWatch Logs log group.

If LogDestinationType is s3, specify the ARN of the Amazon S3 bucket. You can also specify a subfolder in the bucket. To specify a subfolder in the bucket, use the following ARN format: bucket_ARN/subfolder_name/. For example, to specify a subfolder named my-logs in a bucket named my-bucket, use the following ARN: arn:aws:s3:::my-bucket/my-logs/.
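Putting the new parameters together, a minimal sketch that publishes all traffic for a VPC to an S3 subfolder; the VPC ID and bucket ARN are placeholders, and no IAM role or log group is needed for the S3 destination.

    import botocore.session

    session = botocore.session.get_session()
    ec2 = session.create_client("ec2", region_name="us-east-1")

    # Publish flow logs for a hypothetical VPC to an S3 bucket subfolder.
    ec2.create_flow_logs(
        ResourceIds=["vpc-0abc1234"],
        ResourceType="VPC",
        TrafficType="ALL",
        LogDestinationType="s3",
        LogDestination="arn:aws:s3:::my-bucket/my-logs/",
    )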

" } }, "documentation":"

Contains the parameters for CreateFlowLogs.

" @@ -4996,7 +5010,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "locationName":"clientToken" }, "FlowLogIds":{ @@ -5162,7 +5176,7 @@ "members":{ "InternetGateway":{ "shape":"InternetGateway", - "documentation":"

Information about the Internet gateway.

", + "documentation":"

Information about the internet gateway.

", "locationName":"internetGateway" } }, @@ -5280,7 +5294,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

Constraint: Maximum 64 ASCII characters.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

Constraint: Maximum 64 ASCII characters.

" }, "SubnetId":{ "shape":"String", @@ -5352,7 +5366,7 @@ }, "Protocol":{ "shape":"String", - "documentation":"

The protocol. A value of -1 or all means all protocols. If you specify all, -1, or a protocol number other than 6 (tcp), 17 (udp), or 1 (icmp), traffic on all ports is allowed, regardless of any ports or ICMP types or codes you specify. If you specify protocol 58 (ICMPv6) and specify an IPv4 CIDR block, traffic for all ICMP types and codes allowed, regardless of any that you specify. If you specify protocol 58 (ICMPv6) and specify an IPv6 CIDR block, you must specify an ICMP type and code.

", + "documentation":"

The protocol. A value of -1 or all means all protocols. If you specify all, -1, or a protocol number other than 6 (tcp), 17 (udp), or 1 (icmp), traffic on all ports is allowed, regardless of any ports or ICMP types or codes that you specify. If you specify protocol 58 (ICMPv6) and specify an IPv4 CIDR block, traffic for all ICMP types and codes is allowed, regardless of any that you specify. If you specify protocol 58 (ICMPv6) and specify an IPv6 CIDR block, you must specify an ICMP type and code.

", "locationName":"protocol" }, "RuleAction":{ @@ -5589,12 +5603,12 @@ }, "EgressOnlyInternetGatewayId":{ "shape":"String", - "documentation":"

[IPv6 traffic only] The ID of an egress-only Internet gateway.

", + "documentation":"

[IPv6 traffic only] The ID of an egress-only internet gateway.

", "locationName":"egressOnlyInternetGatewayId" }, "GatewayId":{ "shape":"String", - "documentation":"

The ID of an Internet gateway or virtual private gateway attached to your VPC.

", + "documentation":"

The ID of an internet gateway or virtual private gateway attached to your VPC.

", "locationName":"gatewayId" }, "InstanceId":{ @@ -6246,7 +6260,7 @@ "members":{ "CpuCredits":{ "shape":"String", - "documentation":"

The credit option for CPU usage of a T2 instance.

", + "documentation":"

The credit option for CPU usage of a T2 instance. Valid values are standard and unlimited.

", "locationName":"cpuCredits" } }, @@ -6374,7 +6388,7 @@ }, "EgressOnlyInternetGatewayId":{ "shape":"EgressOnlyInternetGatewayId", - "documentation":"

The ID of the egress-only Internet gateway.

" + "documentation":"

The ID of the egress-only internet gateway.

" } } }, @@ -6505,6 +6519,10 @@ "type":"structure", "required":["FlowLogIds"], "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, "FlowLogIds":{ "shape":"ValueStringList", "documentation":"

One or more flow log IDs.

", @@ -6559,7 +6577,7 @@ }, "InternetGatewayId":{ "shape":"String", - "documentation":"

The ID of the Internet gateway.

", + "documentation":"

The ID of the internet gateway.

", "locationName":"internetGatewayId" } }, @@ -7204,7 +7222,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

", "locationName":"Filter" }, "PublicIps":{ @@ -7329,7 +7347,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -7344,7 +7362,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

Constraint: If the value is greater than 1000, we return only 1000 items.

", + "documentation":"

The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000. If MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

Constraint: If the value is greater than 1000, we return only 1000 items.

", "locationName":"maxResults" }, "NextToken":{ @@ -7415,7 +7433,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -7447,7 +7465,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -7478,12 +7496,12 @@ }, "EgressOnlyInternetGatewayIds":{ "shape":"EgressOnlyInternetGatewayIdList", - "documentation":"

One or more egress-only Internet gateway IDs.

", + "documentation":"

One or more egress-only internet gateway IDs.

", "locationName":"EgressOnlyInternetGatewayId" }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned.

" + "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000. If MaxResults is given a value larger than 1000, only 1000 results are returned.

" }, "NextToken":{ "shape":"String", @@ -7496,7 +7514,7 @@ "members":{ "EgressOnlyInternetGateways":{ "shape":"EgressOnlyInternetGatewayList", - "documentation":"

Information about the egress-only Internet gateways.

", + "documentation":"

Information about the egress-only internet gateways.

", "locationName":"egressOnlyInternetGatewaySet" }, "NextToken":{ @@ -7730,9 +7748,13 @@ "DescribeFlowLogsRequest":{ "type":"structure", "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, "Filter":{ "shape":"FilterList", - "documentation":"

One or more filters.

" + "documentation":"

One or more filters.

" }, "FlowLogIds":{ "shape":"ValueStringList", @@ -7741,7 +7763,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the flow log IDs parameter in the same request.

" + "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000. If MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the flow log IDs parameter in the same request.

" }, "NextToken":{ "shape":"String", @@ -7816,7 +7838,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "NextToken":{ @@ -7849,15 +7871,15 @@ "members":{ "Filter":{ "shape":"FilterList", - "documentation":"

One or more filters.

" + "documentation":"

One or more filters.

" }, "MaxDuration":{ "shape":"Integer", - "documentation":"

This is the maximum duration of the reservation you'd like to purchase, specified in seconds. Reservations are available in one-year and three-year terms. The number of seconds specified must be the number of seconds in a year (365x24x60x60) times one of the supported durations (1 or 3). For example, specify 94608000 for three years.

" + "documentation":"

This is the maximum duration of the reservation to purchase, specified in seconds. Reservations are available in one-year and three-year terms. The number of seconds specified must be the number of seconds in a year (365x24x60x60) times one of the supported durations (1 or 3). For example, specify 94608000 for three years.

" }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500; if maxResults is given a larger value than 500, you will receive an error.

" + "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

" }, "MinDuration":{ "shape":"Integer", @@ -7893,7 +7915,7 @@ "members":{ "Filter":{ "shape":"FilterList", - "documentation":"

One or more filters.

" + "documentation":"

One or more filters.

" }, "HostReservationIdSet":{ "shape":"HostReservationIdSet", @@ -7901,7 +7923,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500; if maxResults is given a larger value than 500, you will receive an error.

" + "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

" }, "NextToken":{ "shape":"String", @@ -7929,7 +7951,7 @@ "members":{ "Filter":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"filter" }, "HostIds":{ @@ -7939,7 +7961,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500; if maxResults is given a larger value than 500, you will receive an error. You cannot specify this parameter and the host IDs parameter in the same request.

", + "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error. You cannot specify this parameter and the host IDs parameter in the same request.

", "locationName":"maxResults" }, "NextToken":{ @@ -8086,7 +8108,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "ImageIds":{ @@ -8276,7 +8298,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "InstanceIds":{ @@ -8326,7 +8348,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "InstanceIds":{ @@ -8373,7 +8395,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -8383,7 +8405,7 @@ }, "InternetGatewayIds":{ "shape":"ValueStringList", - "documentation":"

One or more Internet gateway IDs.

Default: Describes all your Internet gateways.

", + "documentation":"

One or more internet gateway IDs.

Default: Describes all your internet gateways.

", "locationName":"internetGatewayId" } }, @@ -8394,7 +8416,7 @@ "members":{ "InternetGateways":{ "shape":"InternetGatewayList", - "documentation":"

Information about one or more Internet gateways.

", + "documentation":"

Information about one or more internet gateways.

", "locationName":"internetGatewaySet" } }, @@ -8509,7 +8531,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "NextToken":{ @@ -8589,7 +8611,7 @@ "members":{ "Filter":{ "shape":"FilterList", - "documentation":"

One or more filters.

" + "documentation":"

One or more filters.

" }, "MaxResults":{ "shape":"Integer", @@ -8628,7 +8650,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -8753,7 +8775,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"filter" }, "DryRun":{ @@ -9088,7 +9110,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "OfferingClass":{ @@ -9129,7 +9151,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -9292,7 +9314,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

", + "documentation":"

One or more filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

", "locationName":"Filter" }, "GroupIds":{ @@ -9386,7 +9408,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "MaxResults":{ @@ -9635,7 +9657,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -9771,7 +9793,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "SubnetIds":{ @@ -9829,7 +9851,7 @@ "members":{ "NextToken":{ "shape":"String", - "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return..

", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "locationName":"nextToken" }, "Tags":{ @@ -9973,7 +9995,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "VolumeIds":{ @@ -10100,7 +10122,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -10387,7 +10409,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -10419,7 +10441,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "VpcIds":{ @@ -10451,7 +10473,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "VpnConnectionIds":{ @@ -10483,7 +10505,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "VpnGatewayIds":{ @@ -10560,7 +10582,7 @@ }, "InternetGatewayId":{ "shape":"String", - "documentation":"

The ID of the Internet gateway.

", + "documentation":"

The ID of the internet gateway.

", "locationName":"internetGatewayId" }, "VpcId":{ @@ -11112,16 +11134,16 @@ "members":{ "Attachments":{ "shape":"InternetGatewayAttachmentList", - "documentation":"

Information about the attachment of the egress-only Internet gateway.

", + "documentation":"

Information about the attachment of the egress-only internet gateway.

", "locationName":"attachmentSet" }, "EgressOnlyInternetGatewayId":{ "shape":"EgressOnlyInternetGatewayId", - "documentation":"

The ID of the egress-only Internet gateway.

", + "documentation":"

The ID of the egress-only internet gateway.

", "locationName":"egressOnlyInternetGatewayId" } }, - "documentation":"

Describes an egress-only Internet gateway.

" + "documentation":"

Describes an egress-only internet gateway.

" }, "EgressOnlyInternetGatewayId":{"type":"string"}, "EgressOnlyInternetGatewayIdList":{ @@ -11656,6 +11678,11 @@ "documentation":"

The configuration of Spot Instances in an EC2 Fleet.

", "locationName":"spotOptions" }, + "OnDemandOptions":{ + "shape":"OnDemandOptions", + "documentation":"

The allocation strategy of On-Demand Instances in an EC2 Fleet.

", + "locationName":"onDemandOptions" + }, "Tags":{ "shape":"TagList", "documentation":"

The tags for an EC2 Fleet resource.

", @@ -11756,6 +11783,11 @@ "shape":"Double", "documentation":"

The number of units provided by the specified instance type.

", "locationName":"weightedCapacity" + }, + "Priority":{ + "shape":"Double", + "documentation":"

The priority for the launch template override. If AllocationStrategy is set to prioritized, EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity. The highest priority is launched first. Valid values are whole numbers starting at 0. The lower the number, the higher the priority. If no number is set, the override has the lowest priority.

", + "locationName":"priority" } }, "documentation":"

Describes overrides for a launch template.

" @@ -11797,6 +11829,10 @@ "WeightedCapacity":{ "shape":"Double", "documentation":"

The number of units provided by the specified instance type.

" + }, + "Priority":{ + "shape":"Double", + "documentation":"

The priority for the launch template override. If AllocationStrategy is set to prioritized, EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity. The highest priority is launched first. Valid values are whole numbers starting at 0. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority.

" } }, "documentation":"

Describes overrides for a launch template.

" @@ -11840,6 +11876,13 @@ }, "documentation":"

The launch template to use. You must specify either the launch template ID or launch template name in the request.

" }, + "FleetOnDemandAllocationStrategy":{ + "type":"string", + "enum":[ + "lowest-price", + "prioritized" + ] + }, "FleetSet":{ "type":"list", "member":{ @@ -11877,7 +11920,7 @@ }, "DeliverLogsErrorMessage":{ "shape":"String", - "documentation":"

Information about the error that occurred. Rate limited indicates that CloudWatch logs throttling has been applied for one or more network interfaces, or that you've reached the limit on the number of CloudWatch Logs log groups that you can create. Access error indicates that the IAM role associated with the flow log does not have sufficient permissions to publish to CloudWatch Logs. Unknown error indicates an internal error.

", + "documentation":"

Information about the error that occurred. Rate limited indicates that CloudWatch Logs throttling has been applied for one or more network interfaces, or that you've reached the limit on the number of log groups that you can create. Access error indicates that the IAM role associated with the flow log does not have sufficient permissions to publish to CloudWatch Logs. Unknown error indicates an internal error.

", "locationName":"deliverLogsErrorMessage" }, "DeliverLogsPermissionArn":{ @@ -11914,6 +11957,16 @@ "shape":"TrafficType", "documentation":"

The type of traffic captured for the flow log.

", "locationName":"trafficType" + }, + "LogDestinationType":{ + "shape":"LogDestinationType", + "documentation":"

Specifies the type of destination to which the flow log data is published. Flow log data can be published to CloudWatch Logs or Amazon S3.

", + "locationName":"logDestinationType" + }, + "LogDestination":{ + "shape":"String", + "documentation":"

Specifies the destination to which the flow log data is published. Flow log data can be published to a CloudWatch Logs log group or an Amazon S3 bucket. If the flow log publishes to CloudWatch Logs, this element indicates the Amazon Resource Name (ARN) of the CloudWatch Logs log group to which the data is published. If the flow log publishes to Amazon S3, this element indicates the ARN of the Amazon S3 bucket to which the data is published.

", + "locationName":"logDestination" } }, "documentation":"

Describes a flow log.

" @@ -12177,7 +12230,7 @@ "members":{ "HostIdSet":{ "shape":"RequestHostIdSet", - "documentation":"

The ID/s of the Dedicated Host/s that the reservation will be associated with.

" + "documentation":"

The IDs of the Dedicated Hosts with which the reservation is associated.

" }, "OfferingId":{ "shape":"String", @@ -12195,7 +12248,7 @@ }, "Purchase":{ "shape":"PurchaseSet", - "documentation":"

The purchase information of the Dedicated Host Reservation and the Dedicated Hosts associated with it.

", + "documentation":"

The purchase information of the Dedicated Host reservation and the Dedicated Hosts associated with it.

", "locationName":"purchase" }, "TotalHourlyPrice":{ @@ -12475,7 +12528,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"clientToken" }, "HostId":{ @@ -14502,7 +14555,7 @@ "members":{ "Code":{ "shape":"Integer", - "documentation":"

The low byte represents the state. The high byte is an opaque internal value and should be ignored.

", + "documentation":"

The low byte represents the state. The high byte is used for internal purposes and should be ignored.

", "locationName":"code" }, "Name":{ @@ -14712,6 +14765,24 @@ "r4.4xlarge", "r4.8xlarge", "r4.16xlarge", + "r5.large", + "r5.xlarge", + "r5.2xlarge", + "r5.4xlarge", + "r5.8xlarge", + "r5.12xlarge", + "r5.16xlarge", + "r5.24xlarge", + "r5.metal", + "r5d.large", + "r5d.xlarge", + "r5d.2xlarge", + "r5d.4xlarge", + "r5d.8xlarge", + "r5d.12xlarge", + "r5d.16xlarge", + "r5d.24xlarge", + "r5d.metal", "x1.16xlarge", "x1.32xlarge", "x1e.xlarge", @@ -14792,7 +14863,13 @@ "h1.2xlarge", "h1.4xlarge", "h1.8xlarge", - "h1.16xlarge" + "h1.16xlarge", + "z1d.large", + "z1d.xlarge", + "z1d.2xlarge", + "z1d.3xlarge", + "z1d.6xlarge", + "z1d.12xlarge" ] }, "InstanceTypeList":{ @@ -14812,28 +14889,28 @@ "members":{ "Attachments":{ "shape":"InternetGatewayAttachmentList", - "documentation":"

Any VPCs attached to the Internet gateway.

", + "documentation":"

Any VPCs attached to the internet gateway.

", "locationName":"attachmentSet" }, "InternetGatewayId":{ "shape":"String", - "documentation":"

The ID of the Internet gateway.

", + "documentation":"

The ID of the internet gateway.

", "locationName":"internetGatewayId" }, "Tags":{ "shape":"TagList", - "documentation":"

Any tags assigned to the Internet gateway.

", + "documentation":"

Any tags assigned to the internet gateway.

", "locationName":"tagSet" } }, - "documentation":"

Describes an Internet gateway.

" + "documentation":"

Describes an internet gateway.

" }, "InternetGatewayAttachment":{ "type":"structure", "members":{ "State":{ "shape":"AttachmentStatus", - "documentation":"

The current state of the attachment. For an Internet gateway, the state is available when attached to a VPC; otherwise, this value is not returned.

", + "documentation":"

The current state of the attachment. For an internet gateway, the state is available when attached to a VPC; otherwise, this value is not returned.

", "locationName":"state" }, "VpcId":{ @@ -14842,7 +14919,7 @@ "locationName":"vpcId" } }, - "documentation":"

Describes the attachment of a VPC to an Internet gateway or an egress-only Internet gateway.

" + "documentation":"

Describes the attachment of a VPC to an internet gateway or an egress-only internet gateway.

" }, "InternetGatewayAttachmentList":{ "type":"list", @@ -15637,6 +15714,11 @@ "shape":"Double", "documentation":"

The number of units provided by the specified instance type.

", "locationName":"weightedCapacity" + }, + "Priority":{ + "shape":"Double", + "documentation":"

The priority for the launch template override. If OnDemandAllocationStrategy is set to prioritized, Spot Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity. The highest priority is launched first. Valid values are whole numbers starting at 0. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority.

", + "locationName":"priority" } }, "documentation":"

Describes overrides for a launch template.

" @@ -16007,6 +16089,13 @@ }, "documentation":"

Describes a load permission.

" }, + "LogDestinationType":{ + "type":"string", + "enum":[ + "cloud-watch-logs", + "s3" + ] + }, "Long":{"type":"long"}, "MarketType":{ "type":"string", @@ -16047,7 +16136,7 @@ "members":{ "Return":{ "shape":"Boolean", - "documentation":"

Is true if the request succeeds, and an error otherwise.

", + "documentation":"

Is true if the request succeeds, and an error otherwise.

", "locationName":"return" } } @@ -16125,7 +16214,7 @@ }, "HostIds":{ "shape":"RequestHostIdList", - "documentation":"

The host IDs of the Dedicated Hosts you want to modify.

", + "documentation":"

The IDs of the Dedicated Hosts to modify.

", "locationName":"hostId" } }, @@ -16670,7 +16759,7 @@ }, "EnableDnsSupport":{ "shape":"AttributeBooleanValue", - "documentation":"

Indicates whether the DNS resolution is supported for the VPC. If enabled, queries to the Amazon provided DNS server at the 169.254.169.253 IP address, or the reserved IP address at the base of the VPC network range \"plus two\" will succeed. If disabled, the Amazon provided DNS service in the VPC that resolves public DNS hostnames to IP addresses is not enabled.

You cannot modify the DNS resolution and DNS hostnames attributes in the same request. Use separate requests for each attribute.

" + "documentation":"

Indicates whether the DNS resolution is supported for the VPC. If enabled, queries to the Amazon provided DNS server at the 169.254.169.253 IP address, or the reserved IP address at the base of the VPC network range \"plus two\" succeed. If disabled, the Amazon provided DNS service in the VPC that resolves public DNS hostnames to IP addresses is not enabled.

You cannot modify the DNS resolution and DNS hostnames attributes in the same request. Use separate requests for each attribute.

" }, "VpcId":{ "shape":"String", @@ -16831,11 +16920,11 @@ }, "AddAllowedPrincipals":{ "shape":"ValueStringList", - "documentation":"

One or more Amazon Resource Names (ARNs) of principals for which to allow permission. Specify * to allow all principals.

" + "documentation":"

The Amazon Resource Names (ARN) of one or more principals. Permissions are granted to the principals in this list. To grant permissions to all principals, specify an asterisk (*).

" }, "RemoveAllowedPrincipals":{ "shape":"ValueStringList", - "documentation":"

One or more Amazon Resource Names (ARNs) of principals for which to remove permission.

" + "documentation":"

The Amazon Resource Names (ARN) of one or more principals. Permissions are revoked for principals in this list.

" } } }, @@ -17679,6 +17768,34 @@ "All Upfront" ] }, + "OnDemandAllocationStrategy":{ + "type":"string", + "enum":[ + "lowestPrice", + "prioritized" + ] + }, + "OnDemandOptions":{ + "type":"structure", + "members":{ + "AllocationStrategy":{ + "shape":"FleetOnDemandAllocationStrategy", + "documentation":"

The order of the launch template overrides to use in fulfilling On-Demand capacity. If you specify lowest-price, EC2 Fleet uses price to determine the order, launching the lowest price first. If you specify prioritized, EC2 Fleet uses the priority that you assigned to each launch template override, launching the highest priority first. If you do not specify a value, EC2 Fleet defaults to lowest-price.

", + "locationName":"allocationStrategy" + } + }, + "documentation":"

The allocation strategy of On-Demand Instances in an EC2 Fleet.

" + }, + "OnDemandOptionsRequest":{ + "type":"structure", + "members":{ + "AllocationStrategy":{ + "shape":"FleetOnDemandAllocationStrategy", + "documentation":"

The order of the launch template overrides to use in fulfilling On-Demand capacity. If you specify lowest-price, EC2 Fleet uses price to determine the order, launching the lowest price first. If you specify prioritized, EC2 Fleet uses the priority that you assigned to each launch template override, launching the highest priority first. If you do not specify a value, EC2 Fleet defaults to lowest-price.

" + } + }, + "documentation":"

The allocation strategy of On-Demand Instances in an EC2 Fleet.

" + }, "OperationType":{ "type":"string", "enum":[ @@ -17781,7 +17898,7 @@ }, "GroupName":{ "shape":"String", - "documentation":"

The name of the placement group the instance is in (for cluster compute instances).

", + "documentation":"

The name of the placement group the instance is in.

", "locationName":"groupName" }, "HostId":{ @@ -18129,7 +18246,7 @@ "members":{ "GatewayId":{ "shape":"String", - "documentation":"

The ID of the virtual private gateway (VGW).

", + "documentation":"

The ID of the virtual private gateway.

", "locationName":"gatewayId" } }, @@ -18243,11 +18360,11 @@ }, "HostIdSet":{ "shape":"RequestHostIdSet", - "documentation":"

The ID/s of the Dedicated Host/s that the reservation will be associated with.

" + "documentation":"

The IDs of the Dedicated Hosts with which the reservation will be associated.

" }, "LimitPrice":{ "shape":"String", - "documentation":"

The specified limit is checked against the total upfront cost of the reservation (calculated as the offering's upfront cost multiplied by the host count). If the total upfront cost is greater than the specified price limit, the request will fail. This is used to ensure that the purchase does not exceed the expected upfront cost of the purchase. At this time, the only supported currency is USD. For example, to indicate a limit price of USD 100, specify 100.00.

" + "documentation":"

The specified limit is checked against the total upfront cost of the reservation (calculated as the offering's upfront cost multiplied by the host count). If the total upfront cost is greater than the specified price limit, the request fails. This is used to ensure that the purchase does not exceed the expected upfront cost of the purchase. At this time, the only supported currency is USD. For example, to indicate a limit price of USD 100, specify 100.00.

" }, "OfferingId":{ "shape":"String", @@ -18260,7 +18377,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide

", + "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"clientToken" }, "CurrencyCode":{ @@ -18280,7 +18397,7 @@ }, "TotalUpfrontPrice":{ "shape":"String", - "documentation":"

The total amount that will be charged to your account when you purchase the reservation.

", + "documentation":"

The total amount charged to your account when you purchase the reservation.

", "locationName":"totalUpfrontPrice" } } @@ -18653,7 +18770,7 @@ "members":{ "HostIds":{ "shape":"RequestHostIdList", - "documentation":"

The IDs of the Dedicated Hosts you want to release.

", + "documentation":"

The IDs of the Dedicated Hosts to release.

", "locationName":"hostId" } }, @@ -18785,7 +18902,7 @@ }, "Protocol":{ "shape":"String", - "documentation":"

The IP protocol. You can specify all or -1 to mean all protocols. If you specify all, -1, or a protocol number other than tcp, udp, or icmp, traffic on all ports is allowed, regardless of any ports or ICMP types or codes you specify. If you specify protocol 58 (ICMPv6) and specify an IPv4 CIDR block, traffic for all ICMP types and codes allowed, regardless of any that you specify. If you specify protocol 58 (ICMPv6) and specify an IPv6 CIDR block, you must specify an ICMP type and code.

", + "documentation":"

The IP protocol. You can specify all or -1 to mean all protocols. If you specify all, -1, or a protocol number other than tcp, udp, or icmp, traffic on all ports is allowed, regardless of any ports or ICMP types or codes that you specify. If you specify protocol 58 (ICMPv6) and specify an IPv4 CIDR block, traffic for all ICMP types and codes is allowed, regardless of any that you specify. If you specify protocol 58 (ICMPv6) and specify an IPv6 CIDR block, you must specify an ICMP type and code.

", "locationName":"protocol" }, "RuleAction":{ @@ -18807,12 +18924,12 @@ "members":{ "DestinationCidrBlock":{ "shape":"String", - "documentation":"

The IPv4 CIDR address block used for the destination match. The value you provide must match the CIDR of an existing route in the table.

", + "documentation":"

The IPv4 CIDR address block used for the destination match. The value that you provide must match the CIDR of an existing route in the table.

", "locationName":"destinationCidrBlock" }, "DestinationIpv6CidrBlock":{ "shape":"String", - "documentation":"

The IPv6 CIDR address block used for the destination match. The value you provide must match the CIDR of an existing route in the table.

", + "documentation":"

The IPv6 CIDR address block used for the destination match. The value that you provide must match the CIDR of an existing route in the table.

", "locationName":"destinationIpv6CidrBlock" }, "DryRun":{ @@ -18822,12 +18939,12 @@ }, "EgressOnlyInternetGatewayId":{ "shape":"String", - "documentation":"

[IPv6 traffic only] The ID of an egress-only Internet gateway.

", + "documentation":"

[IPv6 traffic only] The ID of an egress-only internet gateway.

", "locationName":"egressOnlyInternetGatewayId" }, "GatewayId":{ "shape":"String", - "documentation":"

The ID of an Internet gateway or virtual private gateway.

", + "documentation":"

The ID of an internet gateway or virtual private gateway.

", "locationName":"gatewayId" }, "InstanceId":{ @@ -20252,7 +20369,7 @@ }, "EgressOnlyInternetGatewayId":{ "shape":"String", - "documentation":"

The ID of the egress-only Internet gateway.

", + "documentation":"

The ID of the egress-only internet gateway.

", "locationName":"egressOnlyInternetGatewayId" }, "GatewayId":{ @@ -20550,7 +20667,7 @@ }, "InstanceMarketOptions":{ "shape":"InstanceMarketOptionsRequest", - "documentation":"

The market (purchasing) option for the instances.

" + "documentation":"

The market (purchasing) option for the instances.

For RunInstances, persistent Spot Instance requests are only supported when InstanceInterruptionBehavior is set to either hibernate or stop.

" }, "CreditSpecification":{ "shape":"CreditSpecificationRequest", @@ -21866,6 +21983,11 @@ "documentation":"

Indicates how to allocate the target capacity across the Spot pools specified by the Spot Fleet request. The default is lowestPrice.

", "locationName":"allocationStrategy" }, + "OnDemandAllocationStrategy":{ + "shape":"OnDemandAllocationStrategy", + "documentation":"

The order of the launch template overrides to use in fulfilling On-Demand capacity. If you specify lowestPrice, Spot Fleet uses price to determine the order, launching the lowest price first. If you specify prioritized, Spot Fleet uses the priority that you assign to each Spot Fleet launch template override, launching the highest priority first. If you do not specify a value, Spot Fleet defaults to lowestPrice.

", + "locationName":"onDemandAllocationStrategy" + }, "ClientToken":{ "shape":"String", "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of your listings. This helps to avoid duplicate listings. For more information, see Ensuring Idempotency.

", @@ -21950,6 +22072,11 @@ "shape":"LoadBalancersConfig", "documentation":"

One or more Classic Load Balancers and target groups to attach to the Spot Fleet request. Spot Fleet registers the running Spot Instances with the specified Classic Load Balancers and target groups.

With Network Load Balancers, Spot Fleet cannot register instances that have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1.

", "locationName":"loadBalancersConfig" + }, + "InstancePoolsToUseCount":{ + "shape":"Integer", + "documentation":"

The number of Spot pools across which to allocate your target Spot capacity. Valid only when Spot AllocationStrategy is set to lowest-price. Spot Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

", + "locationName":"instancePoolsToUseCount" } }, "documentation":"

Describes the configuration of a Spot Fleet request.

" @@ -22170,7 +22297,7 @@ }, "SpotInstanceType":{ "shape":"SpotInstanceType", - "documentation":"

The Spot Instance request type.

" + "documentation":"

The Spot Instance request type. For RunInstances, persistent Spot Instance requests are only supported when InstanceInterruptionBehavior is set to either hibernate or stop.

" }, "BlockDurationMinutes":{ "shape":"Integer", @@ -22192,13 +22319,18 @@ "members":{ "AllocationStrategy":{ "shape":"SpotAllocationStrategy", - "documentation":"

Indicates how to allocate the target capacity across the Spot pools specified by the Spot Fleet request. The default is lowestPrice.

", + "documentation":"

Indicates how to allocate the target capacity across the Spot pools specified by the Spot Fleet request. The default is lowest-price.

", "locationName":"allocationStrategy" }, "InstanceInterruptionBehavior":{ "shape":"SpotInstanceInterruptionBehavior", "documentation":"

The behavior when a Spot Instance is interrupted. The default is terminate.

", "locationName":"instanceInterruptionBehavior" + }, + "InstancePoolsToUseCount":{ + "shape":"Integer", + "documentation":"

The number of Spot pools across which to allocate your target Spot capacity. Valid only when AllocationStrategy is set to lowest-price. EC2 Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

", + "locationName":"instancePoolsToUseCount" } }, "documentation":"

Describes the configuration of Spot Instances in an EC2 Fleet.

" @@ -22213,6 +22345,10 @@ "InstanceInterruptionBehavior":{ "shape":"SpotInstanceInterruptionBehavior", "documentation":"

The behavior when a Spot Instance is interrupted. The default is terminate.

" + }, + "InstancePoolsToUseCount":{ + "shape":"Integer", + "documentation":"

The number of Spot pools across which to allocate your target Spot capacity. Valid only when Spot AllocationStrategy is set to lowest-price. EC2 Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

" } }, "documentation":"

Describes the configuration of Spot Instances in an EC2 Fleet request.

" @@ -22514,7 +22650,7 @@ }, "AvailableIpAddressCount":{ "shape":"Integer", - "documentation":"

The number of unused private IPv4 addresses in the subnet. Note that the IPv4 addresses for any stopped instances are considered unavailable.

", + "documentation":"

The number of unused private IPv4 addresses in the subnet. The IPv4 addresses for any stopped instances are considered unavailable.

", "locationName":"availableIpAddressCount" }, "CidrBlock":{ @@ -24497,5 +24633,5 @@ ] } }, - "documentation":"Amazon Elastic Compute Cloud

Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity in the AWS Cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster.

" + "documentation":"Amazon Elastic Compute Cloud

Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster.

To learn more about Amazon EC2, Amazon EBS, and Amazon VPC, see the following resources:

" } diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index b14c5c7e..40cbc137 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -97,7 +97,7 @@ {"shape":"ClusterNotFoundException"}, {"shape":"ServiceNotFoundException"} ], - "documentation":"

Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in ListServices API operations. After the tasks have stopped, then the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with DescribeServices API operations. However, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices API operations on those services return a ServiceNotFoundException error.

" + "documentation":"

Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in ListServices API operations. After the tasks have stopped, then the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with DescribeServices API operations. However, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices API operations on those services return a ServiceNotFoundException error.

If you attempt to create a new service with the same name as an existing service in either ACTIVE or DRAINING status, you will receive an error.

" }, "DeregisterContainerInstance":{ "name":"DeregisterContainerInstance", @@ -374,7 +374,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

You can specify an IAM role for your task with the taskRoleArn parameter. When you specify an IAM role for a task, its containers can then use the latest versions of the AWS CLI or SDKs to make API requests to the AWS services that are specified in the IAM policy associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

You can specify a Docker networking mode for the containers in your task definition with the networkMode parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc network mode, the task is allocated an Elastic Network Interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

You can specify an IAM role for your task with the taskRoleArn parameter. When you specify an IAM role for a task, its containers can then use the latest versions of the AWS CLI or SDKs to make API requests to the AWS services that are specified in the IAM policy associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

You can specify a Docker networking mode for the containers in your task definition with the networkMode parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

" }, "RunTask":{ "name":"RunTask", @@ -395,7 +395,7 @@ {"shape":"AccessDeniedException"}, {"shape":"BlockedException"} ], - "documentation":"

Starts a new task using the specified task definition.

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

The Amazon ECS API follows an eventual consistency model, due to the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. You should keep this in mind when you carry out an API command that immediately follows a previous API command.

To manage eventual consistency, you can do the following:

" + "documentation":"

Starts a new task using the specified task definition.

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

The Amazon ECS API follows an eventual consistency model, due to the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. You should keep this in mind when you carry out an API command that immediately follows a previous API command.

To manage eventual consistency, you can do the following:

" }, "StartTask":{ "name":"StartTask", @@ -559,7 +559,7 @@ }, "details":{ "shape":"AttachmentDetails", - "documentation":"

Details of the attachment. For Elastic Network Interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address.

" + "documentation":"

Details of the attachment. For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address.

" } }, "documentation":"

An object representing a container instance or task attachment.

" @@ -634,15 +634,15 @@ "members":{ "subnets":{ "shape":"StringList", - "documentation":"

The subnets associated with the task or service. There is a limit of 10 subnets able to be specified per AwsVpcConfiguration.

" + "documentation":"

The subnets associated with the task or service. You can specify a maximum of 10 subnets per AwsVpcConfiguration.

All specified subnets must be from the same VPC.

" }, "securityGroups":{ "shape":"StringList", - "documentation":"

The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. There is a limit of 5 security groups able to be specified per AwsVpcConfiguration.

" + "documentation":"

The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. You can specify a maximum of 5 security groups per AwsVpcConfiguration.

All specified security groups must be from the same VPC.

" }, "assignPublicIp":{ "shape":"AssignPublicIp", - "documentation":"

Whether the task's elastic network interface receives a public IP address.

" + "documentation":"

Whether the task's elastic network interface receives a public IP address. The default value is DISABLED.

" } }, "documentation":"

An object representing the networking details for a task or service.

" @@ -676,7 +676,7 @@ "members":{ "clusterArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains the arn:aws:ecs namespace, followed by the region of the cluster, the AWS account ID of the cluster owner, the cluster namespace, and then the cluster name. For example, arn:aws:ecs:region:012345678910:cluster/test ..

" + "documentation":"

The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains the arn:aws:ecs namespace, followed by the Region of the cluster, the AWS account ID of the cluster owner, the cluster namespace, and then the cluster name. For example, arn:aws:ecs:region:012345678910:cluster/test.

" }, "clusterName":{ "shape":"String", @@ -688,7 +688,7 @@ }, "registeredContainerInstancesCount":{ "shape":"Integer", - "documentation":"

The number of container instances registered into the cluster.

" + "documentation":"

The number of container instances registered into the cluster. This includes container instances in both ACTIVE and DRAINING status.

" }, "runningTasksCount":{ "shape":"Integer", @@ -814,31 +814,35 @@ "members":{ "name":{ "shape":"String", - "documentation":"

The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

" + "documentation":"

The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

" }, "image":{ "shape":"String", - "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

" + "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

" + }, + "repositoryCredentials":{ + "shape":"RepositoryCredentials", + "documentation":"

The private repository authentication credentials to use.

" }, "cpu":{ "shape":"Integer", - "documentation":"

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2; however, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition.

" + "documentation":"

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2; however, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition.

" }, "memory":{ "shape":"BoxedInteger", - "documentation":"

The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If your containers are part of a task using the Fargate launch type, this field is optional and the only requirement is that the total amount of memory reserved for all containers within a task be lower than the task memory value.

For containers that are part of a task using the EC2 launch type, you must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" + "documentation":"

The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If your containers are part of a task using the Fargate launch type, this field is optional and the only requirement is that the total amount of memory reserved for all containers within a task be lower than the task memory value.

For containers that are part of a task using the EC2 launch type, you must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" }, "memoryReservation":{ "shape":"BoxedInteger", - "documentation":"

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit; however, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

You must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" + "documentation":"

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit; however, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

You must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" }, "links":{ "shape":"StringList", - "documentation":"

The link parameter allows containers to communicate with each other without the need for port mappings. Only supported if the network mode of a task definition is set to bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. For more information about linking Docker containers, go to https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run .

This parameter is not supported for Windows containers.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

" + "documentation":"

The link parameter allows containers to communicate with each other without the need for port mappings. Only supported if the network mode of a task definition is set to bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. For more information about linking Docker containers, go to https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run .

This parameter is not supported for Windows containers.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

" }, "portMappings":{ "shape":"PortMappingList", - "documentation":"

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, you should only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.

This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console, or the networkBindings section DescribeTasks responses.

" + "documentation":"

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, you should only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.

This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings section of DescribeTasks responses.

" }, "essential":{ "shape":"BoxedBoolean", @@ -846,23 +850,23 @@ }, "entryPoint":{ "shape":"StringList", - "documentation":"

Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

" + "documentation":"

Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

" }, "command":{ "shape":"StringList", - "documentation":"

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd.

" + "documentation":"

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd.

" }, "environment":{ "shape":"EnvironmentVariables", - "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

" + "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

" }, "mountPoints":{ "shape":"MountPointList", - "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives.

" + "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount points cannot be across drives.

" }, "volumesFrom":{ "shape":"VolumeFromList", - "documentation":"

Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

" + "documentation":"

Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

" }, "linuxParameters":{ "shape":"LinuxParameters", @@ -870,59 +874,59 @@ }, "hostname":{ "shape":"String", - "documentation":"

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if using the awsvpc networkMode.

" + "documentation":"

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if using the awsvpc networkMode.

" }, "user":{ "shape":"String", - "documentation":"

The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

This parameter is not supported for Windows containers.

" }, "workingDirectory":{ "shape":"String", - "documentation":"

The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

" + "documentation":"

The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

" }, "disableNetworking":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

This parameter is not supported for Windows containers.

" + "documentation":"

When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

This parameter is not supported for Windows containers.

" }, "privileged":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" + "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" }, "readonlyRootFilesystem":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers.

" }, "dnsServers":{ "shape":"StringList", - "documentation":"

A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

This parameter is not supported for Windows containers.

" }, "dnsSearchDomains":{ "shape":"StringList", - "documentation":"

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

This parameter is not supported for Windows containers.

" }, "extraHosts":{ "shape":"HostEntryList", - "documentation":"

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. If using the Fargate launch type, this may be used to list non-Fargate hosts you want the container to talk to. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. If using the Fargate launch type, this may be used to list non-Fargate hosts to which the container can talk. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

This parameter is not supported for Windows containers.

" }, "dockerSecurityOptions":{ "shape":"StringList", - "documentation":"

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers.

" }, "dockerLabels":{ "shape":"DockerLabelsMap", - "documentation":"

A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

" + "documentation":"

A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

" }, "ulimits":{ "shape":"UlimitList", - "documentation":"

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

This parameter is not supported for Windows containers.

" + "documentation":"

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

This parameter is not supported for Windows containers.

" }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

If using the Fargate launch type, the only supported value is awslogs.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses; however the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

If using the Fargate launch type, the only supported value is awslogs.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses; however, the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "healthCheck":{ "shape":"HealthCheck", - "documentation":"

The health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" + "documentation":"

The health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" } }, "documentation":"

Container definitions are used in task definitions to describe the different containers that are launched as part of a task.
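As a hedged illustration of how these container-level parameters fit together when a task definition is registered from Python, a minimal sketch follows; the container name, image, and health check command are assumptions, not part of the model.

```python
# Illustrative container definition; names and values are assumptions.
container_definition = {
    "name": "web",                          # hypothetical container name
    "image": "nginx:latest",                # hypothetical image
    "essential": True,
    "entryPoint": ["sh", "-c"],             # maps to Docker Entrypoint / --entrypoint
    "command": ["nginx -g 'daemon off;'"],  # maps to Docker Cmd / COMMAND
    "environment": [{"name": "ENV", "value": "prod"}],  # avoid secrets here
    "healthCheck": {                        # maps to Docker HealthCheck / HEALTHCHECK
        "command": ["CMD-SHELL", "curl -f http://localhost/ || exit 1"],
        "interval": 30,
        "timeout": 5,
        "retries": 3,
        "startPeriod": 10,
    },
}
```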

" @@ -936,7 +940,7 @@ "members":{ "containerInstanceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID .

" + "documentation":"

The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID .

" }, "ec2InstanceId":{ "shape":"String", @@ -964,7 +968,7 @@ }, "agentConnected":{ "shape":"Boolean", - "documentation":"

This parameter returns true if the agent is connected to Amazon ECS. Registered instances with an agent that may be unhealthy or stopped return false. Instances without a connected agent can't accept placement requests.

" + "documentation":"

This parameter returns true if the agent is connected to Amazon ECS. Registered instances with an agent that may be unhealthy or stopped return false. Only instances connected to an agent can accept placement requests.

" }, "runningTasksCount":{ "shape":"Integer", @@ -988,7 +992,7 @@ }, "attachments":{ "shape":"Attachments", - "documentation":"

The Elastic Network Interfaces associated with the container instance.

" + "documentation":"

The elastic network interfaces associated with the container instance.

" } }, "documentation":"

An EC2 instance that is running the Amazon ECS agent and has been registered with a cluster.

" @@ -1103,7 +1107,7 @@ }, "serviceName":{ "shape":"String", - "documentation":"

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a region or across multiple regions.

" + "documentation":"

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a Region or across multiple Regions.

" }, "taskDefinition":{ "shape":"String", @@ -1115,7 +1119,7 @@ }, "serviceRegistries":{ "shape":"ServiceRegistries", - "documentation":"

The details of the service discovery registries you want to assign to this service. For more information, see Service Discovery.

Service discovery is supported for Fargate tasks if using platform version v1.1.0 or later. For more information, see AWS Fargate Platform Versions.

" + "documentation":"

The details of the service discovery registries to assign to this service. For more information, see Service Discovery.

Service discovery is supported for Fargate tasks if using platform version v1.1.0 or later. For more information, see AWS Fargate Platform Versions.

" }, "desiredCount":{ "shape":"BoxedInteger", @@ -1155,7 +1159,7 @@ }, "healthCheckGracePeriodSeconds":{ "shape":"BoxedInteger", - "documentation":"

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 1,800 seconds during which the ECS service scheduler ignores health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.

" + "documentation":"

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 7,200 seconds during which the ECS service scheduler ignores health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.
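A hedged sketch of a CreateService call that combines a load balancer, the awsvpc network configuration, and a health check grace period is shown below; the cluster, task definition, target group ARN, subnet, and security group values are placeholders.

```python
# Sketch of CreateService; identifiers are placeholders, not real resources.
import botocore.session

ecs = botocore.session.get_session().create_client("ecs", region_name="us-east-1")

ecs.create_service(
    cluster="default",
    serviceName="my-service",
    taskDefinition="my-task:1",
    desiredCount=2,
    launchType="FARGATE",
    healthCheckGracePeriodSeconds=60,   # values up to 7,200 seconds are accepted
    loadBalancers=[{
        "targetGroupArn": "arn:aws:elasticloadbalancing:...:targetgroup/example",  # placeholder
        "containerName": "web",
        "containerPort": 80,
    }],
    networkConfiguration={
        "awsvpcConfiguration": {
            "subnets": ["subnet-12345678"],        # placeholder, same VPC as the security group
            "securityGroups": ["sg-12345678"],     # placeholder
            "assignPublicIp": "ENABLED",
        }
    },
)
```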

" }, "schedulingStrategy":{ "shape":"SchedulingStrategy", @@ -1286,7 +1290,7 @@ }, "networkConfiguration":{ "shape":"NetworkConfiguration", - "documentation":"

The VPC subnet and security group configuration for tasks that receive their own Elastic Network Interface by using the awsvpc networking mode.

" + "documentation":"

The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode.

" } }, "documentation":"

The details of an Amazon ECS service deployment.

" @@ -1319,7 +1323,7 @@ }, "containerInstance":{ "shape":"String", - "documentation":"

The container instance ID or full ARN of the container instance to deregister. The ARN contains the arn:aws:ecs namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID .

" + "documentation":"

The container instance ID or full ARN of the container instance to deregister. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID .

" }, "force":{ "shape":"BoxedBoolean", @@ -1529,7 +1533,7 @@ "members":{ "containerInstance":{ "shape":"String", - "documentation":"

The container instance ID or full ARN of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID .

" + "documentation":"

The container instance ID or full ARN of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID .

" }, "cluster":{ "shape":"String", @@ -1555,6 +1559,32 @@ "key":{"shape":"String"}, "value":{"shape":"String"} }, + "DockerVolumeConfiguration":{ + "type":"structure", + "members":{ + "scope":{ + "shape":"Scope", + "documentation":"

The scope for the Docker volume, which determines its lifecycle. Docker volumes that are scoped to a task are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as shared persist after the task stops.

" + }, + "autoprovision":{ + "shape":"BoxedBoolean", + "documentation":"

If this value is true, the Docker volume is created if it does not already exist.

This field is only used if the scope is shared.

" + }, + "driver":{ + "shape":"String", + "documentation":"

The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use docker plugin ls to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. For more information, see Docker plugin discovery. This parameter maps to Driver in the Create a volume section of the Docker Remote API and the --driver option to docker volume create.

" + }, + "driverOpts":{ + "shape":"StringMap", + "documentation":"

A map of Docker driver-specific options to pass through. This parameter maps to DriverOpts in the Create a volume section of the Docker Remote API and the --opt option to docker volume create.

" + }, + "labels":{ + "shape":"StringMap", + "documentation":"

Custom metadata to add to your Docker volume. This parameter maps to Labels in the Create a volume section of the Docker Remote API and the --label option to docker volume create.

" + } + }, + "documentation":"

The configuration for the Docker volume. This parameter is specified when using Docker volumes.
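A minimal sketch of how this new shape is supplied in a task definition's volumes list follows; the volume name, driver, and labels are assumptions.

```python
# Sketch of a task definition volume using a Docker volume (names are assumptions).
volumes = [{
    "name": "shared-data",
    "dockerVolumeConfiguration": {
        "scope": "shared",        # survives the task; "task"-scoped volumes are destroyed on stop
        "autoprovision": True,    # only valid when scope is "shared"
        "driver": "local",        # must match the driver name Docker reports
        "driverOpts": {},         # maps to DriverOpts / --opt
        "labels": {"owner": "example-team"},  # maps to Labels / --label
    },
}]

# A container definition then references the volume by name via mountPoints:
mount_points = [{"sourceVolume": "shared-data", "containerPath": "/data"}]
```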

" + }, "Double":{"type":"double"}, "EnvironmentVariables":{ "type":"list", @@ -1584,7 +1614,7 @@ "members":{ "command":{ "shape":"StringList", - "documentation":"

A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD to execute the command arguments directly, or CMD-SHELL to run the command with the container's default shell. For example:

[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]

An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see HealthCheck in the Create a container section of the Docker Remote API.

" + "documentation":"

A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD to execute the command arguments directly, or CMD-SHELL to run the command with the container's default shell. For example:

[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]

An exit code of 0 indicates success, and a non-zero exit code indicates failure. For more information, see HealthCheck in the Create a container section of the Docker Remote API.

" }, "interval":{ "shape":"BoxedInteger", @@ -1592,11 +1622,11 @@ }, "timeout":{ "shape":"BoxedInteger", - "documentation":"

The time period in seconds to wait for a health check to succeed before it is considered a failure. You may specify between 2 and 60 seconds. The default value is 5 seconds.

" + "documentation":"

The time period in seconds to wait for a health check to succeed before it is considered a failure. You may specify between 2 and 60 seconds. The default value is 5.

" }, "retries":{ "shape":"BoxedInteger", - "documentation":"

The number of times to retry a failed health check before the container is considered unhealthy. You may specify between 1 and 10 retries. The default value is 3 retries.

" + "documentation":"

The number of times to retry a failed health check before the container is considered unhealthy. You may specify between 1 and 10 retries. The default value is 3.

" }, "startPeriod":{ "shape":"BoxedInteger", @@ -1640,10 +1670,10 @@ "members":{ "sourcePath":{ "shape":"String", - "documentation":"

The path on the host container instance that is presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If the host parameter contains a sourcePath file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

If you are using the Fargate launch type, the sourcePath parameter is not supported.

" + "documentation":"

When the host parameter is used, specify a sourcePath to declare the path on the host container instance that is presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If the host parameter contains a sourcePath file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

If you are using the Fargate launch type, the sourcePath parameter is not supported.

" } }, - "documentation":"

Details on a container instance host volume.

" + "documentation":"

Details on a container instance bind mount host volume.

" }, "Integer":{"type":"integer"}, "InvalidParameterException":{ @@ -1658,11 +1688,11 @@ "members":{ "add":{ "shape":"StringList", - "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

If you are using tasks that use the Fargate launch type, the add parameter is not supported.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" + "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

If you are using tasks that use the Fargate launch type, the add parameter is not supported.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" }, "drop":{ "shape":"StringList", - "documentation":"

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the Create a container section of the Docker Remote API and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" + "documentation":"

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the Create a container section of the Docker Remote API and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" } }, "documentation":"

The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker. For more information on the default capabilities and the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run reference. For more detailed information on these Linux capabilities, see the capabilities(7) Linux manual page.
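A brief sketch of a linuxParameters fragment that adds and drops capabilities follows; the specific capabilities shown are examples only.

```python
# Sketch: adding and dropping Linux capabilities in a container definition.
# The "add" list is not supported for tasks using the Fargate launch type.
linux_parameters = {
    "capabilities": {
        "add": ["SYS_PTRACE"],   # maps to CapAdd / --cap-add
        "drop": ["NET_RAW"],     # maps to CapDrop / --cap-drop
    }
}
```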

" @@ -1697,7 +1727,7 @@ }, "devices":{ "shape":"DevicesList", - "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

If you are using tasks that use the Fargate launch type, the devices parameter is not supported.

" + "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

If you are using tasks that use the Fargate launch type, the devices parameter is not supported.

" }, "initProcessEnabled":{ "shape":"BoxedBoolean", @@ -1838,7 +1868,7 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type for services you want to list.

" + "documentation":"

The launch type for the services to list.

" }, "schedulingStrategy":{ "shape":"SchedulingStrategy", @@ -1968,7 +1998,7 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type for services you want to list.

" + "documentation":"

The launch type for services to list.

" } } }, @@ -2056,7 +2086,7 @@ "members":{ "sourceVolume":{ "shape":"String", - "documentation":"

The name of the volume to mount.

" + "documentation":"

The name of the volume to mount. Must be a volume name referenced in the name parameter of task definition volume.

" }, "containerPath":{ "shape":"String", @@ -2104,7 +2134,7 @@ "members":{ "awsvpcConfiguration":{ "shape":"AwsVpcConfiguration", - "documentation":"

The VPC subnets and security groups associated with a task.

" + "documentation":"

The VPC subnets and security groups associated with a task.

All specified subnets and security groups must be from the same VPC.

" } }, "documentation":"

An object representing the network configuration for a task or service.

" @@ -2125,7 +2155,7 @@ "documentation":"

The private IPv6 address for the network interface.

" } }, - "documentation":"

An object representing the Elastic Network Interface for tasks that use the awsvpc network mode.

" + "documentation":"

An object representing the elastic network interface for tasks that use the awsvpc network mode.

" }, "NetworkInterfaces":{ "type":"list", @@ -2156,7 +2186,7 @@ }, "expression":{ "shape":"String", - "documentation":"

A cluster query language expression to apply to the constraint. Note you cannot specify an expression if the constraint type is distinctInstance. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" } }, "documentation":"

An object representing a constraint on task placement. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

" @@ -2202,7 +2232,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified platform version does not satisfy the task definition’s required capabilities.

", + "documentation":"

The specified platform version does not satisfy the task definition's required capabilities.

", "exception":true }, "PlatformUnknownException":{ @@ -2340,11 +2370,11 @@ }, "cpu":{ "shape":"String", - "documentation":"

The number of CPU units used by the task. It can be expressed as an integer using CPU units, for example 1024, or as a string using vCPUs, for example 1 vCPU or 1 vcpu, in a task definition but will be converted to an integer indicating the CPU units when the task definition is registered.

Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers.

If using the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs).

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the memory parameter:

" + "documentation":"

The number of CPU units used by the task. It can be expressed as an integer using CPU units, for example 1024, or as a string using vCPUs, for example 1 vCPU or 1 vcpu, in a task definition. String values are converted to an integer indicating the CPU units when the task definition is registered.

Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers.

If using the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs).

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the memory parameter:

" }, "memory":{ "shape":"String", - "documentation":"

The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB, for example 1024, or as a string using GB, for example 1GB or 1 GB, in a task definition but will be converted to an integer indicating the MiB when the task definition is registered.

Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers.

If using the EC2 launch type, this field is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the cpu parameter:

" + "documentation":"

The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB, for example 1024, or as a string using GB, for example 1GB or 1 GB, in a task definition. String values are converted to an integer indicating the MiB when the task definition is registered.

Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers.

If using the EC2 launch type, this field is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the cpu parameter:
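For illustration, a hedged sketch of registering a Fargate task definition with one valid task-level cpu/memory pairing follows; the family, execution role ARN, and image are placeholders.

```python
# Sketch: registering a Fargate task definition with task-level cpu and memory.
# cpu "256" (0.25 vCPU) paired with memory "512" MiB is one valid combination.
import botocore.session

ecs = botocore.session.get_session().create_client("ecs", region_name="us-east-1")

ecs.register_task_definition(
    family="example-fargate-task",            # placeholder
    requiresCompatibilities=["FARGATE"],
    networkMode="awsvpc",                     # required for Fargate tasks
    cpu="256",                                # expressed in CPU units
    memory="512",                             # expressed in MiB
    executionRoleArn="arn:aws:iam::123456789012:role/ecsTaskExecutionRole",  # placeholder
    containerDefinitions=[{
        "name": "app",
        "image": "amazonlinux:2",             # placeholder image
        "essential": True,
    }],
)
```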

" } } }, @@ -2357,6 +2387,17 @@ } } }, + "RepositoryCredentials":{ + "type":"structure", + "required":["credentialsParameter"], + "members":{ + "credentialsParameter":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) or name of the secret containing the private repository credentials.

" + } + }, + "documentation":"

The repository credentials for private registry authentication.
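A hedged sketch of how this shape is attached to a container definition follows; the secret ARN and image are placeholders, and the repositoryCredentials member name is assumed from the shape introduced here.

```python
# Sketch: pointing a container at a private registry secret.
# The Secrets Manager ARN is a placeholder; repositoryCredentials is assumed to be
# the container-definition field that takes this shape.
container_definition = {
    "name": "private-app",
    "image": "registry.example.com/team/app:latest",   # placeholder private image
    "essential": True,
    "repositoryCredentials": {
        "credentialsParameter": "arn:aws:secretsmanager:us-east-1:123456789012:secret:registry-creds"  # placeholder
    },
}
```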

" + }, "RequiresAttributes":{ "type":"list", "member":{"shape":"Attribute"} @@ -2465,6 +2506,13 @@ "DAEMON" ] }, + "Scope":{ + "type":"string", + "enum":[ + "task", + "shared" + ] + }, "ServerException":{ "type":"structure", "members":{ @@ -2479,11 +2527,11 @@ "members":{ "serviceArn":{ "shape":"String", - "documentation":"

The ARN that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service .

" + "documentation":"

The ARN that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the Region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service .

" }, "serviceName":{ "shape":"String", - "documentation":"

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a region or across multiple regions.

" + "documentation":"

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a Region or across multiple Regions.

" }, "clusterArn":{ "shape":"String", @@ -2555,7 +2603,7 @@ }, "networkConfiguration":{ "shape":"NetworkConfiguration", - "documentation":"

The VPC subnet and security group configuration for tasks that receive their own Elastic Network Interface by using the awsvpc networking mode.

" + "documentation":"

The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode.

" }, "healthCheckGracePeriodSeconds":{ "shape":"BoxedInteger", @@ -2674,7 +2722,7 @@ }, "networkConfiguration":{ "shape":"NetworkConfiguration", - "documentation":"

The VPC subnet and security group configuration for tasks that receive their own Elastic Network Interface by using the awsvpc networking mode.

" + "documentation":"

The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode.

" } } }, @@ -2727,6 +2775,11 @@ "type":"list", "member":{"shape":"String"} }, + "StringMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, "SubmitContainerStateChangeRequest":{ "type":"structure", "members":{ @@ -2855,19 +2908,19 @@ }, "lastStatus":{ "shape":"String", - "documentation":"

The last known status of the task.

" + "documentation":"

The last known status of the task. For more information, see Task Lifecycle.

" }, "desiredStatus":{ "shape":"String", - "documentation":"

The desired status of the task.

" + "documentation":"

The desired status of the task. For more information, see Task Lifecycle.

" }, "cpu":{ "shape":"String", - "documentation":"

The number of CPU units used by the task. It can be expressed as an integer using CPU units, for example 1024, or as a string using vCPUs, for example 1 vCPU or 1 vcpu, in a task definition but is converted to an integer indicating the CPU units when the task definition is registered.

If using the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs).

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the memory parameter:

" + "documentation":"

The number of CPU units used by the task. It can be expressed as an integer using CPU units, for example 1024, or as a string using vCPUs, for example 1 vCPU or 1 vcpu, in a task definition. String values are converted to an integer indicating the CPU units when the task definition is registered.

If using the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs).

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the memory parameter:

" }, "memory":{ "shape":"String", - "documentation":"

The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB, for example 1024, or as a string using GB, for example 1GB or 1 GB, in a task definition but is converted to an integer indicating the MiB when the task definition is registered.

If using the EC2 launch type, this field is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the cpu parameter:

" + "documentation":"

The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB, for example 1024, or as a string using GB, for example 1GB or 1 GB, in a task definition. String values are converted to an integer indicating the MiB when the task definition is registered.

If using the EC2 launch type, this field is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the cpu parameter:

" }, "containers":{ "shape":"Containers", @@ -2915,7 +2968,7 @@ }, "stoppingAt":{ "shape":"Timestamp", - "documentation":"

The Unix time stamp for when the task will stop (transitions from the RUNNING state to STOPPED).

" + "documentation":"

The Unix time stamp for when the task stops (transitions from the RUNNING state to STOPPED).

" }, "stoppedAt":{ "shape":"Timestamp", @@ -2935,7 +2988,7 @@ }, "attachments":{ "shape":"Attachments", - "documentation":"

The Elastic Network Adapter associated with the task if the task uses the awsvpc network mode.

" + "documentation":"

The elastic network adapter associated with the task if the task uses the awsvpc network mode.

" }, "healthStatus":{ "shape":"HealthStatus", @@ -3079,7 +3132,7 @@ "members":{ "containerPath":{ "shape":"String", - "documentation":"

The absolute file path where the tmpfs volume will be mounted.

" + "documentation":"

The absolute file path where the tmpfs volume is to be mounted.

" }, "size":{ "shape":"Integer", @@ -3087,7 +3140,7 @@ }, "mountOptions":{ "shape":"StringList", - "documentation":"

The list of tmpfs volume mount options.

Valid values: \"defaults\" | \"ro\" | \"rw\" | \"suid\" | \"nosuid\" | \"dev\" | \"nodev\" | \"exec\" | \"noexec\" | \"sync\" | \"async\" | \"dirsync\" | \"remount\" | \"mand\" | \"nomand\" | \"atime\" | \"noatime\" | \"diratime\" | \"nodiratime\" | \"bind\" | \"rbind\" | \"unbindable\" | \"runbindable\" | \"private\" | \"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" | \"relatime\" | \"norelatime\" | \"strictatime\" | \"nostrictatime\"

" + "documentation":"

The list of tmpfs volume mount options.

Valid values: \"defaults\" | \"ro\" | \"rw\" | \"suid\" | \"nosuid\" | \"dev\" | \"nodev\" | \"exec\" | \"noexec\" | \"sync\" | \"async\" | \"dirsync\" | \"remount\" | \"mand\" | \"nomand\" | \"atime\" | \"noatime\" | \"diratime\" | \"nodiratime\" | \"bind\" | \"rbind\" | \"unbindable\" | \"runbindable\" | \"private\" | \"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" | \"relatime\" | \"norelatime\" | \"strictatime\" | \"nostrictatime\" | \"mode\" | \"uid\" | \"gid\" | \"nr_inodes\" | \"nr_blocks\" | \"mpol\"

" } }, "documentation":"

The container path, mount options, and size of the tmpfs mount.
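A short sketch of a tmpfs entry inside a container definition's linuxParameters follows; the path, size, and options are examples.

```python
# Sketch: a tmpfs mount inside a container definition's linuxParameters.
linux_parameters = {
    "tmpfs": [{
        "containerPath": "/tmp/scratch",     # where the tmpfs volume is mounted
        "size": 64,                          # size in MiB
        "mountOptions": ["rw", "noexec", "nosuid"],
    }]
}
```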

" @@ -3251,7 +3304,7 @@ }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version you want to update your service to run.

" + "documentation":"

The platform version that your service should run.

" }, "forceNewDeployment":{ "shape":"Boolean", @@ -3299,10 +3352,14 @@ }, "host":{ "shape":"HostVolumeProperties", - "documentation":"

The contents of the host parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data is not guaranteed to persist after the containers associated with it stop running.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives. For example, you can mount C:\\my\\path:C:\\my\\path and D:\\:D:\\, but not D:\\my\\path:C:\\my\\path or D:\\:C:\\my\\path.

" + "documentation":"

This parameter is specified when using bind mount host volumes. Bind mount host volumes are supported when using either the EC2 or Fargate launch types. The contents of the host parameter determine whether your bind mount host volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data is not guaranteed to persist after the containers associated with it stop running.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount points cannot span drives. For example, you can mount C:\my\path:C:\my\path and D:\:D:\, but not D:\my\path:C:\my\path or D:\:C:\my\path.

" + }, + "dockerVolumeConfiguration":{ + "shape":"DockerVolumeConfiguration", + "documentation":"

The configuration for the Docker volume. This parameter is specified when using Docker volumes.

" } }, - "documentation":"

A data volume used in a task definition.

" + "documentation":"

A data volume used in a task definition. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.
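A minimal sketch of a bind mount host volume and the mount point that references it follows; the paths are placeholders.

```python
# Sketch: a bind mount host volume and the mount point that references it by name.
volumes = [{
    "name": "host-logs",
    "host": {"sourcePath": "/var/log/example"},   # placeholder path on the container instance
}]

container_mount_points = [{
    "sourceVolume": "host-logs",      # must match the volume "name" above
    "containerPath": "/mnt/logs",
    "readOnly": True,
}]
```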

" }, "VolumeFrom":{ "type":"structure", diff --git a/botocore/data/efs/2015-02-01/service-2.json b/botocore/data/efs/2015-02-01/service-2.json index b20c7d0e..3166d333 100644 --- a/botocore/data/efs/2015-02-01/service-2.json +++ b/botocore/data/efs/2015-02-01/service-2.json @@ -24,7 +24,9 @@ {"shape":"BadRequest"}, {"shape":"InternalServerError"}, {"shape":"FileSystemAlreadyExists"}, - {"shape":"FileSystemLimitExceeded"} + {"shape":"FileSystemLimitExceeded"}, + {"shape":"InsufficientThroughputCapacity"}, + {"shape":"ThroughputLimitExceeded"} ], "documentation":"

Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with the same creation token has no effect). If a file system owned by the caller's AWS account with the specified creation token does not currently exist, this operation does the following:

Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

For basic use cases, you can use a randomly generated UUID for the creation token.

The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

This operation also takes an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS: Performance Modes.

After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on EC2 instances in your VPC via the mount target. For more information, see Amazon EFS: How it Works.

This operation requires permissions for the elasticfilesystem:CreateFileSystem action.
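A hedged sketch of an idempotent CreateFileSystem call made through a botocore client follows; the region is an assumption.

```python
# Sketch: idempotent file system creation with a random creation token.
import uuid
import botocore.session

efs = botocore.session.get_session().create_client("efs", region_name="us-east-1")

token = str(uuid.uuid4())
fs = efs.create_file_system(
    CreationToken=token,
    PerformanceMode="generalPurpose",
)
print(fs["FileSystemId"], fs["LifeCycleState"])   # the state is "creating" at first

# Retrying with the same token is safe: if the first call succeeded, the retry
# fails with FileSystemAlreadyExists, which carries the existing FileSystemId.
```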

" }, @@ -198,6 +200,26 @@ {"shape":"SecurityGroupNotFound"} ], "documentation":"

Modifies the set of security groups in effect for a mount target.

When you create a mount target, Amazon EFS also creates a new network interface. For more information, see CreateMountTarget. This operation replaces the security groups in effect for the network interface associated with a mount target, with the SecurityGroups provided in the request. This operation requires that the network interface of the mount target has been created and the lifecycle state of the mount target is not deleted.

The operation requires permissions for the following actions:

" + }, + "UpdateFileSystem":{ + "name":"UpdateFileSystem", + "http":{ + "method":"PUT", + "requestUri":"/2015-02-01/file-systems/{FileSystemId}", + "responseCode":202 + }, + "input":{"shape":"UpdateFileSystemRequest"}, + "output":{"shape":"FileSystemDescription"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"}, + {"shape":"IncorrectFileSystemLifeCycleState"}, + {"shape":"InsufficientThroughputCapacity"}, + {"shape":"InternalServerError"}, + {"shape":"ThroughputLimitExceeded"}, + {"shape":"TooManyRequests"} + ], + "documentation":"

Updates the throughput mode or the amount of provisioned throughput of an existing file system.
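A hedged sketch of an UpdateFileSystem call that switches a file system to provisioned throughput follows; the file system ID is a placeholder.

```python
# Sketch: switching an existing file system to provisioned throughput.
import botocore.session

efs = botocore.session.get_session().create_client("efs", region_name="us-east-1")

efs.update_file_system(
    FileSystemId="fs-12345678",            # placeholder
    ThroughputMode="provisioned",
    ProvisionedThroughputInMibps=128.0,    # must stay within the 1024 MiB/s limit
)
```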

" } }, "shapes":{ @@ -227,11 +249,19 @@ }, "Encrypted":{ "shape":"Encrypted", - "documentation":"

A boolean value that, if true, creates an encrypted file system. When creating an encrypted file system, you have the option of specifying a CreateFileSystemRequest$KmsKeyId for an existing AWS Key Management Service (AWS KMS) customer master key (CMK). If you don't specify a CMK, then the default CMK for Amazon EFS, /aws/elasticfilesystem, is used to protect the encrypted file system.

" + "documentation":"

A Boolean value that, if true, creates an encrypted file system. When creating an encrypted file system, you have the option of specifying a CreateFileSystemRequest$KmsKeyId for an existing AWS Key Management Service (AWS KMS) customer master key (CMK). If you don't specify a CMK, then the default CMK for Amazon EFS, /aws/elasticfilesystem, is used to protect the encrypted file system.

" }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The id of the AWS KMS CMK that will be used to protect the encrypted file system. This parameter is only required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. This id can be in one of the following formats:

Note that if the KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter must be set to true.

" + "documentation":"

The ID of the AWS KMS CMK to be used to protect the encrypted file system. This parameter is only required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. This ID can be in one of the following formats:

If KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter must be set to true.

" + }, + "ThroughputMode":{ + "shape":"ThroughputMode", + "documentation":"

The throughput mode for the file system to be created. There are two throughput modes to choose from for your file system: bursting and provisioned. You can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes as long as it’s been more than 24 hours since the last decrease or throughput mode change.

" + }, + "ProvisionedThroughputInMibps":{ + "shape":"ProvisionedThroughputInMibps", + "documentation":"

The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. The limit on throughput is 1024 MiB/s. You can get these limits increased by contacting AWS Support. For more information, see Amazon EFS Limits That You Can Increase in the Amazon EFS User Guide.

" } } }, @@ -569,7 +599,7 @@ }, "SizeInBytes":{ "shape":"FileSystemSize", - "documentation":"

Latest known metered size (in bytes) of data stored in the file system, in bytes, in its Value field, and the time at which that size was determined in its Timestamp field. The Timestamp value is the integer number of seconds since 1970-01-01T00:00:00Z. Note that the value does not represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, the value will represent actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not the exact size the file system was at any instant in time.

" + "documentation":"

Latest known metered size (in bytes) of data stored in the file system, in its Value field, and the time at which that size was determined in its Timestamp field. The Timestamp value is the integer number of seconds since 1970-01-01T00:00:00Z. The SizeInBytes value doesn't represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, SizeInBytes represents actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not the exact size that the file system was at any point in time.

" }, "PerformanceMode":{ "shape":"PerformanceMode", @@ -577,11 +607,19 @@ }, "Encrypted":{ "shape":"Encrypted", - "documentation":"

A boolean value that, if true, indicates that the file system is encrypted.

" + "documentation":"

A Boolean value that, if true, indicates that the file system is encrypted.

" }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The id of an AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the encrypted file system.

" + "documentation":"

The ID of an AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the encrypted file system.

" + }, + "ThroughputMode":{ + "shape":"ThroughputMode", + "documentation":"

The throughput mode for a file system. There are two throughput modes to choose from for your file system: bursting and provisioned. You can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes as long as it’s been more than 24 hours since the last decrease or throughput mode change.

" + }, + "ProvisionedThroughputInMibps":{ + "shape":"ProvisionedThroughputInMibps", + "documentation":"

The throughput, measured in MiB/s, that you want to provision for a file system. The limit on throughput is 1024 MiB/s. You can get these limits increased by contacting AWS Support. For more information, see Amazon EFS Limits That You Can Increase in the Amazon EFS User Guide.

" } }, "documentation":"

Description of the file system.

" @@ -609,7 +647,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Returned if the AWS account has already created maximum number of file systems allowed per account.

", + "documentation":"

Returned if the AWS account has already created the maximum number of file systems allowed per account.

", "error":{"httpStatusCode":403}, "exception":true }, @@ -620,7 +658,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Returned if the specified FileSystemId does not exist in the requester's AWS account.

", + "documentation":"

Returned if the specified FileSystemId value doesn't exist in the requester's AWS account.

", "error":{"httpStatusCode":404}, "exception":true }, @@ -650,7 +688,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Returned if the file system's life cycle state is not \"created\".

", + "documentation":"

Returned if the file system's lifecycle state is not \"available\".

", "error":{"httpStatusCode":409}, "exception":true }, @@ -665,6 +703,17 @@ "error":{"httpStatusCode":409}, "exception":true }, + "InsufficientThroughputCapacity":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned if there's not enough capacity to provision additional throughput. This value might be returned when you try to create a file system in provisioned throughput mode, when you attempt to increase the provisioned throughput of an existing file system, or when you attempt to change an existing file system from bursting to provisioned throughput mode.

", + "error":{"httpStatusCode":503}, + "exception":true + }, "InternalServerError":{ "type":"structure", "required":["ErrorCode"], @@ -698,6 +747,7 @@ "enum":[ "creating", "available", + "updating", "deleting", "deleted" ] @@ -803,7 +853,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The calling account has reached the ENI limit for the specific AWS region. Client should try to delete some ENIs or get its account limit raised. For more information, see Amazon VPC Limits in the Amazon Virtual Private Cloud User Guide (see the Network interfaces per VPC entry in the table).

", + "documentation":"

The calling account has reached the limit for elastic network interfaces for the specific AWS Region. The client should try to delete some elastic network interfaces or get the account limit raised. For more information, see Amazon VPC Limits in the Amazon VPC User Guide (see the Network interfaces per VPC entry in the table).

", "error":{"httpStatusCode":409}, "exception":true }, @@ -825,6 +875,10 @@ "maxIO" ] }, + "ProvisionedThroughputInMibps":{ + "type":"double", + "min":0.0 + }, "SecurityGroup":{"type":"string"}, "SecurityGroupLimitExceeded":{ "type":"structure", @@ -844,7 +898,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Returned if one of the specified security groups does not exist in the subnet's VPC.

", + "documentation":"

Returned if one of the specified security groups doesn't exist in the subnet's VPC.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -900,7 +954,36 @@ "type":"list", "member":{"shape":"Tag"} }, + "ThroughputLimitExceeded":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned if the throughput mode or amount of provisioned throughput can't be changed because the throughput limit of 1024 MiB/s has been reached.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ThroughputMode":{ + "type":"string", + "enum":[ + "bursting", + "provisioned" + ] + }, "Timestamp":{"type":"timestamp"}, + "TooManyRequests":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned if you don’t wait at least 24 hours before changing the throughput mode, or decreasing the Provisioned Throughput value.

", + "error":{"httpStatusCode":429}, + "exception":true + }, "UnsupportedAvailabilityZone":{ "type":"structure", "required":["ErrorCode"], @@ -911,6 +994,26 @@ "documentation":"

", "error":{"httpStatusCode":400}, "exception":true + }, + "UpdateFileSystemRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The ID of the file system that you want to update.

", + "location":"uri", + "locationName":"FileSystemId" + }, + "ThroughputMode":{ + "shape":"ThroughputMode", + "documentation":"

(Optional) The throughput mode that you want your file system to use. If you're not updating your throughput mode, you don't need to provide this value in your request.

" + }, + "ProvisionedThroughputInMibps":{ + "shape":"ProvisionedThroughputInMibps", + "documentation":"

(Optional) The amount of throughput, in MiB/s, that you want to provision for your file system. If you're not updating the amount of provisioned throughput for your file system, you don't need to provide this value in your request.
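A hedged boto3 sketch of the UpdateFileSystem operation described above; the file system ID is a placeholder, and the calls are subject to the 24-hour restriction documented for TooManyRequests:

```python
import boto3

efs = boto3.client("efs")

# Switch an existing file system to provisioned throughput.
efs.update_file_system(
    FileSystemId="fs-12345678",           # placeholder ID
    ThroughputMode="provisioned",
    ProvisionedThroughputInMibps=256.0,
)

# Later (at least 24 hours after the last change), fall back to bursting.
efs.update_file_system(FileSystemId="fs-12345678", ThroughputMode="bursting")
```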

" + } + } } }, "documentation":"Amazon Elastic File System

Amazon Elastic File System (Amazon EFS) provides simple, scalable file storage for use with Amazon EC2 instances in the AWS Cloud. With Amazon EFS, storage capacity is elastic, growing and shrinking automatically as you add and remove files, so your applications have the storage they need, when they need it. For more information, see the User Guide.

" diff --git a/botocore/data/elbv2/2015-12-01/service-2.json b/botocore/data/elbv2/2015-12-01/service-2.json index 0d5afbbe..18ff0a86 100644 --- a/botocore/data/elbv2/2015-12-01/service-2.json +++ b/botocore/data/elbv2/2015-12-01/service-2.json @@ -167,7 +167,7 @@ "errors":[ {"shape":"ListenerNotFoundException"} ], - "documentation":"

Deletes the specified listener.

Alternatively, your listener is deleted when you delete the load balancer it is attached to using DeleteLoadBalancer.

" + "documentation":"

Deletes the specified listener.

Alternatively, your listener is deleted when you delete the load balancer to which it is attached, using DeleteLoadBalancer.

" }, "DeleteLoadBalancer":{ "name":"DeleteLoadBalancer", @@ -598,7 +598,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"InvalidSubnetException"} ], - "documentation":"

Sets the type of IP addresses used by the subnets of the specified Application Load Balancer or Network Load Balancer.

Note that Network Load Balancers must use ipv4.

" + "documentation":"

Sets the type of IP addresses used by the subnets of the specified Application Load Balancer or Network Load Balancer.

Network Load Balancers must use ipv4.

" }, "SetRulePriorities":{ "name":"SetRulePriorities", @@ -634,7 +634,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"InvalidSecurityGroupException"} ], - "documentation":"

Associates the specified security groups with the specified Application Load Balancer. The specified security groups override the previously associated security groups.

Note that you can't specify a security group for a Network Load Balancer.

" + "documentation":"

Associates the specified security groups with the specified Application Load Balancer. The specified security groups override the previously associated security groups.

You can't specify a security group for a Network Load Balancer.

" }, "SetSubnets":{ "name":"SetSubnets", @@ -655,7 +655,7 @@ {"shape":"AllocationIdNotFoundException"}, {"shape":"AvailabilityZoneNotSupportedException"} ], - "documentation":"

Enables the Availability Zone for the specified public subnets for the specified Application Load Balancer. The specified subnets replace the previously enabled subnets.

Note that you can't change the subnets for a Network Load Balancer.

" + "documentation":"

Enables the Availability Zone for the specified public subnets for the specified Application Load Balancer. The specified subnets replace the previously enabled subnets.

You can't change the subnets for a Network Load Balancer.

" } }, "shapes":{ @@ -665,11 +665,11 @@ "members":{ "Type":{ "shape":"ActionTypeEnum", - "documentation":"

The type of action. Each rule must include one forward action.

" + "documentation":"

The type of action. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect.

" }, "TargetGroupArn":{ "shape":"TargetGroupArn", - "documentation":"

The Amazon Resource Name (ARN) of the target group. Specify only when Type is forward.

For a default rule, the protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

" + "documentation":"

The Amazon Resource Name (ARN) of the target group. Specify only when Type is forward.

" }, "AuthenticateOidcConfig":{ "shape":"AuthenticateOidcActionConfig", @@ -681,7 +681,15 @@ }, "Order":{ "shape":"ActionOrder", - "documentation":"

The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The forward action must be performed last.

" + "documentation":"

The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The final action to be performed must be a forward or a fixed-response action.

" + }, + "RedirectConfig":{ + "shape":"RedirectActionConfig", + "documentation":"

[Application Load Balancer] Information for creating a redirect action. Specify only when Type is redirect.

" + }, + "FixedResponseConfig":{ + "shape":"FixedResponseActionConfig", + "documentation":"

[Application Load Balancer] Information for creating an action that returns a custom HTTP response. Specify only when Type is fixed-response.

" } }, "documentation":"

Information about an action.

" @@ -696,7 +704,9 @@ "enum":[ "forward", "authenticate-oidc", - "authenticate-cognito" + "authenticate-cognito", + "redirect", + "fixed-response" ] }, "Actions":{ @@ -1027,11 +1037,11 @@ }, "Certificates":{ "shape":"CertificateList", - "documentation":"

[HTTPS listeners] The default SSL server certificate. You must provide exactly one certificate. To create a certificate list, use AddListenerCertificates.

" + "documentation":"

[HTTPS listeners] The default SSL server certificate. You must provide exactly one default certificate. To create a certificate list, use AddListenerCertificates.

" }, "DefaultActions":{ "shape":"Actions", - "documentation":"

The actions for the default rule. The rule must include one forward action.

If the action type is forward, you can specify a single target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

If the action type is authenticate-oidc, you can use an identity provider that is OpenID Connect (OIDC) compliant to authenticate users as they access your application.

If the action type is authenticate-cognito, you can use Amazon Cognito to authenticate users as they access your application.

" + "documentation":"

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you can specify a single target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

[HTTPS listener] If the action type is authenticate-oidc, you can use an identity provider that is OpenID Connect (OIDC) compliant to authenticate users as they access your application.

[HTTPS listener] If the action type is authenticate-cognito, you can use Amazon Cognito to authenticate users as they access your application.

[Application Load Balancer] If the action type is redirect, you can redirect HTTP and HTTPS requests.

[Application Load Balancer] If the action type is fixed-response, you can return a custom HTTP response.

" } } }, @@ -1066,7 +1076,7 @@ }, "Scheme":{ "shape":"LoadBalancerSchemeEnum", - "documentation":"

The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the Internet.

The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can only route requests from clients with access to the VPC for the load balancer.

The default is an Internet-facing load balancer.

" + "documentation":"

The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.

The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can only route requests from clients with access to the VPC for the load balancer.

The default is an Internet-facing load balancer.

" }, "Tags":{ "shape":"TagList", @@ -1106,7 +1116,7 @@ }, "Conditions":{ "shape":"RuleConditionList", - "documentation":"

The conditions. Each condition specifies a field name and a single value.

If the field name is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.

If the field name is path-pattern, you can specify a single path pattern. A path pattern is case sensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.

" + "documentation":"

The conditions. Each condition specifies a field name and a single value.

If the field name is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.

If the field name is path-pattern, you can specify a single path pattern. A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.

" }, "Priority":{ "shape":"RulePriority", @@ -1114,7 +1124,7 @@ }, "Actions":{ "shape":"Actions", - "documentation":"

The actions. Each rule must include one forward action.

If the action type is forward, you can specify a single target group.

If the action type is authenticate-oidc, you can use an identity provider that is OpenID Connect (OIDC) compliant to authenticate users as they access your application.

If the action type is authenticate-cognito, you can use Amazon Cognito to authenticate users as they access your application.

" + "documentation":"

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect.

If the action type is forward, you can specify a single target group.

[HTTPS listener] If the action type is authenticate-oidc, you can use an identity provider that is OpenID Connect (OIDC) compliant to authenticate users as they access your application.

[HTTPS listener] If the action type is authenticate-cognito, you can use Amazon Cognito to authenticate users as they access your application.

[Application Load Balancer] If the action type is redirect, you can redirect HTTP and HTTPS requests.

[Application Load Balancer] If the action type is fixed-response, you can return a custom HTTP response.
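For illustration, a boto3 sketch (not part of the model) that combines a path-pattern condition with the new fixed-response action type on CreateRule; the listener ARN is a placeholder:

```python
import boto3

elbv2 = boto3.client("elbv2")

# Serve a canned 503 for /maintenance/* instead of forwarding to a target group.
elbv2.create_rule(
    ListenerArn="arn:aws:elasticloadbalancing:us-east-1:123456789012:"
                "listener/app/my-alb/0123456789abcdef/0123456789abcdef",  # placeholder
    Priority=10,
    Conditions=[{"Field": "path-pattern", "Values": ["/maintenance/*"]}],
    Actions=[{
        "Type": "fixed-response",
        "FixedResponseConfig": {
            "StatusCode": "503",                      # must match ^(2|4|5)\d\d$
            "ContentType": "text/plain",
            "MessageBody": "Service temporarily unavailable",
        },
    }],
)
```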

" } } }, @@ -1166,11 +1176,11 @@ }, "HealthCheckIntervalSeconds":{ "shape":"HealthCheckIntervalSeconds", - "documentation":"

The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5 to 300 seconds. For Network Load Balancers, the supported values are 10 or 30 seconds. The default is 30 seconds.

" + "documentation":"

The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5–300 seconds. For Network Load Balancers, the supported values are 10 or 30 seconds. The default is 30 seconds.

" }, "HealthCheckTimeoutSeconds":{ "shape":"HealthCheckTimeoutSeconds", - "documentation":"

The amount of time, in seconds, during which no response from a target means a failed health check. For Application Load Balancers, the range is 2 to 60 seconds and the default is 5 seconds. For Network Load Balancers, this is 10 seconds for TCP and HTTPS health checks and 6 seconds for HTTP health checks.

" + "documentation":"

The amount of time, in seconds, during which no response from a target means a failed health check. For Application Load Balancers, the range is 2–60 seconds and the default is 5 seconds. For Network Load Balancers, this is 10 seconds for TCP and HTTPS health checks and 6 seconds for HTTP health checks.

" }, "HealthyThresholdCount":{ "shape":"HealthCheckThresholdCount", @@ -1186,7 +1196,7 @@ }, "TargetType":{ "shape":"TargetTypeEnum", - "documentation":"

The type of target that you must specify when registering targets with this target group. The possible values are instance (targets are specified by instance ID) or ip (targets are specified by IP address). The default is instance. Note that you can't specify targets for a target group using both instance IDs and IP addresses.

If the target type is ip, specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.

" + "documentation":"

The type of target that you must specify when registering targets with this target group. The possible values are instance (targets are specified by instance ID) or ip (targets are specified by IP address). The default is instance. You can't specify targets for a target group using both instance IDs and IP addresses.

If the target type is ip, specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.

" } } }, @@ -1640,6 +1650,39 @@ }, "exception":true }, + "FixedResponseActionConfig":{ + "type":"structure", + "required":["StatusCode"], + "members":{ + "MessageBody":{ + "shape":"FixedResponseActionMessage", + "documentation":"

The message.

" + }, + "StatusCode":{ + "shape":"FixedResponseActionStatusCode", + "documentation":"

The HTTP response code (2XX, 4XX, or 5XX).

" + }, + "ContentType":{ + "shape":"FixedResponseActionContentType", + "documentation":"

The content type.

Valid Values: text/plain | text/css | text/html | application/javascript | application/json

" + } + }, + "documentation":"

Information about an action that returns a custom HTTP response.

" + }, + "FixedResponseActionContentType":{ + "type":"string", + "max":32, + "min":0 + }, + "FixedResponseActionMessage":{ + "type":"string", + "max":1024, + "min":0 + }, + "FixedResponseActionStatusCode":{ + "type":"string", + "pattern":"^(2|4|5)\\d\\d$" + }, "HealthCheckIntervalSeconds":{ "type":"integer", "max":300, @@ -1863,7 +1906,7 @@ }, "Scheme":{ "shape":"LoadBalancerSchemeEnum", - "documentation":"

The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the Internet.

The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can only route requests from clients with access to the VPC for the load balancer.

" + "documentation":"

The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.

The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can only route requests from clients with access to the VPC for the load balancer.

" }, "VpcId":{ "shape":"VpcId", @@ -1920,7 +1963,7 @@ "members":{ "Key":{ "shape":"LoadBalancerAttributeKey", - "documentation":"

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

The following attributes are supported by only Application Load Balancers:

The following attributes are supported by only Network Load Balancers:

" + "documentation":"

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

The following attributes are supported by only Application Load Balancers:

The following attributes are supported by only Network Load Balancers:

" }, "Value":{ "shape":"LoadBalancerAttributeValue", @@ -2008,7 +2051,7 @@ "members":{ "HttpCode":{ "shape":"HttpCode", - "documentation":"

The HTTP codes.

For Application Load Balancers, you can specify values between 200 and 499, and the default value is 200. You can specify multiple values (for example, \"200,202\") or a range of values (for example, \"200-299\").

For Network Load Balancers, this is 200 to 399.

" + "documentation":"

The HTTP codes.

For Application Load Balancers, you can specify values between 200 and 499, and the default value is 200. You can specify multiple values (for example, \"200,202\") or a range of values (for example, \"200-299\").

For Network Load Balancers, this is 200–399.

" } }, "documentation":"

Information to use when checking for a successful response from a target.

" @@ -2036,11 +2079,11 @@ }, "Certificates":{ "shape":"CertificateList", - "documentation":"

[HTTPS listeners] The default SSL server certificate. You must provide exactly one certificate. To create a certificate list, use AddListenerCertificates.

" + "documentation":"

[HTTPS listeners] The default SSL server certificate. You must provide exactly one default certificate. To create a certificate list, use AddListenerCertificates.

" }, "DefaultActions":{ "shape":"Actions", - "documentation":"

The actions for the default rule. The rule must include one forward action.

If the action type is forward, you can specify a single target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

If the action type is authenticate-oidc, you can use an identity provider that is OpenID Connect (OIDC) compliant to authenticate users as they access your application.

If the action type is authenticate-cognito, you can use Amazon Cognito to authenticate users as they access your application.

" + "documentation":"

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you can specify a single target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

[HTTPS listener] If the action type is authenticate-oidc, you can use an identity provider that is OpenID Connect (OIDC) compliant to authenticate users as they access your application.

[HTTPS listener] If the action type is authenticate-cognito, you can use Amazon Cognito to authenticate users as they access your application.

[Application Load Balancer] If the action type is redirect, you can redirect HTTP and HTTPS requests.

[Application Load Balancer] If the action type is fixed-response, you can return a custom HTTP response.

" } } }, @@ -2089,7 +2132,7 @@ }, "Conditions":{ "shape":"RuleConditionList", - "documentation":"

The conditions. Each condition specifies a field name and a single value.

If the field name is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.

If the field name is path-pattern, you can specify a single path pattern. A path pattern is case sensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.

" + "documentation":"

The conditions. Each condition specifies a field name and a single value.

If the field name is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.

If the field name is path-pattern, you can specify a single path pattern. A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.

" }, "Actions":{ "shape":"Actions", @@ -2154,7 +2197,7 @@ }, "HealthCheckIntervalSeconds":{ "shape":"HealthCheckIntervalSeconds", - "documentation":"

The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5 to 300 seconds. For Network Load Balancers, the supported values are 10 or 30 seconds.

" + "documentation":"

The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5–300 seconds. For Network Load Balancers, the supported values are 10 or 30 seconds.

" }, "HealthCheckTimeoutSeconds":{ "shape":"HealthCheckTimeoutSeconds", @@ -2231,6 +2274,64 @@ "TCP" ] }, + "RedirectActionConfig":{ + "type":"structure", + "required":["StatusCode"], + "members":{ + "Protocol":{ + "shape":"RedirectActionProtocol", + "documentation":"

The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.

" + }, + "Port":{ + "shape":"RedirectActionPort", + "documentation":"

The port. You can specify a value from 1 to 65535 or #{port}.

" + }, + "Host":{ + "shape":"RedirectActionHost", + "documentation":"

The hostname. This component is not percent-encoded. The hostname can contain #{host}.

" + }, + "Path":{ + "shape":"RedirectActionPath", + "documentation":"

The absolute path, starting with the leading \"/\". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}.

" + }, + "Query":{ + "shape":"RedirectActionQuery", + "documentation":"

The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading \"?\", as it is automatically added. You can specify any of the reserved keywords.

" + }, + "StatusCode":{ + "shape":"RedirectActionStatusCodeEnum", + "documentation":"

The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary (HTTP 302).

" + } + }, + "documentation":"

Information about a redirect action.

A URI consists of the following components: protocol://hostname:port/path?query. You must modify at least one of the following components to avoid a redirect loop: protocol, hostname, port, or path. Any components that you do not modify retain their original values.

You can reuse URI components using the following reserved keywords:

For example, you can change the path to \"/new/#{path}\", the hostname to \"example.#{host}\", or the query to \"#{query}&value=xyz\".
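A hedged boto3 sketch of the common HTTP-to-HTTPS case for the redirect action described above; the load balancer ARN is a placeholder:

```python
import boto3

elbv2 = boto3.client("elbv2")

# Redirect all HTTP traffic to HTTPS on the same host, path, and query (HTTP 301).
elbv2.create_listener(
    LoadBalancerArn="arn:aws:elasticloadbalancing:us-east-1:123456789012:"
                    "loadbalancer/app/my-alb/0123456789abcdef",  # placeholder
    Protocol="HTTP",
    Port=80,
    DefaultActions=[{
        "Type": "redirect",
        "RedirectConfig": {
            "Protocol": "HTTPS",
            "Port": "443",              # RedirectActionPort is a string
            "StatusCode": "HTTP_301",
            # Host, Path, and Query keep their original values when omitted.
        },
    }],
)
```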

" + }, + "RedirectActionHost":{ + "type":"string", + "max":128, + "min":1 + }, + "RedirectActionPath":{ + "type":"string", + "max":128, + "min":1 + }, + "RedirectActionPort":{"type":"string"}, + "RedirectActionProtocol":{ + "type":"string", + "pattern":"^(HTTPS?|#\\{protocol\\})$" + }, + "RedirectActionQuery":{ + "type":"string", + "max":128, + "min":0 + }, + "RedirectActionStatusCodeEnum":{ + "type":"string", + "enum":[ + "HTTP_301", + "HTTP_302" + ] + }, "RegisterTargetsInput":{ "type":"structure", "required":[ @@ -2354,7 +2455,7 @@ }, "Values":{ "shape":"ListOfString", - "documentation":"

The condition value.

If the field name is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.

If the field name is path-pattern, you can specify a single path pattern (for example, /img/*). A path pattern is case sensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.

" + "documentation":"

The condition value.

If the field name is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.

If the field name is path-pattern, you can specify a single path pattern (for example, /img/*). A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.

" } }, "documentation":"

Information about a condition for a rule.

" diff --git a/botocore/data/emr/2009-03-31/service-2.json b/botocore/data/emr/2009-03-31/service-2.json index ae2541e7..69a49569 100644 --- a/botocore/data/emr/2009-03-31/service-2.json +++ b/botocore/data/emr/2009-03-31/service-2.json @@ -122,7 +122,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on. For information about the cluster steps, see ListSteps.

" + "documentation":"

Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on.

" }, "DescribeJobFlows":{ "name":"DescribeJobFlows", @@ -790,7 +790,7 @@ }, "ReleaseLabel":{ "shape":"String", - "documentation":"

The release label for the Amazon EMR release.

" + "documentation":"

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version, for example, emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see http://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR release versions 4.x and later. Earlier versions use AmiVersion.

" }, "AutoTerminate":{ "shape":"Boolean", @@ -1649,7 +1649,7 @@ }, "BidPrice":{ "shape":"String", - "documentation":"

The bid price for each EC2 instance in the instance group when launching nodes as Spot Instances, expressed in USD.

" + "documentation":"

The maximum Spot price you are willing to pay for EC2 instances.

An optional, nullable field that applies if the MarketType for the instance group is specified as SPOT. Specify the maximum spot price in USD. If the value is NULL and SPOT is specified, the maximum Spot price is set equal to the On-Demand price.

" }, "InstanceType":{ "shape":"InstanceType", @@ -1712,7 +1712,7 @@ }, "BidPrice":{ "shape":"XmlStringMaxLen256", - "documentation":"

Bid price for each EC2 instance in the instance group when launching nodes as Spot Instances, expressed in USD.

" + "documentation":"

The maximum Spot price you are willing to pay for EC2 instances.

An optional, nullable field that applies if the MarketType for the instance group is specified as SPOT. Specify the maximum spot price in USD. If the value is NULL and SPOT is specified, the maximum Spot price is set equal to the On-Demand price.

" }, "InstanceType":{ "shape":"InstanceType", @@ -1771,7 +1771,7 @@ }, "BidPrice":{ "shape":"XmlStringMaxLen256", - "documentation":"

Bid price for EC2 Instances when launching nodes as Spot Instances, expressed in USD.

" + "documentation":"

The maximum Spot price you are willing to pay for EC2 instances.

An optional, nullable field that applies if the MarketType for the instance group is specified as SPOT. Specified in USD. If the value is NULL and SPOT is specified, the maximum Spot price is set equal to the On-Demand price.

" }, "InstanceType":{ "shape":"InstanceType", @@ -2180,7 +2180,7 @@ }, "AmiVersion":{ "shape":"XmlStringMaxLen256", - "documentation":"

Used only for version 2.x and 3.x of Amazon EMR. The version of the AMI used to initialize Amazon EC2 instances in the job flow. For a list of AMI versions supported by Amazon EMR, see AMI Versions Supported in EMR in the Amazon EMR Developer Guide.

" + "documentation":"

Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, ReleaseLabel is used. To specify a custom AMI, use CustomAmiID.

" }, "ExecutionStatusDetail":{ "shape":"JobFlowExecutionStatusDetail", @@ -2322,7 +2322,7 @@ }, "HadoopVersion":{ "shape":"XmlStringMaxLen256", - "documentation":"

The Hadoop version for the cluster. Valid inputs are \"0.18\" (deprecated), \"0.20\" (deprecated), \"0.20.205\" (deprecated), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

" + "documentation":"

Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are \"0.18\" (deprecated), \"0.20\" (deprecated), \"0.20.205\" (deprecated), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

" }, "Ec2SubnetId":{ "shape":"XmlStringMaxLen256", @@ -2888,11 +2888,11 @@ }, "AmiVersion":{ "shape":"XmlStringMaxLen256", - "documentation":"

For Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, the Linux AMI is determined by the ReleaseLabel specified or by CustomAmiID. The version of the Amazon Machine Image (AMI) to use when launching Amazon EC2 instances in the job flow. For details about the AMI versions currently supported in EMR version 3.x and 2.x, see AMI Versions Supported in EMR in the Amazon EMR Developer Guide.

If the AMI supports multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18 and 0.20), you can use the JobFlowInstancesConfig HadoopVersion parameter to modify the version of Hadoop from the defaults shown above.

Previously, the EMR AMI version API parameter options allowed you to use latest for the latest AMI version rather than specify a numerical value. Some regions no longer support this deprecated option as they only have a newer release label version of EMR, which requires you to specify an EMR release label release (EMR 4.x or later).

" + "documentation":"

Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, ReleaseLabel is used. To specify a custom AMI, use CustomAmiID.

" }, "ReleaseLabel":{ "shape":"XmlStringMaxLen256", - "documentation":"

The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use AmiVersion instead.

" + "documentation":"

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version, for example, emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see http://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR release versions 4.x and later. Earlier versions use AmiVersion.
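As a usage sketch only (the cluster name, instance types, and bid price are placeholders), a boto3 RunJobFlow call that uses a release label together with a Spot instance group and its maximum Spot price:

```python
import boto3

emr = boto3.client("emr")

emr.run_job_flow(
    Name="example-cluster",                      # placeholder
    ReleaseLabel="emr-5.14.0",                   # emr-x.x.x form, 4.x and later
    Instances={
        "InstanceGroups": [
            {"Name": "master", "InstanceRole": "MASTER", "Market": "ON_DEMAND",
             "InstanceType": "m4.large", "InstanceCount": 1},
            # BidPrice is the maximum Spot price in USD; omit it to default
            # to the On-Demand price.
            {"Name": "core", "InstanceRole": "CORE", "Market": "SPOT",
             "InstanceType": "m4.large", "InstanceCount": 2, "BidPrice": "0.10"},
        ],
        "KeepJobFlowAliveWhenNoSteps": False,
    },
    JobFlowRole="EMR_EC2_DefaultRole",
    ServiceRole="EMR_DefaultRole",
)
```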

" }, "Instances":{ "shape":"JobFlowInstancesConfig", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 33325348..f201ae40 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -92,6 +92,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -117,6 +118,18 @@ "us-east-1" : { } } }, + "api.sagemaker" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "apigateway" : { "endpoints" : { "ap-northeast-1" : { }, @@ -179,10 +192,13 @@ "athena" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -237,6 +253,7 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -554,6 +571,7 @@ "protocols" : [ "https" ] }, "endpoints" : { + "ap-southeast-2" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -795,6 +813,12 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "elasticache-fips.us-west-1.amazonaws.com" + }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -823,6 +847,7 @@ }, "elasticfilesystem" : { "endpoints" : { + "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, @@ -857,7 +882,7 @@ }, "elasticmapreduce" : { "defaults" : { - "protocols" : [ "http", "https" ], + "protocols" : [ "https" ], "sslCommonName" : "{region}.{service}.{dnsSuffix}" }, "endpoints" : { @@ -953,11 +978,15 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -1020,6 +1049,7 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, @@ -1036,6 +1066,7 @@ "ap-northeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-west-1" : { }, "us-east-1" : { }, "us-west-2" : { } }, @@ -1150,6 +1181,7 @@ }, "kinesisanalytics" : { "endpoints" : { + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-west-2" : { } @@ -1271,6 +1303,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, @@ -1297,6 +1330,7 @@ "mediastore" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, @@ -1543,6 +1577,7 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1582,6 +1617,9 @@ "runtime.sagemaker" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1644,16 +1682,6 @@ "isRegionalized" : true, "partitionEndpoint" : 
"us-east-1" }, - "sagemaker" : { - "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "eu-west-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-2" : { } - } - }, "sdb" : { "defaults" : { "protocols" : [ "http", "https" ], @@ -1908,6 +1936,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -2311,7 +2340,7 @@ }, "elasticmapreduce" : { "defaults" : { - "protocols" : [ "http", "https" ] + "protocols" : [ "https" ] }, "endpoints" : { "cn-north-1" : { }, @@ -2549,6 +2578,17 @@ "us-gov-west-1" : { } } }, + "data.iot" : { + "defaults" : { + "credentialScope" : { + "service" : "iotdata" + }, + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-gov-west-1" : { } + } + }, "directconnect" : { "endpoints" : { "us-gov-west-1" : { } @@ -2587,6 +2627,12 @@ }, "elasticache" : { "endpoints" : { + "fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "elasticache-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-west-1" : { } } }, @@ -2605,7 +2651,7 @@ "elasticmapreduce" : { "endpoints" : { "us-gov-west-1" : { - "protocols" : [ "http", "https" ] + "protocols" : [ "https" ] } } }, @@ -2643,6 +2689,16 @@ "us-gov-west-1" : { } } }, + "iot" : { + "defaults" : { + "credentialScope" : { + "service" : "execute-api" + } + }, + "endpoints" : { + "us-gov-west-1" : { } + } + }, "kinesis" : { "endpoints" : { "us-gov-west-1" : { } @@ -2745,6 +2801,11 @@ "us-gov-west-1" : { } } }, + "states" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "storagegateway" : { "endpoints" : { "us-gov-west-1" : { } diff --git a/botocore/data/es/2015-01-01/service-2.json b/botocore/data/es/2015-01-01/service-2.json index 86f7a086..920507a9 100644 --- a/botocore/data/es/2015-01-01/service-2.json +++ b/botocore/data/es/2015-01-01/service-2.json @@ -170,6 +170,57 @@ ], "documentation":"

Returns information about reserved Elasticsearch instances for this account.

" }, + "GetCompatibleElasticsearchVersions":{ + "name":"GetCompatibleElasticsearchVersions", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/es/compatibleVersions" + }, + "input":{"shape":"GetCompatibleElasticsearchVersionsRequest"}, + "output":{"shape":"GetCompatibleElasticsearchVersionsResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"}, + {"shape":"ValidationException"}, + {"shape":"InternalException"} + ], + "documentation":"

Returns a list of upgrade-compatible Elasticsearch versions. You can optionally pass a DomainName to get all upgrade-compatible Elasticsearch versions for that specific domain.

" + }, + "GetUpgradeHistory":{ + "name":"GetUpgradeHistory", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/es/upgradeDomain/{DomainName}/history" + }, + "input":{"shape":"GetUpgradeHistoryRequest"}, + "output":{"shape":"GetUpgradeHistoryResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"}, + {"shape":"ValidationException"}, + {"shape":"InternalException"} + ], + "documentation":"

Retrieves the complete history of the last 10 upgrades that were performed on the domain.

" + }, + "GetUpgradeStatus":{ + "name":"GetUpgradeStatus", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/es/upgradeDomain/{DomainName}/status" + }, + "input":{"shape":"GetUpgradeStatusRequest"}, + "output":{"shape":"GetUpgradeStatusResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"}, + {"shape":"ValidationException"}, + {"shape":"InternalException"} + ], + "documentation":"

Retrieves the latest status of the last upgrade or upgrade eligibility check that was performed on the domain.

" + }, "ListDomainNames":{ "name":"ListDomainNames", "http":{ @@ -280,6 +331,24 @@ {"shape":"ValidationException"} ], "documentation":"

Modifies the cluster configuration of the specified Elasticsearch domain, such as setting the instance type and the number of instances.

" + }, + "UpgradeElasticsearchDomain":{ + "name":"UpgradeElasticsearchDomain", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/upgradeDomain" + }, + "input":{"shape":"UpgradeElasticsearchDomainRequest"}, + "output":{"shape":"UpgradeElasticsearchDomainResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"DisabledOperationException"}, + {"shape":"ValidationException"}, + {"shape":"InternalException"} + ], + "documentation":"

Allows you to either upgrade your domain or perform an Upgrade eligibility check to a compatible Elasticsearch version.
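A hedged boto3 sketch of the upgrade workflow added here (list compatible versions, run an eligibility check, then poll status); the domain name is a placeholder and the sketch assumes at least one compatible target version exists:

```python
import boto3

es = boto3.client("es")

# Which versions can this domain be upgraded to?
compat = es.get_compatible_elasticsearch_versions(DomainName="my-domain")
targets = compat["CompatibleElasticsearchVersions"][0]["TargetVersions"]

# Dry run first (PerformCheckOnly=True); drop the flag to perform the upgrade.
es.upgrade_elasticsearch_domain(
    DomainName="my-domain",
    TargetVersion=targets[0],
    PerformCheckOnly=True,
)

status = es.get_upgrade_status(DomainName="my-domain")
print(status["UpgradeStep"], status["StepStatus"], status["UpgradeName"])
```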

" } }, "shapes":{ @@ -421,6 +490,21 @@ }, "documentation":"

Status of the Cognito options for the specified Elasticsearch domain.

" }, + "CompatibleElasticsearchVersionsList":{ + "type":"list", + "member":{"shape":"CompatibleVersionsMap"} + }, + "CompatibleVersionsMap":{ + "type":"structure", + "members":{ + "SourceVersion":{ + "shape":"ElasticsearchVersionString", + "documentation":"

The current version of Elasticsearch that the domain is running.

" + }, + "TargetVersions":{"shape":"ElasticsearchVersionList"} + }, + "documentation":"

A map from an ElasticsearchVersion to a list of compatible ElasticsearchVersions to which the domain can be upgraded.

" + }, "CreateElasticsearchDomainRequest":{ "type":"structure", "required":["DomainName"], @@ -946,6 +1030,10 @@ "shape":"Boolean", "documentation":"

The status of the Elasticsearch domain configuration. True if Amazon Elasticsearch Service is processing configuration changes. False if the configuration is active.

" }, + "UpgradeProcessing":{ + "shape":"Boolean", + "documentation":"

The status of an Elasticsearch domain version upgrade. True if Amazon Elasticsearch Service is undergoing a version upgrade. False if the configuration is active.

" + }, "ElasticsearchVersion":{"shape":"ElasticsearchVersionString"}, "ElasticsearchClusterConfig":{ "shape":"ElasticsearchClusterConfig", @@ -1062,6 +1150,93 @@ "type":"string", "pattern":"\\p{XDigit}{8}-\\p{XDigit}{4}-\\p{XDigit}{4}-\\p{XDigit}{4}-\\p{XDigit}{12}" }, + "GetCompatibleElasticsearchVersionsRequest":{ + "type":"structure", + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"querystring", + "locationName":"domainName" + } + }, + "documentation":"

Container for request parameters to GetCompatibleElasticsearchVersions operation.

" + }, + "GetCompatibleElasticsearchVersionsResponse":{ + "type":"structure", + "members":{ + "CompatibleElasticsearchVersions":{ + "shape":"CompatibleElasticsearchVersionsList", + "documentation":"

A map of compatible Elasticsearch versions returned as part of the GetCompatibleElasticsearchVersions operation.

" + } + }, + "documentation":"

Container for response returned by GetCompatibleElasticsearchVersions operation.

" + }, + "GetUpgradeHistoryRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

Container for request parameters to GetUpgradeHistory operation.

" + }, + "GetUpgradeHistoryResponse":{ + "type":"structure", + "members":{ + "UpgradeHistories":{ + "shape":"UpgradeHistoryList", + "documentation":"

A list of UpgradeHistory objects corresponding to each Upgrade or Upgrade Eligibility Check performed on a domain, returned as part of the GetUpgradeHistoryResponse object.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Pagination token that needs to be supplied to the next call to get the next page of results.
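A short boto3 sketch of driving this token by hand; the domain name is a placeholder:

```python
import boto3

es = boto3.client("es")

histories, token = [], None
while True:
    kwargs = {"DomainName": "my-domain", "MaxResults": 10}   # placeholder domain
    if token:
        kwargs["NextToken"] = token
    page = es.get_upgrade_history(**kwargs)
    histories.extend(page.get("UpgradeHistories", []))
    token = page.get("NextToken")
    if not token:
        break
```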

" + } + }, + "documentation":"

Container for response returned by GetUpgradeHistory operation.

" + }, + "GetUpgradeStatusRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + } + }, + "documentation":"

Container for request parameters to GetUpgradeStatus operation.

" + }, + "GetUpgradeStatusResponse":{ + "type":"structure", + "members":{ + "UpgradeStep":{ + "shape":"UpgradeStep", + "documentation":"

Represents one of 3 steps that an Upgrade or Upgrade Eligibility Check goes through:

" + }, + "StepStatus":{ + "shape":"UpgradeStatus", + "documentation":"

One of 4 statuses that a step can go through, returned as part of the GetUpgradeStatusResponse object. The status can take one of the following values:

" + }, + "UpgradeName":{ + "shape":"UpgradeName", + "documentation":"

A string that briefly describes the update.

" + } + }, + "documentation":"

Container for response returned by GetUpgradeStatus operation.

" + }, "IdentityPoolId":{ "type":"string", "max":55, @@ -1107,6 +1282,11 @@ "error":{"httpStatusCode":409}, "exception":true }, + "Issue":{"type":"string"}, + "Issues":{ + "type":"list", + "member":{"shape":"Issue"} + }, "KmsKeyId":{ "type":"string", "max":500, @@ -1282,10 +1462,11 @@ }, "LogType":{ "type":"string", - "documentation":"

Type of Log File, it can be one of the following:

", + "documentation":"

Type of Log File. It can be one of the following:
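For illustration, a boto3 sketch that enables the new ES_APPLICATION_LOGS type through the LogPublishingOptions map on UpdateElasticsearchDomainConfig, assuming a CloudWatch Logs group (placeholder ARN) with the required resource policy already exists:

```python
import boto3

es = boto3.client("es")

es.update_elasticsearch_domain_config(
    DomainName="my-domain",                       # placeholder
    LogPublishingOptions={
        "ES_APPLICATION_LOGS": {
            "CloudWatchLogsLogGroupArn": (
                "arn:aws:logs:us-east-1:123456789012:"
                "log-group:/aws/aes/domains/my-domain/application-logs"  # placeholder
            ),
            "Enabled": True,
        }
    },
)
```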

", "enum":[ "INDEX_SLOW_LOGS", - "SEARCH_SLOW_LOGS" + "SEARCH_SLOW_LOGS", + "ES_APPLICATION_LOGS" ] }, "MaxResults":{ @@ -1591,6 +1772,7 @@ }, "documentation":"

Status of a daily automated snapshot.

" }, + "StartTimestamp":{"type":"timestamp"}, "StorageSubTypeName":{ "type":"string", "documentation":"

SubType of the given storage type. List of available sub-storage options: For the \"instance\" storageType we won't have any storageSubType; in the case of the \"ebs\" storageType we will have the following valid storageSubTypes

  1. standard
  2. gp2
  3. io1
Refer to VolumeType for more information regarding the above EBS storage options.

" @@ -1734,6 +1916,110 @@ "documentation":"

The result of an UpdateElasticsearchDomain request. Contains the status of the Elasticsearch domain being updated.

" }, "UpdateTimestamp":{"type":"timestamp"}, + "UpgradeElasticsearchDomainRequest":{ + "type":"structure", + "required":[ + "DomainName", + "TargetVersion" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "TargetVersion":{ + "shape":"ElasticsearchVersionString", + "documentation":"

The version of Elasticsearch that you intend to upgrade the domain to.

" + }, + "PerformCheckOnly":{ + "shape":"Boolean", + "documentation":"

This flag, when set to True, indicates that an Upgrade Eligibility Check needs to be performed. This will not actually perform the Upgrade.

" + } + }, + "documentation":"

Container for request parameters to UpgradeElasticsearchDomain operation.

" + }, + "UpgradeElasticsearchDomainResponse":{ + "type":"structure", + "members":{ + "DomainName":{"shape":"DomainName"}, + "TargetVersion":{ + "shape":"ElasticsearchVersionString", + "documentation":"

The version of Elasticsearch that you intend to upgrade the domain to.

" + }, + "PerformCheckOnly":{ + "shape":"Boolean", + "documentation":"

This flag, when set to True, indicates that an Upgrade Eligibility Check needs to be performed. This will not actually perform the Upgrade.

" + } + }, + "documentation":"

Container for response returned by UpgradeElasticsearchDomain operation.

" + }, + "UpgradeHistory":{ + "type":"structure", + "members":{ + "UpgradeName":{ + "shape":"UpgradeName", + "documentation":"

A string that briefly describes the update.

" + }, + "StartTimestamp":{ + "shape":"StartTimestamp", + "documentation":"

UTC Timestamp at which the Upgrade API call was made in \"yyyy-MM-ddTHH:mm:ssZ\" format.

" + }, + "UpgradeStatus":{ + "shape":"UpgradeStatus", + "documentation":"

The overall status of the update. The status can take one of the following values:

" + }, + "StepsList":{ + "shape":"UpgradeStepsList", + "documentation":"

A list of UpgradeStepItems representing information about each step performed as part of a specific Upgrade or Upgrade Eligibility Check.

" + } + }, + "documentation":"

History of the last 10 Upgrades and Upgrade Eligibility Checks.

" + }, + "UpgradeHistoryList":{ + "type":"list", + "member":{"shape":"UpgradeHistory"} + }, + "UpgradeName":{"type":"string"}, + "UpgradeStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCEEDED", + "SUCCEEDED_WITH_ISSUES", + "FAILED" + ] + }, + "UpgradeStep":{ + "type":"string", + "enum":[ + "PRE_UPGRADE_CHECK", + "SNAPSHOT", + "UPGRADE" + ] + }, + "UpgradeStepItem":{ + "type":"structure", + "members":{ + "UpgradeStep":{ + "shape":"UpgradeStep", + "documentation":"

Represents one of 3 steps that an Upgrade or Upgrade Eligibility Check goes through:

" + }, + "UpgradeStepStatus":{ + "shape":"UpgradeStatus", + "documentation":"

The status of a particular step during an upgrade. The status can take one of the following values:

" + }, + "Issues":{ + "shape":"Issues", + "documentation":"

A list of strings containing detailed information about the errors encountered in a particular step.

" + }, + "ProgressPercent":{ + "shape":"Double", + "documentation":"

The floating point value representing the progress percentage of a particular step.

" + } + }, + "documentation":"

Represents a single step of the Upgrade or Upgrade Eligibility Check workflow.

" + }, + "UpgradeStepsList":{ + "type":"list", + "member":{"shape":"UpgradeStepItem"} + }, "UserPoolId":{ "type":"string", "max":55, diff --git a/botocore/data/glacier/2012-06-01/service-2.json b/botocore/data/glacier/2012-06-01/service-2.json index 57a8d8e2..350983c2 100644 --- a/botocore/data/glacier/2012-06-01/service-2.json +++ b/botocore/data/glacier/2012-06-01/service-2.json @@ -352,7 +352,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. The List Job operation returns a list of these jobs sorted by job initiation time.

Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails. For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists.

The List Jobs operation supports pagination. You should always check the response Marker field. If there are no more jobs to list, the Marker field is set to null. If there are more jobs to list, the Marker field is set to a non-null value, which you can use to continue the pagination of the list. To return a list of jobs that begins at a specific job, set the marker request parameter to the Marker value for that job that you obtained from a previous List Jobs request.

You can set a maximum limit for the number of jobs returned in the response by specifying the limit parameter in the request. The default limit is 1000. The number of jobs returned might be fewer than the limit, but the number of returned jobs never exceeds the limit.

Additionally, you can filter the jobs list returned by specifying the optional statuscode parameter or completed parameter, or both. Using the statuscode parameter, you can specify to return only jobs that match either the InProgress, Succeeded, or Failed status. Using the completed parameter, you can specify to return only jobs that were completed (true) or jobs that were not completed (false).

For more information about using this operation, see the documentation for the underlying REST API List Jobs.

" + "documentation":"

This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. The List Job operation returns a list of these jobs sorted by job initiation time.

Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails. For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists.

The List Jobs operation supports pagination. You should always check the response Marker field. If there are no more jobs to list, the Marker field is set to null. If there are more jobs to list, the Marker field is set to a non-null value, which you can use to continue the pagination of the list. To return a list of jobs that begins at a specific job, set the marker request parameter to the Marker value for that job that you obtained from a previous List Jobs request.

You can set a maximum limit for the number of jobs returned in the response by specifying the limit parameter in the request. The default limit is 50. The number of jobs returned might be fewer than the limit, but the number of returned jobs never exceeds the limit.

Additionally, you can filter the jobs list returned by specifying the optional statuscode parameter or completed parameter, or both. Using the statuscode parameter, you can specify to return only jobs that match either the InProgress, Succeeded, or Failed status. Using the completed parameter, you can specify to return only jobs that were completed (true) or jobs that were not completed (false).

For more information about using this operation, see the documentation for the underlying REST API List Jobs.
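As an illustrative sketch of the Marker-based pagination described above (the vault name "examplevault" and the limit value are placeholders, not part of the service model), a boto3 loop might look like:

    import boto3

    glacier = boto3.client("glacier")

    # Page through the vault's jobs; Glacier's limit parameter is a string.
    marker = None
    while True:
        kwargs = {"accountId": "-", "vaultName": "examplevault", "limit": "50"}
        if marker:
            kwargs["marker"] = marker
        page = glacier.list_jobs(**kwargs)
        for job in page.get("JobList", []):
            print(job["JobId"], job["StatusCode"])
        marker = page.get("Marker")
        if not marker:  # an absent/null Marker means there are no more jobs to list
            break

botocore also ships paginators for this operation, so glacier.get_paginator("list_jobs") can replace the manual marker loop.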

" }, "ListMultipartUploads":{ "name":"ListMultipartUploads", @@ -368,7 +368,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order.

The List Multipart Uploads operation supports pagination. By default, this operation returns up to 1,000 multipart uploads in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of multipart uploads that begins at a specific upload, set the marker request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the limit parameter in the request.

Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon Glacier and List Multipart Uploads in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order.

The List Multipart Uploads operation supports pagination. By default, this operation returns up to 50 multipart uploads in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of multipart uploads that begins at a specific upload, set the marker request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the limit parameter in the request.

Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon Glacier and List Multipart Uploads in the Amazon Glacier Developer Guide.

" }, "ListParts":{ "name":"ListParts", @@ -384,7 +384,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload. List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.

The List Parts operation supports pagination. By default, this operation returns up to 1,000 uploaded parts in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of parts that begins at a specific part, set the marker request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon Glacier and List Parts in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload. List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.

The List Parts operation supports pagination. By default, this operation returns up to 50 uploaded parts in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of parts that begins at a specific part, set the marker request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon Glacier and List Parts in the Amazon Glacier Developer Guide.

" }, "ListProvisionedCapacity":{ "name":"ListProvisionedCapacity", @@ -431,7 +431,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name.

By default, this operation returns up to 1,000 items. If there are more vaults to list, the response marker field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the marker field is null. To return a list of vaults that begins at a specific vault, set the marker request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon Glacier and List Vaults in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name.

By default, this operation returns up to 10 items. If there are more vaults to list, the response marker field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the marker field is null. To return a list of vaults that begins at a specific vault, set the marker request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon Glacier and List Vaults in the Amazon Glacier Developer Guide.

" }, "PurchaseProvisionedCapacity":{ "name":"PurchaseProvisionedCapacity", @@ -1745,7 +1745,7 @@ }, "limit":{ "shape":"string", - "documentation":"

The maximum number of jobs to be returned. The default limit is 1000. The number of jobs returned might be fewer than the specified limit, but the number of returned jobs never exceeds the limit.

", + "documentation":"

The maximum number of jobs to be returned. The default limit is 50. The number of jobs returned might be fewer than the specified limit, but the number of returned jobs never exceeds the limit.

", "location":"querystring", "locationName":"limit" }, @@ -1811,7 +1811,7 @@ }, "limit":{ "shape":"string", - "documentation":"

Specifies the maximum number of uploads returned in the response body. If this value is not specified, the List Uploads operation returns up to 1,000 uploads.

", + "documentation":"

Specifies the maximum number of uploads returned in the response body. If this value is not specified, the List Uploads operation returns up to 50 uploads.

", "location":"querystring", "locationName":"limit" } @@ -1866,7 +1866,7 @@ }, "limit":{ "shape":"string", - "documentation":"

The maximum number of parts to be returned. The default limit is 1000. The number of parts returned might be fewer than the specified limit, but the number of returned parts never exceeds the limit.

", + "documentation":"

The maximum number of parts to be returned. The default limit is 50. The number of parts returned might be fewer than the specified limit, but the number of returned parts never exceeds the limit.

", "location":"querystring", "locationName":"limit" } @@ -1978,7 +1978,7 @@ }, "limit":{ "shape":"string", - "documentation":"

The maximum number of vaults to be returned. The default limit is 1000. The number of vaults returned might be fewer than the specified limit, but the number of returned vaults never exceeds the limit.

", + "documentation":"

The maximum number of vaults to be returned. The default limit is 10. The number of vaults returned might be fewer than the specified limit, but the number of returned vaults never exceeds the limit.

", "location":"querystring", "locationName":"limit" } @@ -2576,5 +2576,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"

Amazon Glacier is a storage solution for \"cold data.\"

Amazon Glacier is an extremely low-cost storage service that provides secure, durable, and easy-to-use storage for data backup and archival. With Amazon Glacier, customers can store their data cost effectively for months, years, or decades. Amazon Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don't have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations.

Amazon Glacier is a great storage choice when low storage cost is paramount, your data is rarely retrieved, and retrieval latency of several hours is acceptable. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, see Amazon Simple Storage Service (Amazon S3).

You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Amazon Glacier.

If you are a first-time user of Amazon Glacier, we recommend that you begin by reading the following sections in the Amazon Glacier Developer Guide:

" + "documentation":"

Amazon Glacier is a storage solution for \"cold data.\"

Amazon Glacier is an extremely low-cost storage service that provides secure, durable, and easy-to-use storage for data backup and archival. With Amazon Glacier, customers can store their data cost effectively for months, years, or decades. Amazon Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don't have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations.

Amazon Glacier is a great storage choice when low storage cost is paramount and your data is rarely retrieved. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, see Amazon Simple Storage Service (Amazon S3).

You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Amazon Glacier.

If you are a first-time user of Amazon Glacier, we recommend that you begin by reading the following sections in the Amazon Glacier Developer Guide:

" } diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index a8e5c661..58b569d7 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -2305,7 +2305,11 @@ }, "PublicKey":{ "shape":"GenericString", - "documentation":"

The public key to use for authentication.

" + "documentation":"

The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility, as the recommended attribute to use is PublicKeys.

" + }, + "PublicKeys":{ + "shape":"PublicKeysList", + "documentation":"

A list of public keys to be used by the DevEndpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.

If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys: call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.

" }, "NumberOfNodes":{ "shape":"IntegerValue", @@ -3032,7 +3036,7 @@ }, "PrivateAddress":{ "shape":"GenericString", - "documentation":"

The private address used by this DevEndpoint.

" + "documentation":"

A private DNS to access the DevEndpoint within a VPC, if the DevEndpoint is created within one.

" }, "ZeppelinRemoteSparkInterpreterPort":{ "shape":"IntegerValue", @@ -3084,7 +3088,11 @@ }, "PublicKey":{ "shape":"GenericString", - "documentation":"

The public key to be used by this DevEndpoint for authentication.

" + "documentation":"

The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility, as the recommended attribute to use is PublicKeys.

" + }, + "PublicKeys":{ + "shape":"PublicKeysList", + "documentation":"

A list of public keys to be used by the DevEndpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.

If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys: call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.

" } }, "documentation":"

A development endpoint where a developer can remotely debug ETL scripts.

" @@ -4861,6 +4869,11 @@ "GROUP" ] }, + "PublicKeysList":{ + "type":"list", + "member":{"shape":"GenericString"}, + "max":5 + }, "PythonScript":{"type":"string"}, "ResetJobBookmarkRequest":{ "type":"structure", @@ -5737,6 +5750,14 @@ "shape":"GenericString", "documentation":"

The public key for the DevEndpoint to use.

" }, + "AddPublicKeys":{ + "shape":"PublicKeysList", + "documentation":"

The list of public keys for the DevEndpoint to use.

" + }, + "DeletePublicKeys":{ + "shape":"PublicKeysList", + "documentation":"

The list of public keys to be deleted from the DevEndpoint.
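A minimal boto3 sketch of the key-rotation path described above (the endpoint name and key material are placeholders):

    import boto3

    glue = boto3.client("glue")

    # Replace the endpoint's single legacy public key with a per-client list
    # (PublicKeysList allows at most 5 keys).
    glue.update_dev_endpoint(
        EndpointName="my-dev-endpoint",                  # placeholder endpoint name
        DeletePublicKeys=["ssh-rsa AAAA...old-key"],     # the key set at creation time
        AddPublicKeys=[
            "ssh-rsa AAAA...client-a",
            "ssh-rsa AAAA...client-b",
        ],
    )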

" + }, "CustomLibraries":{ "shape":"DevEndpointCustomLibraries", "documentation":"

Custom Python or Java libraries to be loaded in the DevEndpoint.

" diff --git a/botocore/data/greengrass/2017-06-07/service-2.json b/botocore/data/greengrass/2017-06-07/service-2.json index 7e6e1697..2bf2ec8a 100644 --- a/botocore/data/greengrass/2017-06-07/service-2.json +++ b/botocore/data/greengrass/2017-06-07/service-2.json @@ -4271,7 +4271,7 @@ }, "SourcePath" : { "shape" : "__string", - "documentation" : "The local absolute path of the volume resource on the host. The source path for a volume resource type cannot start with ''/proc'' or ''/sys''." + "documentation" : "The local absolute path of the volume resource on the host. The source path for a volume resource type cannot start with ''/sys''." } }, "documentation" : "Attributes that define a local volume resource." diff --git a/botocore/data/health/2016-08-04/service-2.json b/botocore/data/health/2016-08-04/service-2.json index 89eb5029..7a3b3ca9 100644 --- a/botocore/data/health/2016-08-04/service-2.json +++ b/botocore/data/health/2016-08-04/service-2.json @@ -108,7 +108,7 @@ }, "eventArn":{ "shape":"eventArn", - "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/EVENT_TYPE_PLUS_ID . Example: arn:aws:health:us-east-1::event/AWS_EC2_MAINTENANCE_5331

" + "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID. Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

" }, "entityValue":{ "shape":"entityValue", @@ -187,7 +187,7 @@ "members":{ "eventArns":{ "shape":"EventArnsList", - "documentation":"

A list of event ARNs (unique identifiers). For example: \"arn:aws:health:us-east-1::event/AWS_EC2_MAINTENANCE_5331\", \"arn:aws:health:us-west-1::event/AWS_EBS_LOST_VOLUME_xyz\"

" + "documentation":"

A list of event ARNs (unique identifiers). For example: \"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"
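For illustration, a minimal sketch of passing ARNs in the new format to this operation via boto3 (the ARN is the documentation example above; the AWS Health API requires a Business or Enterprise support plan):

    import boto3

    health = boto3.client("health", region_name="us-east-1")  # AWS Health is served from us-east-1

    response = health.describe_event_details(
        eventArns=[
            "arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/"
            "EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456",
        ]
    )
    for detail in response.get("successfulSet", []):
        print(detail["event"]["arn"], detail["eventDescription"]["latestDescription"])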

" } } }, @@ -245,7 +245,7 @@ "members":{ "eventArns":{ "shape":"eventArnList", - "documentation":"

A list of event ARNs (unique identifiers). For example: \"arn:aws:health:us-east-1::event/AWS_EC2_MAINTENANCE_5331\", \"arn:aws:health:us-west-1::event/AWS_EBS_LOST_VOLUME_xyz\"

" + "documentation":"

A list of event ARNs (unique identifiers). For example: \"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"

" }, "locale":{ "shape":"locale", @@ -343,7 +343,7 @@ "members":{ "eventArn":{ "shape":"eventArn", - "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/EVENT_TYPE_PLUS_ID . Example: arn:aws:health:us-east-1::event/AWS_EC2_MAINTENANCE_5331

" + "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID. Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

" }, "count":{ "shape":"count", @@ -362,7 +362,7 @@ "members":{ "eventArns":{ "shape":"eventArnList", - "documentation":"

A list of event ARNs (unique identifiers). For example: \"arn:aws:health:us-east-1::event/AWS_EC2_MAINTENANCE_5331\", \"arn:aws:health:us-west-1::event/AWS_EBS_LOST_VOLUME_xyz\"

" + "documentation":"

A list of event ARNs (unique identifiers). For example: \"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"

" }, "entityArns":{ "shape":"entityArnList", @@ -396,7 +396,7 @@ "members":{ "arn":{ "shape":"eventArn", - "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/EVENT_TYPE_PLUS_ID . Example: arn:aws:health:us-east-1::event/AWS_EC2_MAINTENANCE_5331

" + "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID. Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

" }, "service":{ "shape":"service", @@ -408,7 +408,7 @@ }, "eventTypeCategory":{ "shape":"eventTypeCategory", - "documentation":"

The

" + "documentation":"

The category of the event. Possible values are issue, scheduledChange, and accountNotification.

" }, "region":{ "shape":"region", @@ -494,7 +494,7 @@ "members":{ "eventArn":{ "shape":"eventArn", - "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/EVENT_TYPE_PLUS_ID . Example: arn:aws:health:us-east-1::event/AWS_EC2_MAINTENANCE_5331

" + "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID. Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

" }, "errorName":{ "shape":"string", @@ -512,7 +512,7 @@ "members":{ "eventArns":{ "shape":"eventArnList", - "documentation":"

A list of event ARNs (unique identifiers). For example: \"arn:aws:health:us-east-1::event/AWS_EC2_MAINTENANCE_5331\", \"arn:aws:health:us-west-1::event/AWS_EBS_LOST_VOLUME_xyz\"

" + "documentation":"

A list of event ARNs (unique identifiers). For example: \"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"

" }, "eventTypeCodes":{ "shape":"eventTypeList", @@ -698,7 +698,7 @@ "eventArn":{ "type":"string", "max":1600, - "pattern":"arn:aws:health:[^:]*:[^:]*:event/[\\w-]+" + "pattern":"arn:aws:health:[^:]*:[^:]*:event(?:/[\\w-]+){1}((?:/[\\w-]+){2})?" }, "eventArnList":{ "type":"list", diff --git a/botocore/data/iam/2010-05-08/service-2.json b/botocore/data/iam/2010-05-08/service-2.json index 4159d8b6..be52c7ba 100644 --- a/botocore/data/iam/2010-05-08/service-2.json +++ b/botocore/data/iam/2010-05-08/service-2.json @@ -562,6 +562,20 @@ ], "documentation":"

Deletes the specified role. The role must not have any policies attached. For more information about roles, go to Working with Roles.

Make sure that you do not have any Amazon EC2 instances running with the role you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

" }, + "DeleteRolePermissionsBoundary":{ + "name":"DeleteRolePermissionsBoundary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRolePermissionsBoundaryRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"UnmodifiableEntityException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes the permissions boundary for the specified IAM role.

Deleting the permissions boundary for a role might increase its permissions by allowing anyone who assumes the role to perform all the actions granted in its permissions policies.

" + }, "DeleteRolePolicy":{ "name":"DeleteRolePolicy", "http":{ @@ -678,6 +692,19 @@ ], "documentation":"

Deletes the specified IAM user. The user must not belong to any groups or have any access keys, signing certificates, or attached policies.

" }, + "DeleteUserPermissionsBoundary":{ + "name":"DeleteUserPermissionsBoundary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserPermissionsBoundaryRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes the permissions boundary for the specified IAM user.

Deleting the permissions boundary for a user might increase its permissions by allowing the user to perform all the actions granted in its permissions policies.

" + }, "DeleteUserPolicy":{ "name":"DeleteUserPolicy", "http":{ @@ -1591,6 +1618,22 @@ ], "documentation":"

Adds or updates an inline policy document that is embedded in the specified IAM group.

A user can also have managed policies attached to it. To attach a managed policy to a group, use AttachGroupPolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

For information about limits on the number of inline policies that you can embed in a group, see Limitations on IAM Entities in the IAM User Guide.

Because policy documents can be large, you should use POST rather than GET when calling PutGroupPolicy. For general information about using the Query API with IAM, go to Making Query Requests in the IAM User Guide.

" }, + "PutRolePermissionsBoundary":{ + "name":"PutRolePermissionsBoundary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRolePermissionsBoundaryRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"UnmodifiableEntityException"}, + {"shape":"PolicyNotAttachableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds or updates the policy that is specified as the IAM role's permissions boundary. You can use an AWS managed policy or a customer managed policy to set the boundary for a role. Use the boundary to control the maximum permissions that the role can have. Setting a permissions boundary is an advanced feature that can affect the permissions for the role.

You cannot set the boundary for a service-linked role.

Policies used as permissions boundaries do not provide permissions. You must also attach a permissions policy to the role. To learn how the effective permissions for a role are evaluated, see IAM JSON Policy Evaluation Logic in the IAM User Guide.
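As a sketch of attaching and later removing a boundary with boto3 (the role name and managed-policy ARN are placeholders):

    import boto3

    iam = boto3.client("iam")

    # Cap the role's effective permissions with a managed policy used as a boundary.
    iam.put_role_permissions_boundary(
        RoleName="example-app-role",                                          # placeholder
        PermissionsBoundary="arn:aws:iam::123456789012:policy/DevBoundary",   # placeholder
    )

    # Removing the boundary later may broaden what the role can do.
    iam.delete_role_permissions_boundary(RoleName="example-app-role")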

" + }, "PutRolePolicy":{ "name":"PutRolePolicy", "http":{ @@ -1607,6 +1650,21 @@ ], "documentation":"

Adds or updates an inline policy document that is embedded in the specified IAM role.

When you embed an inline policy in a role, the inline policy is used as part of the role's access (permissions) policy. The role's trust policy is created at the same time as the role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy. For more information about IAM roles, go to Using Roles to Delegate Permissions and Federate Identities.

A role can also have a managed policy attached to it. To attach a managed policy to a role, use AttachRolePolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

For information about limits on the number of inline policies that you can embed with a role, see Limitations on IAM Entities in the IAM User Guide.

Because policy documents can be large, you should use POST rather than GET when calling PutRolePolicy. For general information about using the Query API with IAM, go to Making Query Requests in the IAM User Guide.

" }, + "PutUserPermissionsBoundary":{ + "name":"PutUserPermissionsBoundary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutUserPermissionsBoundaryRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"PolicyNotAttachableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds or updates the policy that is specified as the IAM user's permissions boundary. You can use an AWS managed policy or a customer managed policy to set the boundary for a user. Use the boundary to control the maximum permissions that the user can have. Setting a permissions boundary is an advanced feature that can affect the permissions for the user.

Policies that are used as permissions boundaries do not provide permissions. You must also attach a permissions policy to the user. To learn how the effective permissions for a user are evaluated, see IAM JSON Policy Evaluation Logic in the IAM User Guide.

" + }, "PutUserPolicy":{ "name":"PutUserPolicy", "http":{ @@ -1928,7 +1986,7 @@ "errors":[ {"shape":"NoSuchEntityException"} ], - "documentation":"

Sets the status of a service-specific credential to Active or Inactive. Service-specific credentials that are inactive cannot be used for authentication to the service. This operation can be used to disable a user’s service-specific credential as part of a credential rotation work flow.

" + "documentation":"

Sets the status of a service-specific credential to Active or Inactive. Service-specific credentials that are inactive cannot be used for authentication to the service. This operation can be used to disable a user's service-specific credential as part of a credential rotation work flow.

" }, "UpdateSigningCertificate":{ "name":"UpdateSigningCertificate", @@ -2216,6 +2274,20 @@ } } }, + "AttachedPermissionsBoundary":{ + "type":"structure", + "members":{ + "PermissionsBoundaryType":{ + "shape":"PermissionsBoundaryAttachmentType", + "documentation":"

The permissions boundary usage type that indicates what type of IAM resource is used as the permissions boundary for an entity. This data type can only have a value of Policy.

" + }, + "PermissionsBoundaryArn":{ + "shape":"arnType", + "documentation":"

The ARN of the policy used to set the permissions boundary for the user or role.

" + } + }, + "documentation":"

Contains information about an attached permissions boundary.

An attached permissions boundary is a managed policy that has been attached to a user or role to set the permissions boundary.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" + }, "AttachedPolicy":{ "type":"structure", "members":{ @@ -2537,6 +2609,10 @@ "MaxSessionDuration":{ "shape":"roleMaxSessionDurationType", "documentation":"

The maximum session duration (in seconds) that you want to set for the specified role. If you do not specify a value for this setting, the default maximum of one hour is applied. This setting can have a value from 1 hour to 12 hours.

Anyone who assumes the role from the AWS CLI or API can use the DurationSeconds API parameter or the duration-seconds CLI parameter to request a longer session. The MaxSessionDuration setting determines the maximum duration that can be requested using the DurationSeconds parameter. If users don't specify a value for the DurationSeconds parameter, their security credentials are valid for one hour by default. This applies when you use the AssumeRole* API operations or the assume-role* CLI operations but does not apply when you use those operations to create a console URL. For more information, see Using IAM Roles in the IAM User Guide.

" + }, + "PermissionsBoundary":{ + "shape":"arnType", + "documentation":"

The ARN of the policy that is used to set the permissions boundary for the role.

" } } }, @@ -2642,6 +2718,10 @@ "UserName":{ "shape":"userNameType", "documentation":"

The name of the user to create.

This parameter allows (per its regex pattern) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: _+=,.@-. User names are not distinguished by case. For example, you cannot create users named both \"TESTUSER\" and \"testuser\".

" + }, + "PermissionsBoundary":{ + "shape":"arnType", + "documentation":"

The ARN of the policy that is used to set the permissions boundary for the user.

" } } }, @@ -2857,6 +2937,16 @@ } } }, + "DeleteRolePermissionsBoundaryRequest":{ + "type":"structure", + "required":["RoleName"], + "members":{ + "RoleName":{ + "shape":"roleNameType", + "documentation":"

The name (friendly name, not ARN) of the IAM role from which you want to remove the permissions boundary.

" + } + } + }, "DeleteRolePolicyRequest":{ "type":"structure", "required":[ @@ -2969,6 +3059,16 @@ } } }, + "DeleteUserPermissionsBoundaryRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{ + "shape":"userNameType", + "documentation":"

The name (friendly name, not ARN) of the IAM user from which you want to remove the permissions boundary.

" + } + } + }, "DeleteUserPolicyRequest":{ "type":"structure", "required":[ @@ -3814,7 +3914,7 @@ "members":{ "User":{ "shape":"User", - "documentation":"

A structure containing details about the IAM user.

" + "documentation":"

A structure containing details about the IAM user.

Due to a service issue, password last used data does not include password use from May 3rd 2018 22:50 PDT to May 23rd 2018 14:08 PDT. This affects last sign-in dates shown in the IAM console and password last used dates in the IAM credential report, and returned by this GetUser API. If users signed in during the affected time, the password last used date that is returned is the date the user last signed in before May 3rd 2018. For users that signed in after May 23rd 2018 14:08 PDT, the returned password last used date is accurate.

If you use password last used information to identify unused credentials for deletion, such as deleting users who did not sign in to AWS in the last 90 days, we recommend that you adjust your evaluation window to include dates after May 23rd 2018. Alternatively, if your users use access keys to access AWS programmatically you can refer to access key last used information because it is accurate for all dates.

" } }, "documentation":"

Contains the response to a successful GetUser request.

" @@ -4217,6 +4317,10 @@ "shape":"pathType", "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all entities.

This parameter allows (per its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, + "PolicyUsageFilter":{ + "shape":"PolicyUsageType", + "documentation":"

The policy usage method to use for filtering the results.

To list only permissions policies, set PolicyUsageFilter to PermissionsPolicy. To list only the policies used to set permissions boundaries, set the value to PermissionsBoundary.

This parameter is optional. If it is not included, all policies are returned.

" + }, "Marker":{ "shape":"markerType", "documentation":"

Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

" @@ -4502,6 +4606,10 @@ "shape":"policyPathType", "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies. This parameter allows (per its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, + "PolicyUsageFilter":{ + "shape":"PolicyUsageType", + "documentation":"

The policy usage method to use for filtering the results.

To list only permissions policies, set PolicyUsageFilter to PermissionsPolicy. To list only the policies used to set permissions boundaries, set the value to PermissionsBoundary.

This parameter is optional. If it is not included, all policies are returned.
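A short sketch of this filter with boto3 (restricting the listing to customer managed policies is an assumption made for brevity):

    import boto3

    iam = boto3.client("iam")

    # List only customer managed policies that are in use as permissions boundaries.
    paginator = iam.get_paginator("list_policies")
    for page in paginator.paginate(Scope="Local", PolicyUsageFilter="PermissionsBoundary"):
        for policy in page["Policies"]:
            print(policy["PolicyName"], policy["Arn"])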

" + }, "Marker":{ "shape":"markerType", "documentation":"

Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

" @@ -4987,6 +5095,10 @@ "shape":"attachmentCountType", "documentation":"

The number of principal entities (users, groups, and roles) that the policy is attached to.

" }, + "PermissionsBoundaryUsageCount":{ + "shape":"attachmentCountType", + "documentation":"

The number of entities (users and roles) for which the policy is used as the permissions boundary.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" + }, "IsAttachable":{ "shape":"booleanType", "documentation":"

Specifies whether the policy can be attached to an IAM user, group, or role.

" @@ -5114,6 +5226,10 @@ }, "exception":true }, + "PermissionsBoundaryAttachmentType":{ + "type":"string", + "enum":["PermissionsBoundaryPolicy"] + }, "Policy":{ "type":"structure", "members":{ @@ -5138,6 +5254,10 @@ "shape":"attachmentCountType", "documentation":"

The number of entities (users, groups, and roles) that the policy is attached to.

" }, + "PermissionsBoundaryUsageCount":{ + "shape":"attachmentCountType", + "documentation":"

The number of entities (users and roles) for which the policy is used to set the permissions boundary.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" + }, "IsAttachable":{ "shape":"booleanType", "documentation":"

Specifies whether the policy can be attached to an IAM user, group, or role.

" @@ -5253,6 +5373,14 @@ "none" ] }, + "PolicyUsageType":{ + "type":"string", + "documentation":"

The policy usage type that indicates whether the policy is used as a permissions policy or as the permissions boundary for an entity.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

", + "enum":[ + "PermissionsPolicy", + "PermissionsBoundary" + ] + }, "PolicyUser":{ "type":"structure", "members":{ @@ -5329,6 +5457,23 @@ } } }, + "PutRolePermissionsBoundaryRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PermissionsBoundary" + ], + "members":{ + "RoleName":{ + "shape":"roleNameType", + "documentation":"

The name (friendly name, not ARN) of the IAM role for which you want to set the permissions boundary.

" + }, + "PermissionsBoundary":{ + "shape":"arnType", + "documentation":"

The ARN of the policy that is used to set the permissions boundary for the role.

" + } + } + }, "PutRolePolicyRequest":{ "type":"structure", "required":[ @@ -5351,6 +5496,23 @@ } } }, + "PutUserPermissionsBoundaryRequest":{ + "type":"structure", + "required":[ + "UserName", + "PermissionsBoundary" + ], + "members":{ + "UserName":{ + "shape":"userNameType", + "documentation":"

The name (friendly name, not ARN) of the IAM user for which you want to set the permissions boundary.

" + }, + "PermissionsBoundary":{ + "shape":"arnType", + "documentation":"

The ARN of the policy that is used to set the permissions boundary for the user.

" + } + } + }, "PutUserPolicyRequest":{ "type":"structure", "required":[ @@ -5586,6 +5748,10 @@ "MaxSessionDuration":{ "shape":"roleMaxSessionDurationType", "documentation":"

The maximum session duration (in seconds) for the specified role. Anyone who uses the AWS CLI or API to assume the role can specify the duration using the optional DurationSeconds API parameter or duration-seconds CLI parameter.

" + }, + "PermissionsBoundary":{ + "shape":"AttachedPermissionsBoundary", + "documentation":"

The ARN of the policy used to set the permissions boundary for the role.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" } }, "documentation":"

Contains information about an IAM role. This structure is returned as a response element in several API operations that interact with roles.

" @@ -5625,6 +5791,10 @@ "AttachedManagedPolicies":{ "shape":"attachedPoliciesListType", "documentation":"

A list of managed policies attached to the role. These policies are the role's access (permissions) policies.

" + }, + "PermissionsBoundary":{ + "shape":"AttachedPermissionsBoundary", + "documentation":"

The ARN of the policy used to set the permissions boundary for the role.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" } }, "documentation":"

Contains information about an IAM role, including all of the role's policies.

This data type is used as a response element in the GetAccountAuthorizationDetails operation.

" @@ -5991,7 +6161,7 @@ }, "ResourceOwner":{ "shape":"ResourceNameType", - "documentation":"

An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn. This parameter is required only if you specify a resource-based policy and account that owns the resource is different from the account that owns the simulated calling user CallerArn.

" + "documentation":"

An ARN representing the AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn. This parameter is required only if you specify a resource-based policy and the account that owns the resource is different from the account that owns the simulated calling user CallerArn.

The ARN for an account uses the following syntax: arn:aws:iam::AWS-account-ID:root. For example, to represent the account with the 112233445566 ID, use the following ARN: arn:aws:iam::112233445566:root.
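For illustration, a sketch of passing the root-account ARN as ResourceOwner in a simulation with boto3 (the policy, action, and bucket are placeholders):

    import json
    import boto3

    iam = boto3.client("iam")

    policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}],
    })

    result = iam.simulate_custom_policy(
        PolicyInputList=[policy],
        ActionNames=["s3:GetObject"],
        ResourceArns=["arn:aws:s3:::example-bucket/report.csv"],   # placeholder bucket/object
        ResourceOwner="arn:aws:iam::112233445566:root",            # account that owns the bucket
    )
    for r in result["EvaluationResults"]:
        print(r["EvalActionName"], r["EvalDecision"])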

" }, "CallerArn":{ "shape":"ResourceNameType", @@ -6172,7 +6342,7 @@ }, "RequireSymbols":{ "shape":"booleanType", - "documentation":"

Specifies whether IAM user passwords must contain at least one of the following non-alphanumeric characters:

! @ # $ % ^ &amp; * ( ) _ + - = [ ] { } | '

If you do not specify a value for this parameter, then the operation uses the default value of false. The result is that passwords do not require at least one symbol character.

" + "documentation":"

Specifies whether IAM user passwords must contain at least one of the following non-alphanumeric characters:

! @ # $ % ^ & * ( ) _ + - = [ ] { } | '

If you do not specify a value for this parameter, then the operation uses the default value of false. The result is that passwords do not require at least one symbol character.

" }, "RequireNumbers":{ "shape":"booleanType", @@ -6463,7 +6633,7 @@ }, "SSHPublicKeyBody":{ "shape":"publicKeyMaterialType", - "documentation":"

The SSH public key. The public key must be encoded in ssh-rsa format or PEM format.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" + "documentation":"

The SSH public key. The public key must be encoded in ssh-rsa format or PEM format. The minimum bit-length of the public key is 2048 bits. For example, you can generate a 2048-bit key, and the resulting PEM file is 1679 bytes long.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" } } }, @@ -6575,6 +6745,10 @@ "PasswordLastUsed":{ "shape":"dateType", "documentation":"

The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential Reports topic in the Using IAM guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value) then it indicates that they never signed in with a password. This can be because:

A null does not mean that the user never had a password. Also, if the user does not currently have a password, but had one in the past, then this field contains the date and time the most recent password was used.

This value is returned only in the GetUser and ListUsers operations.

" + }, + "PermissionsBoundary":{ + "shape":"AttachedPermissionsBoundary", + "documentation":"

The ARN of the policy used to set the permissions boundary for the user.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" } }, "documentation":"

Contains information about an IAM user entity.

This data type is used as a response element in the following operations:

" @@ -6610,6 +6784,10 @@ "AttachedManagedPolicies":{ "shape":"attachedPoliciesListType", "documentation":"

A list of the managed policies attached to the user.

" + }, + "PermissionsBoundary":{ + "shape":"AttachedPermissionsBoundary", + "documentation":"

The ARN of the policy used to set the permissions boundary for the user.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" } }, "documentation":"

Contains information about an IAM user, including all the user's policies and all the IAM groups the user is in.

This data type is used as a response element in the GetAccountAuthorizationDetails operation.

" diff --git a/botocore/data/inspector/2016-02-16/service-2.json b/botocore/data/inspector/2016-02-16/service-2.json index a560d14c..cc6ef8a2 100644 --- a/botocore/data/inspector/2016-02-16/service-2.json +++ b/botocore/data/inspector/2016-02-16/service-2.json @@ -24,7 +24,8 @@ {"shape":"InternalException"}, {"shape":"InvalidInputException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Assigns attributes (key and value pairs) to the findings that are specified by the ARNs of the findings.

" }, @@ -42,9 +43,10 @@ {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, {"shape":"NoSuchEntityException"}, - {"shape":"InvalidCrossAccountRoleException"} + {"shape":"InvalidCrossAccountRoleException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], - "documentation":"

Creates a new assessment target using the ARN of the resource group that is generated by CreateResourceGroup. If the service-linked role isn’t already registered, also creates and registers a service-linked role to grant Amazon Inspector access to AWS Services needed to perform security assessments. You can create up to 50 assessment targets per AWS account. You can run up to 500 concurrent agents per AWS account. For more information, see Amazon Inspector Assessment Targets.

" + "documentation":"

Creates a new assessment target using the ARN of the resource group that is generated by CreateResourceGroup. If resourceGroupArn is not specified, all EC2 instances in the current AWS account and region are included in the assessment target. If the service-linked role isn’t already registered, this action also creates and registers a service-linked role to grant Amazon Inspector access to AWS Services needed to perform security assessments. You can create up to 50 assessment targets per AWS account. You can run up to 500 concurrent agents per AWS account. For more information, see Amazon Inspector Assessment Targets.
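A minimal sketch of the account-wide target described above, combined with a retry on the new ServiceTemporarilyUnavailableException (the target name and backoff schedule are assumptions):

    import time
    import boto3
    from botocore.exceptions import ClientError

    inspector = boto3.client("inspector")

    # Omitting resourceGroupArn targets every EC2 instance in the account and region.
    for attempt in range(5):
        try:
            target = inspector.create_assessment_target(assessmentTargetName="AllInstances")
            print(target["assessmentTargetArn"])
            break
        except ClientError as e:
            if e.response["Error"]["Code"] != "ServiceTemporarilyUnavailableException":
                raise
            time.sleep(2 ** attempt)  # the error is retryable; back off and try again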

" }, "CreateAssessmentTemplate":{ "name":"CreateAssessmentTemplate", @@ -59,9 +61,10 @@ {"shape":"InvalidInputException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], - "documentation":"

Creates an assessment template for the assessment target that is specified by the ARN of the assessment target. If the service-linked role isn’t already registered, also creates and registers a service-linked role to grant Amazon Inspector access to AWS Services needed to perform security assessments.

" + "documentation":"

Creates an assessment template for the assessment target that is specified by the ARN of the assessment target. If the service-linked role isn’t already registered, this action also creates and registers a service-linked role to grant Amazon Inspector access to AWS Services needed to perform security assessments.

" }, "CreateExclusionsPreview":{ "name":"CreateExclusionsPreview", @@ -76,7 +79,8 @@ {"shape":"PreviewGenerationInProgressException"}, {"shape":"InternalException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Starts the generation of an exclusions preview for the specified assessment template. The exclusions preview lists the potential exclusions (ExclusionPreview) that Inspector can detect before it runs the assessment.

" }, @@ -92,7 +96,8 @@ {"shape":"InternalException"}, {"shape":"InvalidInputException"}, {"shape":"LimitExceededException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Creates a resource group using the specified set of tags (key and value pairs) that are used to select the EC2 instances to be included in an Amazon Inspector assessment target. The created resource group is then used to create an Amazon Inspector assessment target. For more information, see CreateAssessmentTarget.

" }, @@ -108,7 +113,8 @@ {"shape":"InvalidInputException"}, {"shape":"AssessmentRunInProgressException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Deletes the assessment run that is specified by the ARN of the assessment run.

" }, @@ -124,7 +130,8 @@ {"shape":"InvalidInputException"}, {"shape":"AssessmentRunInProgressException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Deletes the assessment target that is specified by the ARN of the assessment target.

" }, @@ -140,7 +147,8 @@ {"shape":"InvalidInputException"}, {"shape":"AssessmentRunInProgressException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Deletes the assessment template that is specified by the ARN of the assessment template.

" }, @@ -268,7 +276,8 @@ {"shape":"AccessDeniedException"}, {"shape":"NoSuchEntityException"}, {"shape":"AssessmentRunInProgressException"}, - {"shape":"UnsupportedFeatureException"} + {"shape":"UnsupportedFeatureException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Produces an assessment report that includes detailed and comprehensive results of a specified assessment run.

" }, @@ -474,7 +483,8 @@ {"shape":"InternalException"}, {"shape":"InvalidInputException"}, {"shape":"AccessDeniedException"}, - {"shape":"InvalidCrossAccountRoleException"} + {"shape":"InvalidCrossAccountRoleException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Registers the IAM role that grants Amazon Inspector access to AWS Services needed to perform security assessments.

" }, @@ -490,7 +500,8 @@ {"shape":"InternalException"}, {"shape":"InvalidInputException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Removes entire attributes (key and value pairs) from the findings that are specified by the ARNs of the findings where an attribute with the specified key exists.

" }, @@ -505,7 +516,8 @@ {"shape":"InternalException"}, {"shape":"InvalidInputException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Sets tags (key and value pairs) to the assessment template that is specified by the ARN of the assessment template.

" }, @@ -524,7 +536,8 @@ {"shape":"AccessDeniedException"}, {"shape":"NoSuchEntityException"}, {"shape":"InvalidCrossAccountRoleException"}, - {"shape":"AgentsAlreadyRunningAssessmentException"} + {"shape":"AgentsAlreadyRunningAssessmentException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Starts the assessment run specified by the ARN of the assessment template. For this API to function properly, you must not exceed the limit of running up to 500 concurrent agents per AWS account.

" }, @@ -539,7 +552,8 @@ {"shape":"InternalException"}, {"shape":"InvalidInputException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Stops the assessment run that is specified by the ARN of the assessment run.

" }, @@ -555,7 +569,8 @@ {"shape":"InvalidInputException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Enables the process of sending Amazon Simple Notification Service (SNS) notifications about a specified event to a specified SNS topic.

" }, @@ -570,7 +585,8 @@ {"shape":"InternalException"}, {"shape":"InvalidInputException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], "documentation":"

Disables the process of sending Amazon Simple Notification Service (SNS) notifications about a specified event to a specified SNS topic.

" }, @@ -585,9 +601,10 @@ {"shape":"InternalException"}, {"shape":"InvalidInputException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceTemporarilyUnavailableException"} ], - "documentation":"

Updates the assessment target that is specified by the ARN of the assessment target.

" + "documentation":"

Updates the assessment target that is specified by the ARN of the assessment target.

If resourceGroupArn is not specified, all EC2 instances in the current AWS account and region are included in the assessment target.
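For illustration only: a hedged boto3 sketch of the behavior described above, where omitting resourceGroupArn widens the assessment target to every EC2 instance in the account and region. The ARN and name are placeholders.

    import boto3

    inspector = boto3.client('inspector')

    # No resourceGroupArn: the target now covers all EC2 instances in this account/region.
    inspector.update_assessment_target(
        assessmentTargetArn='arn:aws:inspector:us-east-1:123456789012:target/0-example',  # placeholder
        assessmentTargetName='AllInstances',
    )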

" } }, "shapes":{ @@ -1397,7 +1414,7 @@ }, "resourceGroupArn":{ "shape":"Arn", - "documentation":"

The ARN that specifies the resource group that is used to create the assessment target.

" + "documentation":"

The ARN that specifies the resource group that is used to create the assessment target. If resourceGroupArn is not specified, all EC2 instances in the current AWS account and region are included in the assessment target.

" } } }, @@ -1430,7 +1447,7 @@ }, "durationInSeconds":{ "shape":"AssessmentRunDuration", - "documentation":"

The duration of the assessment run in seconds. The default value is 3600 seconds (one hour).

" + "documentation":"

The duration of the assessment run in seconds.

" }, "rulesPackageArns":{ "shape":"AssessmentTemplateRulesPackageArnList", @@ -2988,6 +3005,25 @@ "max":128, "min":0 }, + "ServiceTemporarilyUnavailableException":{ + "type":"structure", + "required":[ + "message", + "canRetry" + ], + "members":{ + "message":{ + "shape":"ErrorMessage", + "documentation":"

Details of the exception error.

" + }, + "canRetry":{ + "shape":"Bool", + "documentation":"

You can wait and then retry your request.

" + } + }, + "documentation":"

The service is temporarily unavailable.

", + "exception":true + }, "SetTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index 1c16d66e..ae7565de 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -99,6 +99,24 @@ "documentation":"

Attaches the specified policy to the specified principal (certificate or other credential).

Note: This API is deprecated. Please use AttachPolicy instead.

", "deprecated":true }, + "AttachSecurityProfile":{ + "name":"AttachSecurityProfile", + "http":{ + "method":"PUT", + "requestUri":"/security-profiles/{securityProfileName}/targets" + }, + "input":{"shape":"AttachSecurityProfileRequest"}, + "output":{"shape":"AttachSecurityProfileResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"VersionConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Associates a Device Defender security profile with a thing group or with this account. Each thing group or account can have up to five security profiles associated with it.
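For illustration only: a minimal boto3 sketch of the new AttachSecurityProfile operation, assuming the usual snake_case method mapping; the profile name and thing-group ARN are placeholders.

    import boto3

    iot = boto3.client('iot')

    # Attach an existing security profile to a thing group (up to five per target).
    iot.attach_security_profile(
        securityProfileName='TemperatureSensors',
        securityProfileTargetArn='arn:aws:iot:us-east-1:123456789012:thinggroup/SensorFleet',
    )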

" + }, "AttachThingPrincipal":{ "name":"AttachThingPrincipal", "http":{ @@ -117,6 +135,22 @@ ], "documentation":"

Attaches the specified principal to the specified thing.

" }, + "CancelAuditTask":{ + "name":"CancelAuditTask", + "http":{ + "method":"PUT", + "requestUri":"/audit/tasks/{taskId}/cancel" + }, + "input":{"shape":"CancelAuditTaskRequest"}, + "output":{"shape":"CancelAuditTaskResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Cancels an audit that is in progress. The audit can be either scheduled or on-demand. If the audit is not in progress, an \"InvalidRequestException\" occurs.

" + }, "CancelCertificateTransfer":{ "name":"CancelCertificateTransfer", "http":{ @@ -334,6 +368,38 @@ ], "documentation":"

Creates a role alias.

" }, + "CreateScheduledAudit":{ + "name":"CreateScheduledAudit", + "http":{ + "method":"POST", + "requestUri":"/audit/scheduledaudits/{scheduledAuditName}" + }, + "input":{"shape":"CreateScheduledAuditRequest"}, + "output":{"shape":"CreateScheduledAuditResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a scheduled audit that is run at a specified time interval.
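For illustration only: a hedged boto3 sketch of CreateScheduledAudit. The check names are examples and must already be enabled for the account (see UpdateAccountAuditConfiguration).

    import boto3

    iot = boto3.client('iot')

    resp = iot.create_scheduled_audit(
        scheduledAuditName='WeeklySecurityAudit',
        frequency='WEEKLY',
        dayOfWeek='SUN',                                # required when frequency is WEEKLY or BIWEEKLY
        targetCheckNames=[
            'LOGGING_DISABLED_CHECK',                   # example check name
            'CA_CERTIFICATE_EXPIRING_CHECK',            # example check name
        ],
    )
    print(resp['scheduledAuditArn'])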

" + }, + "CreateSecurityProfile":{ + "name":"CreateSecurityProfile", + "http":{ + "method":"POST", + "requestUri":"/security-profiles/{securityProfileName}" + }, + "input":{"shape":"CreateSecurityProfileRequest"}, + "output":{"shape":"CreateSecurityProfileResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Creates a Device Defender security profile.

" + }, "CreateStream":{ "name":"CreateStream", "http":{ @@ -370,7 +436,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Creates a thing record in the registry.

" + "documentation":"

Creates a thing record in the registry.

This is a control plane operation. See Authorization for information about authorizing control plane actions.

" }, "CreateThingGroup":{ "name":"CreateThingGroup", @@ -386,7 +452,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Create a thing group.

" + "documentation":"

Create a thing group.

This is a control plane operation. See Authorization for information about authorizing control plane actions.

" }, "CreateThingType":{ "name":"CreateThingType", @@ -422,6 +488,22 @@ ], "documentation":"

Creates a rule. Creating rules is an administrator-level action. Any user who has permission to create rules will be able to access data processed by the rule.

" }, + "DeleteAccountAuditConfiguration":{ + "name":"DeleteAccountAuditConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/audit/configuration" + }, + "input":{"shape":"DeleteAccountAuditConfigurationRequest"}, + "output":{"shape":"DeleteAccountAuditConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Restores the default settings for Device Defender audits for this account. Any configuration data you entered is deleted and all audit checks are reset to disabled.

" + }, "DeleteAuthorizer":{ "name":"DeleteAuthorizer", "http":{ @@ -602,6 +684,38 @@ ], "documentation":"

Deletes a role alias

" }, + "DeleteScheduledAudit":{ + "name":"DeleteScheduledAudit", + "http":{ + "method":"DELETE", + "requestUri":"/audit/scheduledaudits/{scheduledAuditName}" + }, + "input":{"shape":"DeleteScheduledAuditRequest"}, + "output":{"shape":"DeleteScheduledAuditResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Deletes a scheduled audit.

" + }, + "DeleteSecurityProfile":{ + "name":"DeleteSecurityProfile", + "http":{ + "method":"DELETE", + "requestUri":"/security-profiles/{securityProfileName}" + }, + "input":{"shape":"DeleteSecurityProfileRequest"}, + "output":{"shape":"DeleteSecurityProfileResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"VersionConflictException"} + ], + "documentation":"

Deletes a Device Defender security profile.

" + }, "DeleteStream":{ "name":"DeleteStream", "http":{ @@ -721,6 +835,36 @@ ], "documentation":"

Deprecates a thing type. You cannot associate new things with a deprecated thing type.

" }, + "DescribeAccountAuditConfiguration":{ + "name":"DescribeAccountAuditConfiguration", + "http":{ + "method":"GET", + "requestUri":"/audit/configuration" + }, + "input":{"shape":"DescribeAccountAuditConfigurationRequest"}, + "output":{"shape":"DescribeAccountAuditConfigurationResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Gets information about the Device Defender audit settings for this account. Settings include how audit notifications are sent and which audit checks are enabled or disabled.

" + }, + "DescribeAuditTask":{ + "name":"DescribeAuditTask", + "http":{ + "method":"GET", + "requestUri":"/audit/tasks/{taskId}" + }, + "input":{"shape":"DescribeAuditTaskRequest"}, + "output":{"shape":"DescribeAuditTaskResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Gets information about a Device Defender audit.

" + }, "DescribeAuthorizer":{ "name":"DescribeAuthorizer", "http":{ @@ -891,6 +1035,38 @@ ], "documentation":"

Describes a role alias.

" }, + "DescribeScheduledAudit":{ + "name":"DescribeScheduledAudit", + "http":{ + "method":"GET", + "requestUri":"/audit/scheduledaudits/{scheduledAuditName}" + }, + "input":{"shape":"DescribeScheduledAuditRequest"}, + "output":{"shape":"DescribeScheduledAuditResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Gets information about a scheduled audit.

" + }, + "DescribeSecurityProfile":{ + "name":"DescribeSecurityProfile", + "http":{ + "method":"GET", + "requestUri":"/security-profiles/{securityProfileName}" + }, + "input":{"shape":"DescribeSecurityProfileRequest"}, + "output":{"shape":"DescribeSecurityProfileResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Gets information about a Device Defender security profile.

" + }, "DescribeStream":{ "name":"DescribeStream", "http":{ @@ -1013,6 +1189,22 @@ "documentation":"

Removes the specified policy from the specified certificate.

Note: This API is deprecated. Please use DetachPolicy instead.

", "deprecated":true }, + "DetachSecurityProfile":{ + "name":"DetachSecurityProfile", + "http":{ + "method":"DELETE", + "requestUri":"/security-profiles/{securityProfileName}/targets" + }, + "input":{"shape":"DetachSecurityProfileRequest"}, + "output":{"shape":"DetachSecurityProfileResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Disassociates a Device Defender security profile from a thing group or from this account.

" + }, "DetachThingPrincipal":{ "name":"DetachThingPrincipal", "http":{ @@ -1126,7 +1318,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Gets the logging options.

" + "documentation":"

Gets the logging options.

NOTE: use of this command is not recommended. Use GetV2LoggingOptions instead.

" }, "GetOTAUpdate":{ "name":"GetOTAUpdate", @@ -1225,11 +1417,27 @@ "output":{"shape":"GetV2LoggingOptionsResponse"}, "errors":[ {"shape":"InternalException"}, - {"shape":"InvalidRequestException"}, + {"shape":"NotConfiguredException"}, {"shape":"ServiceUnavailableException"} ], "documentation":"

Gets the fine grained logging options.

" }, + "ListActiveViolations":{ + "name":"ListActiveViolations", + "http":{ + "method":"GET", + "requestUri":"/active-violations" + }, + "input":{"shape":"ListActiveViolationsRequest"}, + "output":{"shape":"ListActiveViolationsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists the active violations for a given Device Defender security profile.
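For illustration only: a boto3 sketch that pages through active violations for one security profile using the nextToken convention; the profile name is a placeholder.

    import boto3

    iot = boto3.client('iot')

    kwargs = {'securityProfileName': 'TemperatureSensors', 'maxResults': 50}
    while True:
        page = iot.list_active_violations(**kwargs)
        for violation in page.get('activeViolations', []):
            print(violation['thingName'], violation['behavior']['name'])
        token = page.get('nextToken')
        if not token:
            break
        kwargs['nextToken'] = token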

" + }, "ListAttachedPolicies":{ "name":"ListAttachedPolicies", "http":{ @@ -1249,6 +1457,36 @@ ], "documentation":"

Lists the policies attached to the specified thing group.

" }, + "ListAuditFindings":{ + "name":"ListAuditFindings", + "http":{ + "method":"POST", + "requestUri":"/audit/findings" + }, + "input":{"shape":"ListAuditFindingsRequest"}, + "output":{"shape":"ListAuditFindingsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists the findings (results) of a Device Defender audit or of the audits performed during a specified time period. (Findings are retained for 180 days.)
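For illustration only: a boto3 sketch of ListAuditFindings filtered by a time window (a taskId could be supplied instead, but not both).

    import datetime
    import boto3

    iot = boto3.client('iot')

    end = datetime.datetime.utcnow()
    start = end - datetime.timedelta(days=7)
    findings = iot.list_audit_findings(startTime=start, endTime=end, maxResults=100)
    for finding in findings.get('findings', []):
        print(finding['checkName'], finding['severity'])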

" + }, + "ListAuditTasks":{ + "name":"ListAuditTasks", + "http":{ + "method":"GET", + "requestUri":"/audit/tasks" + }, + "input":{"shape":"ListAuditTasksRequest"}, + "output":{"shape":"ListAuditTasksResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists the Device Defender audits that have been performed during a given time period.

" + }, "ListAuthorizers":{ "name":"ListAuthorizers", "http":{ @@ -1524,6 +1762,52 @@ ], "documentation":"

Lists the role aliases registered in your account.

" }, + "ListScheduledAudits":{ + "name":"ListScheduledAudits", + "http":{ + "method":"GET", + "requestUri":"/audit/scheduledaudits" + }, + "input":{"shape":"ListScheduledAuditsRequest"}, + "output":{"shape":"ListScheduledAuditsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists all of your scheduled audits.

" + }, + "ListSecurityProfiles":{ + "name":"ListSecurityProfiles", + "http":{ + "method":"GET", + "requestUri":"/security-profiles" + }, + "input":{"shape":"ListSecurityProfilesRequest"}, + "output":{"shape":"ListSecurityProfilesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists the Device Defender security profiles you have created. You can use filters to list only those security profiles associated with a thing group or only those associated with your account.

" + }, + "ListSecurityProfilesForTarget":{ + "name":"ListSecurityProfilesForTarget", + "http":{ + "method":"GET", + "requestUri":"/security-profiles-for-target" + }, + "input":{"shape":"ListSecurityProfilesForTargetRequest"}, + "output":{"shape":"ListSecurityProfilesForTargetResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the Device Defender security profiles attached to a target (thing group).

" + }, "ListStreams":{ "name":"ListStreams", "http":{ @@ -1560,6 +1844,22 @@ ], "documentation":"

List targets for the specified policy.

" }, + "ListTargetsForSecurityProfile":{ + "name":"ListTargetsForSecurityProfile", + "http":{ + "method":"GET", + "requestUri":"/security-profiles/{securityProfileName}/targets" + }, + "input":{"shape":"ListTargetsForSecurityProfileRequest"}, + "output":{"shape":"ListTargetsForSecurityProfileResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists the targets (thing groups) associated with a given Device Defender security profile.
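For illustration only: a boto3 sketch listing the thing groups a security profile is attached to; the profile name is a placeholder, and each returned target is printed as-is rather than assuming its member names.

    import boto3

    iot = boto3.client('iot')

    resp = iot.list_targets_for_security_profile(securityProfileName='TemperatureSensors')
    for target in resp.get('securityProfileTargets', []):
        print(target)  # each entry identifies one attached thing group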

" + }, "ListThingGroups":{ "name":"ListThingGroups", "http":{ @@ -1720,6 +2020,21 @@ ], "documentation":"

Lists logging levels.

" }, + "ListViolationEvents":{ + "name":"ListViolationEvents", + "http":{ + "method":"GET", + "requestUri":"/violation-events" + }, + "input":{"shape":"ListViolationEventsRequest"}, + "output":{"shape":"ListViolationEventsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists the Device Defender security profile violations discovered during the given time period. You can use filters to limit the results to those alerts issued for a particular security profile, behavior or thing (device).
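For illustration only: a boto3 sketch of ListViolationEvents over the last day, optionally narrowed to one thing; the thing name is a placeholder and the response key is assumed from the operation name.

    import datetime
    import boto3

    iot = boto3.client('iot')

    end = datetime.datetime.utcnow()
    start = end - datetime.timedelta(days=1)
    events = iot.list_violation_events(startTime=start, endTime=end,
                                       thingName='sensor-001')  # optional filter
    for event in events.get('violationEvents', []):
        print(event)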

" + }, "RegisterCACertificate":{ "name":"RegisterCACertificate", "http":{ @@ -1899,7 +2214,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Sets the logging options.

" + "documentation":"

Sets the logging options.

NOTE: use of this command is not recommended. Use SetV2LoggingOptions instead.

" }, "SetV2LoggingLevel":{ "name":"SetV2LoggingLevel", @@ -1930,6 +2245,22 @@ ], "documentation":"

Sets the logging options for the V2 logging service.

" }, + "StartOnDemandAuditTask":{ + "name":"StartOnDemandAuditTask", + "http":{ + "method":"POST", + "requestUri":"/audit/tasks" + }, + "input":{"shape":"StartOnDemandAuditTaskRequest"}, + "output":{"shape":"StartOnDemandAuditTaskResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Starts an on-demand Device Defender audit.
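For illustration only: a boto3 sketch that starts an on-demand audit and then polls its status with DescribeAuditTask; the check name is an example and must be enabled for the account.

    import boto3

    iot = boto3.client('iot')

    task = iot.start_on_demand_audit_task(targetCheckNames=['LOGGING_DISABLED_CHECK'])
    status = iot.describe_audit_task(taskId=task['taskId'])
    print(status['taskStatus'], status.get('taskStatistics'))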

" + }, "StartThingRegistrationTask":{ "name":"StartThingRegistrationTask", "http":{ @@ -2021,6 +2352,21 @@ ], "documentation":"

Transfers the specified certificate to the specified AWS account.

You can cancel the transfer until it is acknowledged by the recipient.

No notification is sent to the transfer destination's account. It is up to the caller to notify the transfer target.

The certificate being transferred must not be in the ACTIVE state. You can use the UpdateCertificate API to deactivate it.

The certificate must not have any policies attached to it. You can use the DetachPrincipalPolicy API to detach them.

" }, + "UpdateAccountAuditConfiguration":{ + "name":"UpdateAccountAuditConfiguration", + "http":{ + "method":"PATCH", + "requestUri":"/audit/configuration" + }, + "input":{"shape":"UpdateAccountAuditConfigurationRequest"}, + "output":{"shape":"UpdateAccountAuditConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Configures or reconfigures the Device Defender audit settings for this account. Settings include how audit notifications are sent and which audit checks are enabled or disabled.
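For illustration only: a boto3 sketch of UpdateAccountAuditConfiguration that enables SNS notifications and one check; every ARN is a placeholder and the check name is an example.

    import boto3

    iot = boto3.client('iot')

    iot.update_account_audit_configuration(
        roleArn='arn:aws:iam::123456789012:role/AWSIoTAuditRole',               # placeholder
        auditNotificationTargetConfigurations={
            'SNS': {
                'targetArn': 'arn:aws:sns:us-east-1:123456789012:iot-audit',    # placeholder
                'roleArn': 'arn:aws:iam::123456789012:role/AWSIoTAuditNotify',  # placeholder
                'enabled': True,
            },
        },
        auditCheckConfigurations={
            'LOGGING_DISABLED_CHECK': {'enabled': True},                        # example check
        },
    )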

" + }, "UpdateAuthorizer":{ "name":"UpdateAuthorizer", "http":{ @@ -2125,6 +2471,39 @@ ], "documentation":"

Updates a role alias.

" }, + "UpdateScheduledAudit":{ + "name":"UpdateScheduledAudit", + "http":{ + "method":"PATCH", + "requestUri":"/audit/scheduledaudits/{scheduledAuditName}" + }, + "input":{"shape":"UpdateScheduledAuditRequest"}, + "output":{"shape":"UpdateScheduledAuditResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Updates a scheduled audit, including what checks are performed and how often the audit takes place.

" + }, + "UpdateSecurityProfile":{ + "name":"UpdateSecurityProfile", + "http":{ + "method":"PATCH", + "requestUri":"/security-profiles/{securityProfileName}" + }, + "input":{"shape":"UpdateSecurityProfileRequest"}, + "output":{"shape":"UpdateSecurityProfileResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"VersionConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Updates a Device Defender security profile.

" + }, "UpdateStream":{ "name":"UpdateStream", "http":{ @@ -2194,6 +2573,21 @@ {"shape":"ResourceNotFoundException"} ], "documentation":"

Updates the groups to which the thing belongs.

" + }, + "ValidateSecurityProfileBehaviors":{ + "name":"ValidateSecurityProfileBehaviors", + "http":{ + "method":"POST", + "requestUri":"/security-profile-behaviors/validate" + }, + "input":{"shape":"ValidateSecurityProfileBehaviorsRequest"}, + "output":{"shape":"ValidateSecurityProfileBehaviorsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Validates a Device Defender security profile behaviors specification.

" } }, "shapes":{ @@ -2274,6 +2668,10 @@ "iotAnalytics":{ "shape":"IotAnalyticsAction", "documentation":"

Sends message data to an AWS IoT Analytics channel.

" + }, + "stepFunctions":{ + "shape":"StepFunctionsAction", + "documentation":"

Starts execution of a Step Functions state machine.

" } }, "documentation":"

Describes the actions associated with a rule.

" @@ -2293,6 +2691,44 @@ "CONNECT" ] }, + "ActiveViolation":{ + "type":"structure", + "members":{ + "violationId":{ + "shape":"ViolationId", + "documentation":"

The ID of the active violation.

" + }, + "thingName":{ + "shape":"ThingName", + "documentation":"

The name of the thing responsible for the active violation.

" + }, + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The security profile whose behavior is in violation.

" + }, + "behavior":{ + "shape":"Behavior", + "documentation":"

The behavior which is being violated.

" + }, + "lastViolationValue":{ + "shape":"MetricValue", + "documentation":"

The value of the metric (the measurement) which caused the most recent violation.

" + }, + "lastViolationTime":{ + "shape":"Timestamp", + "documentation":"

The time the most recent violation occurred.

" + }, + "violationStartTime":{ + "shape":"Timestamp", + "documentation":"

The time the violation started.

" + } + }, + "documentation":"

Information about an active Device Defender security profile behavior violation.

" + }, + "ActiveViolations":{ + "type":"list", + "member":{"shape":"ActiveViolation"} + }, "AddThingToThingGroupRequest":{ "type":"structure", "members":{ @@ -2325,6 +2761,35 @@ "value":{"shape":"Value"} }, "AlarmName":{"type":"string"}, + "AlertTarget":{ + "type":"structure", + "required":[ + "alertTargetArn", + "roleArn" + ], + "members":{ + "alertTargetArn":{ + "shape":"AlertTargetArn", + "documentation":"

The ARN of the notification target to which alerts are sent.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the role that grants permission to send alerts to the notification target.

" + } + }, + "documentation":"

A structure containing the alert target ARN and the role ARN.

" + }, + "AlertTargetArn":{"type":"string"}, + "AlertTargetType":{ + "type":"string", + "documentation":"

The type of alert target: one of \"SNS\".

", + "enum":["SNS"] + }, + "AlertTargets":{ + "type":"map", + "key":{"shape":"AlertTargetType"}, + "value":{"shape":"AlertTarget"} + }, "AllowAutoRegistration":{"type":"boolean"}, "Allowed":{ "type":"structure", @@ -2418,6 +2883,32 @@ }, "documentation":"

The input for the AttachPrincipalPolicy operation.

" }, + "AttachSecurityProfileRequest":{ + "type":"structure", + "required":[ + "securityProfileName", + "securityProfileTargetArn" + ], + "members":{ + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The security profile that is attached.

", + "location":"uri", + "locationName":"securityProfileName" + }, + "securityProfileTargetArn":{ + "shape":"SecurityProfileTargetArn", + "documentation":"

The ARN of the target (thing group) to which the security profile is attached.

", + "location":"querystring", + "locationName":"securityProfileTargetArn" + } + } + }, + "AttachSecurityProfileResponse":{ + "type":"structure", + "members":{ + } + }, "AttachThingPrincipalRequest":{ "type":"structure", "required":[ @@ -2480,6 +2971,206 @@ "key":{"shape":"Key"}, "value":{"shape":"Value"} }, + "AuditCheckConfiguration":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"Enabled", + "documentation":"

True if this audit check is enabled for this account.

" + } + }, + "documentation":"

Which audit checks are enabled and disabled for this account.

" + }, + "AuditCheckConfigurations":{ + "type":"map", + "key":{"shape":"AuditCheckName"}, + "value":{"shape":"AuditCheckConfiguration"} + }, + "AuditCheckDetails":{ + "type":"structure", + "members":{ + "checkRunStatus":{ + "shape":"AuditCheckRunStatus", + "documentation":"

The completion status of this check, one of \"IN_PROGRESS\", \"WAITING_FOR_DATA_COLLECTION\", \"CANCELED\", \"COMPLETED_COMPLIANT\", \"COMPLETED_NON_COMPLIANT\", or \"FAILED\".

" + }, + "checkCompliant":{ + "shape":"CheckCompliant", + "documentation":"

True if the check completed and found all resources compliant.

" + }, + "totalResourcesCount":{ + "shape":"TotalResourcesCount", + "documentation":"

The number of resources on which the check was performed.

" + }, + "nonCompliantResourcesCount":{ + "shape":"NonCompliantResourcesCount", + "documentation":"

The number of resources that the check found non-compliant.

" + }, + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

The code of any error encountered when performing this check during this audit. One of \"INSUFFICIENT_PERMISSIONS\", or \"AUDIT_CHECK_DISABLED\".

" + }, + "message":{ + "shape":"ErrorMessage", + "documentation":"

The message associated with any error encountered when performing this check during this audit.

" + } + }, + "documentation":"

Information about the audit check.

" + }, + "AuditCheckName":{ + "type":"string", + "documentation":"

An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks, including those that are enabled, or UpdateAccountAuditConfiguration to select which checks are enabled.)

" + }, + "AuditCheckRunStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "WAITING_FOR_DATA_COLLECTION", + "CANCELED", + "COMPLETED_COMPLIANT", + "COMPLETED_NON_COMPLIANT", + "FAILED" + ] + }, + "AuditDetails":{ + "type":"map", + "key":{"shape":"AuditCheckName"}, + "value":{"shape":"AuditCheckDetails"} + }, + "AuditFinding":{ + "type":"structure", + "members":{ + "taskId":{ + "shape":"AuditTaskId", + "documentation":"

The ID of the audit that generated this result (finding).

" + }, + "checkName":{ + "shape":"AuditCheckName", + "documentation":"

The audit check that generated this result.

" + }, + "taskStartTime":{ + "shape":"Timestamp", + "documentation":"

The time the audit started.

" + }, + "findingTime":{ + "shape":"Timestamp", + "documentation":"

The time the result (finding) was discovered.

" + }, + "severity":{ + "shape":"AuditFindingSeverity", + "documentation":"

The severity of the result (finding).

" + }, + "nonCompliantResource":{ + "shape":"NonCompliantResource", + "documentation":"

The resource that was found to be non-compliant with the audit check.

" + }, + "relatedResources":{ + "shape":"RelatedResources", + "documentation":"

The list of related resources.

" + }, + "reasonForNonCompliance":{ + "shape":"ReasonForNonCompliance", + "documentation":"

The reason the resource was non-compliant.

" + }, + "reasonForNonComplianceCode":{ + "shape":"ReasonForNonComplianceCode", + "documentation":"

A code which indicates the reason that the resource was non-compliant.

" + } + }, + "documentation":"

The findings (results) of the audit.

" + }, + "AuditFindingSeverity":{ + "type":"string", + "enum":[ + "CRITICAL", + "HIGH", + "MEDIUM", + "LOW" + ] + }, + "AuditFindings":{ + "type":"list", + "member":{"shape":"AuditFinding"} + }, + "AuditFrequency":{ + "type":"string", + "enum":[ + "DAILY", + "WEEKLY", + "BIWEEKLY", + "MONTHLY" + ] + }, + "AuditNotificationTarget":{ + "type":"structure", + "members":{ + "targetArn":{ + "shape":"TargetArn", + "documentation":"

The ARN of the target (SNS topic) to which audit notifications are sent.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the role that grants permission to send notifications to the target.

" + }, + "enabled":{ + "shape":"Enabled", + "documentation":"

True if notifications to the target are enabled.

" + } + }, + "documentation":"

Information about the targets to which audit notifications are sent.

" + }, + "AuditNotificationTargetConfigurations":{ + "type":"map", + "key":{"shape":"AuditNotificationType"}, + "value":{"shape":"AuditNotificationTarget"} + }, + "AuditNotificationType":{ + "type":"string", + "enum":["SNS"] + }, + "AuditTaskId":{ + "type":"string", + "max":40, + "min":1, + "pattern":"[a-zA-Z0-9\\-]+" + }, + "AuditTaskMetadata":{ + "type":"structure", + "members":{ + "taskId":{ + "shape":"AuditTaskId", + "documentation":"

The ID of this audit.

" + }, + "taskStatus":{ + "shape":"AuditTaskStatus", + "documentation":"

The status of this audit: one of \"IN_PROGRESS\", \"COMPLETED\", \"FAILED\" or \"CANCELED\".

" + }, + "taskType":{ + "shape":"AuditTaskType", + "documentation":"

The type of this audit: one of \"ON_DEMAND_AUDIT_TASK\" or \"SCHEDULED_AUDIT_TASK\".

" + } + }, + "documentation":"

The audits that were performed.

" + }, + "AuditTaskMetadataList":{ + "type":"list", + "member":{"shape":"AuditTaskMetadata"} + }, + "AuditTaskStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "COMPLETED", + "FAILED", + "CANCELED" + ] + }, + "AuditTaskType":{ + "type":"string", + "enum":[ + "ON_DEMAND_AUDIT_TASK", + "SCHEDULED_AUDIT_TASK" + ] + }, "AuthDecision":{ "type":"string", "enum":[ @@ -2618,12 +3309,63 @@ }, "AwsAccountId":{ "type":"string", - "pattern":"[0-9]{12}" + "max":12, + "min":12, + "pattern":"[0-9]+" }, "AwsArn":{"type":"string"}, "AwsIotJobArn":{"type":"string"}, "AwsIotJobId":{"type":"string"}, "AwsIotSqlVersion":{"type":"string"}, + "Behavior":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"BehaviorName", + "documentation":"

The name you have given to the behavior.

" + }, + "metric":{ + "shape":"BehaviorMetric", + "documentation":"

What is measured by the behavior.

" + }, + "criteria":{ + "shape":"BehaviorCriteria", + "documentation":"

The criteria that determine if a device is behaving normally in regard to the metric.

" + } + }, + "documentation":"

A Device Defender security profile behavior.

" + }, + "BehaviorCriteria":{ + "type":"structure", + "members":{ + "comparisonOperator":{ + "shape":"ComparisonOperator", + "documentation":"

The operator that relates the thing measured (metric) to the criteria (value).

" + }, + "value":{ + "shape":"MetricValue", + "documentation":"

The value to be compared with the metric.

" + }, + "durationSeconds":{ + "shape":"DurationSeconds", + "documentation":"

Use this to specify the period of time over which the behavior is evaluated, for those criteria which have a time dimension (for example, NUM_MESSAGES_SENT).

" + } + }, + "documentation":"

The criteria by which the behavior is determined to be normal.
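For illustration only: a boto3 sketch of CreateSecurityProfile built from the Behavior and BehaviorCriteria shapes above. The ARNs are placeholders, and the 'count' member of MetricValue is an assumption since that shape is not shown in this hunk.

    import boto3

    iot = boto3.client('iot')

    iot.create_security_profile(
        securityProfileName='TemperatureSensors',
        securityProfileDescription='Alert on unusually chatty devices',
        behaviors=[{
            'name': 'TooManyMessages',
            'metric': 'NUM_MESSAGES_SENT',
            'criteria': {
                'comparisonOperator': 'greater-than',
                'value': {'count': 100},   # assumed MetricValue member
                'durationSeconds': 300,
            },
        }],
        alertTargets={
            'SNS': {
                'alertTargetArn': 'arn:aws:sns:us-east-1:123456789012:defender-alerts',  # placeholder
                'roleArn': 'arn:aws:iam::123456789012:role/AWSIoTDefenderNotify',        # placeholder
            },
        },
    )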

" + }, + "BehaviorMetric":{"type":"string"}, + "BehaviorName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9:_-]+" + }, + "Behaviors":{ + "type":"list", + "member":{"shape":"Behavior"}, + "max":100 + }, "Boolean":{"type":"boolean"}, "BucketName":{"type":"string"}, "CACertificate":{ @@ -2690,6 +3432,10 @@ "generationId":{ "shape":"GenerationId", "documentation":"

The generation ID of the CA certificate.

" + }, + "validity":{ + "shape":"CertificateValidity", + "documentation":"

When the CA certificate is valid.

" } }, "documentation":"

Describes a CA certificate.

" @@ -2705,6 +3451,23 @@ "type":"list", "member":{"shape":"CACertificate"} }, + "CancelAuditTaskRequest":{ + "type":"structure", + "required":["taskId"], + "members":{ + "taskId":{ + "shape":"AuditTaskId", + "documentation":"

The ID of the audit you want to cancel. You can only cancel an audit that is \"IN_PROGRESS\".

", + "location":"uri", + "locationName":"taskId" + } + } + }, + "CancelAuditTaskResponse":{ + "type":"structure", + "members":{ + } + }, "CancelCertificateTransferRequest":{ "type":"structure", "required":["certificateId"], @@ -2792,6 +3555,7 @@ } } }, + "CanceledChecksCount":{"type":"integer"}, "CanceledThings":{"type":"integer"}, "CannedAccessControlList":{ "type":"string", @@ -2891,6 +3655,10 @@ "generationId":{ "shape":"GenerationId", "documentation":"

The generation ID of the certificate.

" + }, + "validity":{ + "shape":"CertificateValidity", + "documentation":"

When the certificate is valid.

" } }, "documentation":"

Describes a certificate.

" @@ -2947,11 +3715,36 @@ "error":{"httpStatusCode":400}, "exception":true }, + "CertificateValidity":{ + "type":"structure", + "members":{ + "notBefore":{ + "shape":"DateType", + "documentation":"

The certificate is not valid before this date.

" + }, + "notAfter":{ + "shape":"DateType", + "documentation":"

The certificate is not valid after this date.

" + } + }, + "documentation":"

When the certificate is valid.

" + }, "Certificates":{ "type":"list", "member":{"shape":"Certificate"} }, "ChannelName":{"type":"string"}, + "CheckCompliant":{"type":"boolean"}, + "Cidr":{ + "type":"string", + "max":43, + "min":2, + "pattern":"[a-fA-F0-9:\\.\\/]+" + }, + "Cidrs":{ + "type":"list", + "member":{"shape":"Cidr"} + }, "ClearDefaultAuthorizerRequest":{ "type":"structure", "members":{ @@ -3006,23 +3799,23 @@ "documentation":"

The IAM role that allows access to the CloudWatch metric.

" }, "metricNamespace":{ - "shape":"MetricNamespace", + "shape":"String", "documentation":"

The CloudWatch metric namespace name.

" }, "metricName":{ - "shape":"MetricName", + "shape":"String", "documentation":"

The CloudWatch metric name.

" }, "metricValue":{ - "shape":"MetricValue", + "shape":"String", "documentation":"

The CloudWatch metric value.

" }, "metricUnit":{ - "shape":"MetricUnit", + "shape":"String", "documentation":"

The metric unit supported by CloudWatch.

" }, "metricTimestamp":{ - "shape":"MetricTimestamp", + "shape":"String", "documentation":"

An optional Unix timestamp.

" } }, @@ -3081,6 +3874,20 @@ "max":2028, "pattern":"[^\\p{C}]+" }, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "less-than", + "less-than-equals", + "greater-than", + "greater-than-equals", + "in-cidr-set", + "not-in-cidr-set", + "in-port-set", + "not-in-port-set" + ] + }, + "CompliantChecksCount":{"type":"integer"}, "Configuration":{ "type":"structure", "members":{ @@ -3094,7 +3901,10 @@ "ConflictingResourceUpdateException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{ + "shape":"errorMessage", + "documentation":"

The message for the exception.

" + } }, "documentation":"

A conflicting resource update exception. This exception is thrown when two pending updates cause a conflict.

", "error":{"httpStatusCode":409}, @@ -3222,10 +4032,6 @@ "jobExecutionsRolloutConfig":{ "shape":"JobExecutionsRolloutConfig", "documentation":"

Allows you to create a staged rollout of the job.

" - }, - "documentParameters":{ - "shape":"JobDocumentParameters", - "documentation":"

Parameters for the job document.

" } } }, @@ -3472,6 +4278,87 @@ } } }, + "CreateScheduledAuditRequest":{ + "type":"structure", + "required":[ + "frequency", + "targetCheckNames", + "scheduledAuditName" + ], + "members":{ + "frequency":{ + "shape":"AuditFrequency", + "documentation":"

How often the scheduled audit takes place. Can be one of \"DAILY\", \"WEEKLY\", \"BIWEEKLY\" or \"MONTHLY\". The actual start time of each audit is determined by the system.

" + }, + "dayOfMonth":{ + "shape":"DayOfMonth", + "documentation":"

The day of the month on which the scheduled audit takes place. Can be \"1\" through \"31\" or \"LAST\". This field is required if the \"frequency\" parameter is set to \"MONTHLY\". If days 29-31 are specified, and the month does not have that many days, the audit takes place on the \"LAST\" day of the month.

" + }, + "dayOfWeek":{ + "shape":"DayOfWeek", + "documentation":"

The day of the week on which the scheduled audit takes place. Can be one of \"SUN\", \"MON\", \"TUE\", \"WED\", \"THU\", \"FRI\" or \"SAT\". This field is required if the \"frequency\" parameter is set to \"WEEKLY\" or \"BIWEEKLY\".

" + }, + "targetCheckNames":{ + "shape":"TargetAuditCheckNames", + "documentation":"

Which checks are performed during the scheduled audit. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks, including those that are enabled, or UpdateAccountAuditConfiguration to select which checks are enabled.)

" + }, + "scheduledAuditName":{ + "shape":"ScheduledAuditName", + "documentation":"

The name you want to give to the scheduled audit. (Max. 128 chars)

", + "location":"uri", + "locationName":"scheduledAuditName" + } + } + }, + "CreateScheduledAuditResponse":{ + "type":"structure", + "members":{ + "scheduledAuditArn":{ + "shape":"ScheduledAuditArn", + "documentation":"

The ARN of the scheduled audit.

" + } + } + }, + "CreateSecurityProfileRequest":{ + "type":"structure", + "required":[ + "securityProfileName", + "behaviors" + ], + "members":{ + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The name you are giving to the security profile.

", + "location":"uri", + "locationName":"securityProfileName" + }, + "securityProfileDescription":{ + "shape":"SecurityProfileDescription", + "documentation":"

A description of the security profile.

" + }, + "behaviors":{ + "shape":"Behaviors", + "documentation":"

Specifies the behaviors that, when violated by a device (thing), cause an alert.

" + }, + "alertTargets":{ + "shape":"AlertTargets", + "documentation":"

Specifies the destinations to which alerts are sent. (Alerts are always sent to the console.) Alerts are generated when a device (thing) violates a behavior.

" + } + } + }, + "CreateSecurityProfileResponse":{ + "type":"structure", + "members":{ + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The name you gave to the security profile.

" + }, + "securityProfileArn":{ + "shape":"SecurityProfileArn", + "documentation":"

The ARN of the security profile.

" + } + } + }, "CreateStreamRequest":{ "type":"structure", "required":[ @@ -3687,6 +4574,38 @@ "min":1 }, "DateType":{"type":"timestamp"}, + "DayOfMonth":{ + "type":"string", + "pattern":"^([1-9]|[12][0-9]|3[01])$|^LAST$" + }, + "DayOfWeek":{ + "type":"string", + "enum":[ + "SUN", + "MON", + "TUE", + "WED", + "THU", + "FRI", + "SAT" + ] + }, + "DeleteAccountAuditConfigurationRequest":{ + "type":"structure", + "members":{ + "deleteScheduledAudits":{ + "shape":"DeleteScheduledAudits", + "documentation":"

If true, all scheduled audits are deleted.

", + "location":"querystring", + "locationName":"deleteScheduledAudits" + } + } + }, + "DeleteAccountAuditConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteAuthorizerRequest":{ "type":"structure", "required":["authorizerName"], @@ -3887,6 +4806,47 @@ "members":{ } }, + "DeleteScheduledAuditRequest":{ + "type":"structure", + "required":["scheduledAuditName"], + "members":{ + "scheduledAuditName":{ + "shape":"ScheduledAuditName", + "documentation":"

The name of the scheduled audit you want to delete.

", + "location":"uri", + "locationName":"scheduledAuditName" + } + } + }, + "DeleteScheduledAuditResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteScheduledAudits":{"type":"boolean"}, + "DeleteSecurityProfileRequest":{ + "type":"structure", + "required":["securityProfileName"], + "members":{ + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The name of the security profile to be deleted.

", + "location":"uri", + "locationName":"securityProfileName" + }, + "expectedVersion":{ + "shape":"OptionalVersion", + "documentation":"

The expected version of the security profile. A new version is generated whenever the security profile is updated. If you specify a value that is different than the actual version, a VersionConflictException is thrown.

", + "location":"querystring", + "locationName":"expectedVersion" + } + } + }, + "DeleteSecurityProfileResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteStreamRequest":{ "type":"structure", "required":["streamId"], @@ -4044,6 +5004,69 @@ "documentation":"

The output for the DeprecateThingType operation.

" }, "DeprecationDate":{"type":"timestamp"}, + "DescribeAccountAuditConfigurationRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeAccountAuditConfigurationResponse":{ + "type":"structure", + "members":{ + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the role that grants permission to AWS IoT to access information about your devices, policies, certificates and other items as necessary when performing an audit.

On the first call to UpdateAccountAuditConfiguration this parameter is required.

" + }, + "auditNotificationTargetConfigurations":{ + "shape":"AuditNotificationTargetConfigurations", + "documentation":"

Information about the targets to which audit notifications are sent for this account.

" + }, + "auditCheckConfigurations":{ + "shape":"AuditCheckConfigurations", + "documentation":"

Which audit checks are enabled and disabled for this account.

" + } + } + }, + "DescribeAuditTaskRequest":{ + "type":"structure", + "required":["taskId"], + "members":{ + "taskId":{ + "shape":"AuditTaskId", + "documentation":"

The ID of the audit whose information you want to get.

", + "location":"uri", + "locationName":"taskId" + } + } + }, + "DescribeAuditTaskResponse":{ + "type":"structure", + "members":{ + "taskStatus":{ + "shape":"AuditTaskStatus", + "documentation":"

The status of the audit: one of \"IN_PROGRESS\", \"COMPLETED\", \"FAILED\", or \"CANCELED\".

" + }, + "taskType":{ + "shape":"AuditTaskType", + "documentation":"

The type of audit: \"ON_DEMAND_AUDIT_TASK\" or \"SCHEDULED_AUDIT_TASK\".

" + }, + "taskStartTime":{ + "shape":"Timestamp", + "documentation":"

The time the audit started.

" + }, + "taskStatistics":{ + "shape":"TaskStatistics", + "documentation":"

Statistical information about the audit.

" + }, + "scheduledAuditName":{ + "shape":"ScheduledAuditName", + "documentation":"

The name of the scheduled audit (only if the audit was a scheduled audit).

" + }, + "auditDetails":{ + "shape":"AuditDetails", + "documentation":"

Detailed information about each check performed during this audit.

" + } + } + }, "DescribeAuthorizerRequest":{ "type":"structure", "required":["authorizerName"], @@ -4284,6 +5307,96 @@ } } }, + "DescribeScheduledAuditRequest":{ + "type":"structure", + "required":["scheduledAuditName"], + "members":{ + "scheduledAuditName":{ + "shape":"ScheduledAuditName", + "documentation":"

The name of the scheduled audit whose information you want to get.

", + "location":"uri", + "locationName":"scheduledAuditName" + } + } + }, + "DescribeScheduledAuditResponse":{ + "type":"structure", + "members":{ + "frequency":{ + "shape":"AuditFrequency", + "documentation":"

How often the scheduled audit takes place. One of \"DAILY\", \"WEEKLY\", \"BIWEEKLY\" or \"MONTHLY\". The actual start time of each audit is determined by the system.

" + }, + "dayOfMonth":{ + "shape":"DayOfMonth", + "documentation":"

The day of the month on which the scheduled audit takes place. Will be \"1\" through \"31\" or \"LAST\". If days 29-31 are specified, and the month does not have that many days, the audit takes place on the \"LAST\" day of the month.

" + }, + "dayOfWeek":{ + "shape":"DayOfWeek", + "documentation":"

The day of the week on which the scheduled audit takes place. One of \"SUN\", \"MON\", \"TUE\", \"WED\", \"THU\", \"FRI\" or \"SAT\".

" + }, + "targetCheckNames":{ + "shape":"TargetAuditCheckNames", + "documentation":"

Which checks are performed during the scheduled audit. (Note that checks must be enabled for your account. Use DescribeAccountAuditConfiguration to see the list of all checks, including those that are enabled, or UpdateAccountAuditConfiguration to select which checks are enabled.)

" + }, + "scheduledAuditName":{ + "shape":"ScheduledAuditName", + "documentation":"

The name of the scheduled audit.

" + }, + "scheduledAuditArn":{ + "shape":"ScheduledAuditArn", + "documentation":"

The ARN of the scheduled audit.

" + } + } + }, + "DescribeSecurityProfileRequest":{ + "type":"structure", + "required":["securityProfileName"], + "members":{ + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The name of the security profile whose information you want to get.

", + "location":"uri", + "locationName":"securityProfileName" + } + } + }, + "DescribeSecurityProfileResponse":{ + "type":"structure", + "members":{ + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The name of the security profile.

" + }, + "securityProfileArn":{ + "shape":"SecurityProfileArn", + "documentation":"

The ARN of the security profile.

" + }, + "securityProfileDescription":{ + "shape":"SecurityProfileDescription", + "documentation":"

A description of the security profile (associated with the security profile when it was created or updated).

" + }, + "behaviors":{ + "shape":"Behaviors", + "documentation":"

Specifies the behaviors that, when violated by a device (thing), cause an alert.

" + }, + "alertTargets":{ + "shape":"AlertTargets", + "documentation":"

Where the alerts are sent. (Alerts are always sent to the console.)

" + }, + "version":{ + "shape":"Version", + "documentation":"

The version of the security profile. A new version is generated whenever the security profile is updated.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The time the security profile was created.

" + }, + "lastModifiedDate":{ + "shape":"Timestamp", + "documentation":"

The time the security profile was last modified.

" + } + } + }, "DescribeStreamRequest":{ "type":"structure", "required":["streamId"], @@ -4539,6 +5652,32 @@ }, "documentation":"

The input for the DetachPrincipalPolicy operation.

" }, + "DetachSecurityProfileRequest":{ + "type":"structure", + "required":[ + "securityProfileName", + "securityProfileTargetArn" + ], + "members":{ + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The security profile that is detached.

", + "location":"uri", + "locationName":"securityProfileName" + }, + "securityProfileTargetArn":{ + "shape":"SecurityProfileTargetArn", + "documentation":"

The ARN of the thing group from which the security profile is detached.

", + "location":"querystring", + "locationName":"securityProfileTargetArn" + } + } + }, + "DetachSecurityProfileResponse":{ + "type":"structure", + "members":{ + } + }, "DetachThingPrincipalRequest":{ "type":"structure", "required":[ @@ -4598,6 +5737,7 @@ }, "documentation":"

The input for the DisableTopicRuleRequest operation.

" }, + "DurationSeconds":{"type":"integer"}, "DynamoDBAction":{ "type":"structure", "required":[ @@ -4750,6 +5890,7 @@ "Enabled":{"type":"boolean"}, "EndpointAddress":{"type":"string"}, "EndpointType":{"type":"string"}, + "ErrorCode":{"type":"string"}, "ErrorInfo":{ "type":"structure", "members":{ @@ -4783,9 +5924,13 @@ "THING_GROUP_HIERARCHY", "THING_TYPE_ASSOCIATION", "JOB", - "JOB_EXECUTION" + "JOB_EXECUTION", + "POLICY", + "CERTIFICATE", + "CA_CERTIFICATE" ] }, + "ExecutionNamePrefix":{"type":"string"}, "ExecutionNumber":{"type":"long"}, "ExpectedVersion":{"type":"long"}, "ExpiresInSec":{ @@ -4803,6 +5948,7 @@ }, "documentation":"

Information that explicitly denies authorization.

" }, + "FailedChecksCount":{"type":"integer"}, "FailedThings":{"type":"integer"}, "FileId":{ "type":"integer", @@ -5150,6 +6296,7 @@ }, "documentation":"

Information that implicitly denies authorization. When policy doesn't explicitly deny or allow an action on a resource it is considered an implicit deny.

" }, + "InProgressChecksCount":{"type":"integer"}, "InProgressThings":{"type":"integer"}, "IndexName":{ "type":"string", @@ -5164,7 +6311,10 @@ "IndexNotReadyException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{ + "shape":"errorMessage", + "documentation":"

The message for the exception.

" + } }, "documentation":"

The index is not ready.

", "error":{"httpStatusCode":400}, @@ -5209,7 +6359,10 @@ "InvalidQueryException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{ + "shape":"errorMessage", + "documentation":"

The message for the exception.

" + } }, "documentation":"

The query is invalid.

", "error":{"httpStatusCode":400}, @@ -5230,7 +6383,10 @@ "InvalidResponseException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{ + "shape":"errorMessage", + "documentation":"

The message for the exception.

" + } }, "documentation":"

The response is invalid.

", "error":{"httpStatusCode":400}, @@ -5239,7 +6395,10 @@ "InvalidStateTransitionException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{ + "shape":"errorMessage", + "documentation":"

The message for the exception.

" + } }, "documentation":"

An attempt was made to change to an invalid state, for example by deleting a job or a job execution which is \"IN_PROGRESS\" without setting the force parameter.

", "error":{"httpStatusCode":409}, @@ -5324,10 +6483,6 @@ "jobProcessDetails":{ "shape":"JobProcessDetails", "documentation":"

Details about the job process.

" - }, - "documentParameters":{ - "shape":"JobDocumentParameters", - "documentation":"

The parameters specified for the job document.

" } }, "documentation":"

The Job object contains details about a job.

" @@ -5342,12 +6497,6 @@ "type":"string", "max":32768 }, - "JobDocumentParameters":{ - "type":"map", - "key":{"shape":"ParameterKey"}, - "value":{"shape":"ParameterValue"}, - "max":10 - }, "JobDocumentSource":{ "type":"string", "max":1350, @@ -5670,6 +6819,48 @@ "error":{"httpStatusCode":410}, "exception":true }, + "ListActiveViolationsRequest":{ + "type":"structure", + "members":{ + "thingName":{ + "shape":"ThingName", + "documentation":"

The name of the thing whose active violations are listed.

", + "location":"querystring", + "locationName":"thingName" + }, + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The name of the Device Defender security profile for which violations are listed.

", + "location":"querystring", + "locationName":"securityProfileName" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return at one time.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListActiveViolationsResponse":{ + "type":"structure", + "members":{ + "activeViolations":{ + "shape":"ActiveViolations", + "documentation":"

The list of active violations.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used to retrieve the next set of results, or null if there are no additional results.

" + } + } + }, "ListAttachedPoliciesRequest":{ "type":"structure", "required":["target"], @@ -5713,6 +6904,110 @@ } } }, + "ListAuditFindingsRequest":{ + "type":"structure", + "members":{ + "taskId":{ + "shape":"AuditTaskId", + "documentation":"

A filter to limit results to the audit with the specified ID. You must specify either the taskId or the startTime and endTime, but not both.

" + }, + "checkName":{ + "shape":"AuditCheckName", + "documentation":"

A filter to limit results to the findings for the specified audit check.

" + }, + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

Information identifying the non-compliant resource.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return at one time. The default is 25.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

A filter to limit results to those found after the specified time. You must specify either the startTime and endTime or the taskId, but not both.

" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

A filter to limit results to those found before the specified time. You must specify either the startTime and endTime or the taskId, but not both.

" + } + } + }, + "ListAuditFindingsResponse":{ + "type":"structure", + "members":{ + "findings":{ + "shape":"AuditFindings", + "documentation":"

The findings (results) of the audit.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used to retrieve the next set of results, or null if there are no additional results.

" + } + } + }, + "ListAuditTasksRequest":{ + "type":"structure", + "required":[ + "startTime", + "endTime" + ], + "members":{ + "startTime":{ + "shape":"Timestamp", + "documentation":"

The beginning of the time period. Note that audit information is retained for a limited time (180 days). Requesting a start time prior to what is retained results in an \"InvalidRequestException\".

", + "location":"querystring", + "locationName":"startTime" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time period.

", + "location":"querystring", + "locationName":"endTime" + }, + "taskType":{ + "shape":"AuditTaskType", + "documentation":"

A filter to limit the output to the specified type of audit: can be one of \"ON_DEMAND_AUDIT_TASK\" or \"SCHEDULED_AUDIT_TASK\".

", + "location":"querystring", + "locationName":"taskType" + }, + "taskStatus":{ + "shape":"AuditTaskStatus", + "documentation":"

A filter to limit the output to audits with the specified completion status: can be one of \"IN_PROGRESS\", \"COMPLETED\", \"FAILED\" or \"CANCELED\".

", + "location":"querystring", + "locationName":"taskStatus" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return at one time. The default is 25.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAuditTasksResponse":{ + "type":"structure", + "members":{ + "tasks":{ + "shape":"AuditTaskMetadataList", + "documentation":"

The audits that were performed during the specified time period.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used to retrieve the next set of results, or null if there are no additional results.

" + } + } + }, "ListAuthorizersRequest":{ "type":"structure", "members":{ @@ -6277,7 +7572,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

", + "documentation":"

The token to retrieve the next set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -6346,6 +7641,109 @@ } } }, + "ListScheduledAuditsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return at one time. The default is 25.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListScheduledAuditsResponse":{ + "type":"structure", + "members":{ + "scheduledAudits":{ + "shape":"ScheduledAuditMetadataList", + "documentation":"

The list of scheduled audits.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used to retrieve the next set of results, or null if there are no additional results.

" + } + } + }, + "ListSecurityProfilesForTargetRequest":{ + "type":"structure", + "required":["securityProfileTargetArn"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return at one time.

", + "location":"querystring", + "locationName":"maxResults" + }, + "recursive":{ + "shape":"Recursive", + "documentation":"

If true, return child groups as well.

", + "location":"querystring", + "locationName":"recursive" + }, + "securityProfileTargetArn":{ + "shape":"SecurityProfileTargetArn", + "documentation":"

The ARN of the target (thing group) whose attached security profiles you want to get.

", + "location":"querystring", + "locationName":"securityProfileTargetArn" + } + } + }, + "ListSecurityProfilesForTargetResponse":{ + "type":"structure", + "members":{ + "securityProfileTargetMappings":{ + "shape":"SecurityProfileTargetMappings", + "documentation":"

A list of security profiles and their associated targets.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used to retrieve the next set of results, or null if there are no additional results.

" + } + } + }, + "ListSecurityProfilesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return at one time.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListSecurityProfilesResponse":{ + "type":"structure", + "members":{ + "securityProfileIdentifiers":{ + "shape":"SecurityProfileIdentifiers", + "documentation":"

A list of security profile identifiers (names and ARNs).

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used to retrieve the next set of results, or null if there are no additional results.

" + } + } + }, "ListStreamsRequest":{ "type":"structure", "members":{ @@ -6419,6 +7817,43 @@ } } }, + "ListTargetsForSecurityProfileRequest":{ + "type":"structure", + "required":["securityProfileName"], + "members":{ + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The security profile.

", + "location":"uri", + "locationName":"securityProfileName" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return at one time.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListTargetsForSecurityProfileResponse":{ + "type":"structure", + "members":{ + "securityProfileTargets":{ + "shape":"SecurityProfileTargets", + "documentation":"

The thing groups to which the security profile is attached.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used to retrieve the next set of results, or null if there are no additional results.

" + } + } + }, "ListThingGroupsForThingRequest":{ "type":"structure", "required":["thingName"], @@ -6431,7 +7866,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

", + "documentation":"

The token to retrieve the next set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -6461,7 +7896,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

", + "documentation":"

The token to retrieve the next set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -6573,7 +8008,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

" + "documentation":"

The token used to get the next set of results, or null if there are no additional results.

" } } }, @@ -6582,7 +8017,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

", + "documentation":"

The token to retrieve the next set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -6618,7 +8053,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token for the next set of results, or null if there are no additional results.

", + "documentation":"

The token to retrieve the next set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -6669,7 +8104,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

", + "documentation":"

The token to retrieve the next set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -6699,7 +8134,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

", + "documentation":"

The token to retrieve the next set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -6824,6 +8259,64 @@ } } }, + "ListViolationEventsRequest":{ + "type":"structure", + "required":[ + "startTime", + "endTime" + ], + "members":{ + "startTime":{ + "shape":"Timestamp", + "documentation":"

The start time for the alerts to be listed.

", + "location":"querystring", + "locationName":"startTime" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

The end time for the alerts to be listed.

", + "location":"querystring", + "locationName":"endTime" + }, + "thingName":{ + "shape":"ThingName", + "documentation":"

A filter to limit results to those alerts caused by the specified thing.

", + "location":"querystring", + "locationName":"thingName" + }, + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

A filter to limit results to those alerts generated by the specified security profile.

", + "location":"querystring", + "locationName":"securityProfileName" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return at one time.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListViolationEventsResponse":{ + "type":"structure", + "members":{ + "violationEvents":{ + "shape":"ViolationEvents", + "documentation":"

The security profile violation alerts issued for this account during the given time frame, potentially filtered by the security profile whose behavior was violated or by the thing (device) that caused the violation.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used to retrieve the next set of results, or null if there are no additional results.

" + } + } + }, "LogLevel":{ "type":"string", "enum":[ @@ -6927,21 +8420,57 @@ "JSON" ] }, - "MetricName":{"type":"string"}, - "MetricNamespace":{"type":"string"}, - "MetricTimestamp":{"type":"string"}, - "MetricUnit":{"type":"string"}, - "MetricValue":{"type":"string"}, + "MetricValue":{ + "type":"structure", + "members":{ + "count":{ + "shape":"UnsignedLong", + "documentation":"

If the comparisonOperator calls for a numeric value, use this to specify that numeric value to be compared with the metric.

" + }, + "cidrs":{ + "shape":"Cidrs", + "documentation":"

If the comparisonOperator calls for a set of CIDRs, use this to specify that set to be compared with the metric.

" + }, + "ports":{ + "shape":"Ports", + "documentation":"

If the comparisonOperator calls for a set of ports, use this to specify that set to be compared with the metric.

" + } + }, + "documentation":"

The value to be compared with the metric.

" + }, "MissingContextValue":{"type":"string"}, "MissingContextValues":{ "type":"list", "member":{"shape":"MissingContextValue"} }, "NextToken":{"type":"string"}, + "NonCompliantChecksCount":{"type":"integer"}, + "NonCompliantResource":{ + "type":"structure", + "members":{ + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the non-compliant resource.

" + }, + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

Information identifying the non-compliant resource.

" + }, + "additionalInfo":{ + "shape":"StringMap", + "documentation":"

Additional information about the non-compliant resource.

" + } + }, + "documentation":"

Information about the resource that was non-compliant with the audit check.

" + }, + "NonCompliantResourcesCount":{"type":"long"}, "NotConfiguredException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{ + "shape":"errorMessage", + "documentation":"

The message for the exception.

" + } }, "documentation":"

The resource is not configured.

", "error":{"httpStatusCode":404}, @@ -7123,18 +8652,6 @@ "min":1 }, "Parameter":{"type":"string"}, - "ParameterKey":{ - "type":"string", - "max":128, - "min":1, - "pattern":"[a-zA-Z0-9:_-]+" - }, - "ParameterValue":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"[^\\p{C}]+" - }, "Parameters":{ "type":"map", "key":{"shape":"Parameter"}, @@ -7208,10 +8725,33 @@ "type":"string", "pattern":"[0-9]+" }, + "PolicyVersionIdentifier":{ + "type":"structure", + "members":{ + "policyName":{ + "shape":"PolicyName", + "documentation":"

The name of the policy.

" + }, + "policyVersionId":{ + "shape":"PolicyVersionId", + "documentation":"

The ID of the version of the policy associated with the resource.

" + } + }, + "documentation":"

Information about the version of the policy associated with the resource.

" + }, "PolicyVersions":{ "type":"list", "member":{"shape":"PolicyVersion"} }, + "Port":{ + "type":"integer", + "max":65535, + "min":0 + }, + "Ports":{ + "type":"list", + "member":{"shape":"Port"} + }, "PresignedUrlConfig":{ "type":"structure", "members":{ @@ -7283,6 +8823,8 @@ "QueuedThings":{"type":"integer"}, "RangeKeyField":{"type":"string"}, "RangeKeyValue":{"type":"string"}, + "ReasonForNonCompliance":{"type":"string"}, + "ReasonForNonComplianceCode":{"type":"string"}, "Recursive":{"type":"boolean"}, "RecursiveWithoutDefault":{"type":"boolean"}, "RegisterCACertificateRequest":{ @@ -7390,7 +8932,10 @@ "RegisterThingResponse":{ "type":"structure", "members":{ - "certificatePem":{"shape":"CertificatePem"}, + "certificatePem":{ + "shape":"CertificatePem", + "documentation":"

The certificate data, in PEM format.

" + }, "resourceArns":{ "shape":"ResourceArns", "documentation":"

ARNs for the generated resources.

" @@ -7464,6 +9009,28 @@ "documentation":"

The input for the RejectCertificateTransfer operation.

" }, "RejectedThings":{"type":"integer"}, + "RelatedResource":{ + "type":"structure", + "members":{ + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource.

" + }, + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

Information identifying the resource.

" + }, + "additionalInfo":{ + "shape":"StringMap", + "documentation":"

Additional information about the resource.

" + } + }, + "documentation":"

Information about a related resource.

" + }, + "RelatedResources":{ + "type":"list", + "member":{"shape":"RelatedResource"} + }, "RemoveAutoRegistration":{"type":"boolean"}, "RemoveThingFromThingGroupRequest":{ "type":"structure", @@ -7566,6 +9133,36 @@ "key":{"shape":"ResourceLogicalId"}, "value":{"shape":"ResourceArn"} }, + "ResourceIdentifier":{ + "type":"structure", + "members":{ + "deviceCertificateId":{ + "shape":"CertificateId", + "documentation":"

The ID of the certificate attached to the resource.

" + }, + "caCertificateId":{ + "shape":"CertificateId", + "documentation":"

The ID of the CA certificate used to authorize the certificate.

" + }, + "cognitoIdentityPoolId":{ + "shape":"CognitoIdentityPoolId", + "documentation":"

The ID of the Cognito Identity Pool.

" + }, + "clientId":{ + "shape":"ClientId", + "documentation":"

The client ID.

" + }, + "policyVersionIdentifier":{ + "shape":"PolicyVersionIdentifier", + "documentation":"

The version of the policy associated with the resource.

" + }, + "account":{ + "shape":"AwsAccountId", + "documentation":"

The account with which the resource is associated.

" + } + }, + "documentation":"

Information identifying the non-compliant resource.

" + }, "ResourceLogicalId":{"type":"string"}, "ResourceNotFoundException":{ "type":"structure", @@ -7582,12 +9179,26 @@ "ResourceRegistrationFailureException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{ + "shape":"errorMessage", + "documentation":"

The message for the exception.

" + } }, "documentation":"

The resource registration failed.

", "error":{"httpStatusCode":400}, "exception":true }, + "ResourceType":{ + "type":"string", + "enum":[ + "DEVICE_CERTIFICATE", + "CA_CERTIFICATE", + "IOT_POLICY", + "COGNITO_IDENTITY_POOL", + "CLIENT_ID", + "ACCOUNT_SETTINGS" + ] + }, "Resources":{ "type":"list", "member":{"shape":"Resource"} @@ -7743,6 +9354,43 @@ "type":"string", "min":40 }, + "ScheduledAuditArn":{"type":"string"}, + "ScheduledAuditMetadata":{ + "type":"structure", + "members":{ + "scheduledAuditName":{ + "shape":"ScheduledAuditName", + "documentation":"

The name of the scheduled audit.

" + }, + "scheduledAuditArn":{ + "shape":"ScheduledAuditArn", + "documentation":"

The ARN of the scheduled audit.

" + }, + "frequency":{ + "shape":"AuditFrequency", + "documentation":"

How often the scheduled audit takes place.

" + }, + "dayOfMonth":{ + "shape":"DayOfMonth", + "documentation":"

The day of the month on which the scheduled audit is run (if the frequency is \"MONTHLY\"). If days 29-31 are specified, and the month does not have that many days, the audit takes place on the \"LAST\" day of the month.

" + }, + "dayOfWeek":{ + "shape":"DayOfWeek", + "documentation":"

The day of the week on which the scheduled audit is run (if the frequency is \"WEEKLY\" or \"BIWEEKLY\").

" + } + }, + "documentation":"

Information about the scheduled audit.

" + }, + "ScheduledAuditMetadataList":{ + "type":"list", + "member":{"shape":"ScheduledAuditMetadata"} + }, + "ScheduledAuditName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, "SearchIndexRequest":{ "type":"structure", "required":["queryString"], @@ -7787,6 +9435,74 @@ "member":{"shape":"AttributeName"} }, "Seconds":{"type":"integer"}, + "SecurityProfileArn":{"type":"string"}, + "SecurityProfileDescription":{ + "type":"string", + "max":1000, + "pattern":"[\\p{Graph}\\x20]*" + }, + "SecurityProfileIdentifier":{ + "type":"structure", + "required":[ + "name", + "arn" + ], + "members":{ + "name":{ + "shape":"SecurityProfileName", + "documentation":"

The name you have given to the security profile.

" + }, + "arn":{ + "shape":"SecurityProfileArn", + "documentation":"

The ARN of the security profile.

" + } + }, + "documentation":"

Identifying information for a Device Defender security profile.

" + }, + "SecurityProfileIdentifiers":{ + "type":"list", + "member":{"shape":"SecurityProfileIdentifier"} + }, + "SecurityProfileName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9:_-]+" + }, + "SecurityProfileTarget":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"SecurityProfileTargetArn", + "documentation":"

The ARN of the security profile.

" + } + }, + "documentation":"

A target to which an alert is sent when a security profile behavior is violated.

" + }, + "SecurityProfileTargetArn":{"type":"string"}, + "SecurityProfileTargetMapping":{ + "type":"structure", + "members":{ + "securityProfileIdentifier":{ + "shape":"SecurityProfileIdentifier", + "documentation":"

Information that identifies the security profile.

" + }, + "target":{ + "shape":"SecurityProfileTarget", + "documentation":"

Information about the target (thing group) associated with the security profile.

" + } + }, + "documentation":"

Information about a security profile and the target associated with it.

" + }, + "SecurityProfileTargetMappings":{ + "type":"list", + "member":{"shape":"SecurityProfileTargetMapping"} + }, + "SecurityProfileTargets":{ + "type":"list", + "member":{"shape":"SecurityProfileTarget"} + }, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -7882,7 +9598,7 @@ "members":{ "roleArn":{ "shape":"AwsArn", - "documentation":"

The role ARN that allows IoT to write to Cloudwatch logs.

" + "documentation":"

The ARN of the role that allows IoT to write to Cloudwatch logs.

" }, "defaultLogLevel":{ "shape":"LogLevel", @@ -7890,7 +9606,7 @@ }, "disableAllLogs":{ "shape":"DisableAllLogs", - "documentation":"

Set to true to disable all logs, otherwise set to false.

" + "documentation":"

If true, all logs are disabled. The default is false.

" } } }, @@ -7958,6 +9674,25 @@ }, "documentation":"

Describes an action to publish data to an Amazon SQS queue.

" }, + "StartOnDemandAuditTaskRequest":{ + "type":"structure", + "required":["targetCheckNames"], + "members":{ + "targetCheckNames":{ + "shape":"TargetAuditCheckNames", + "documentation":"

Which checks are performed during the audit. The checks you specify must be enabled for your account or an exception occurs. Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.

" + } + } + }, + "StartOnDemandAuditTaskResponse":{ + "type":"structure", + "members":{ + "taskId":{ + "shape":"AuditTaskId", + "documentation":"

The ID of the on-demand audit you started.

" + } + } + }, "StartThingRegistrationTaskRequest":{ "type":"structure", "required":[ @@ -7994,6 +9729,7 @@ } } }, + "StateMachineName":{"type":"string"}, "StateReason":{"type":"string"}, "StateValue":{"type":"string"}, "Status":{ @@ -8006,6 +9742,28 @@ "Cancelling" ] }, + "StepFunctionsAction":{ + "type":"structure", + "required":[ + "stateMachineName", + "roleArn" + ], + "members":{ + "executionNamePrefix":{ + "shape":"ExecutionNamePrefix", + "documentation":"

(Optional) A name will be given to the state machine execution consisting of this prefix followed by a UUID. Step Functions automatically creates a unique name for each state machine execution if one is not provided.

" + }, + "stateMachineName":{ + "shape":"StateMachineName", + "documentation":"

The name of the Step Functions state machine whose execution will be started.

" + }, + "roleArn":{ + "shape":"AwsArn", + "documentation":"

The ARN of the role that grants IoT permission to start execution of a state machine (\"Action\":\"states:StartExecution\").

" + } + }, + "documentation":"

Starts execution of a Step Functions state machine.

" + }, "StopThingRegistrationTaskRequest":{ "type":"structure", "required":["taskId"], @@ -8139,10 +9897,20 @@ "type":"list", "member":{"shape":"StreamSummary"} }, + "String":{"type":"string"}, + "StringMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, "SucceededThings":{"type":"integer"}, "TableName":{"type":"string"}, "Target":{"type":"string"}, "TargetArn":{"type":"string"}, + "TargetAuditCheckNames":{ + "type":"list", + "member":{"shape":"AuditCheckName"} + }, "TargetSelection":{ "type":"string", "enum":[ @@ -8163,6 +9931,40 @@ "type":"list", "member":{"shape":"TaskId"} }, + "TaskStatistics":{ + "type":"structure", + "members":{ + "totalChecks":{ + "shape":"TotalChecksCount", + "documentation":"

The number of checks in this audit.

" + }, + "inProgressChecks":{ + "shape":"InProgressChecksCount", + "documentation":"

The number of checks in progress.

" + }, + "waitingForDataCollectionChecks":{ + "shape":"WaitingForDataCollectionChecksCount", + "documentation":"

The number of checks waiting for data collection.

" + }, + "compliantChecks":{ + "shape":"CompliantChecksCount", + "documentation":"

The number of checks that found compliant resources.

" + }, + "nonCompliantChecks":{ + "shape":"NonCompliantChecksCount", + "documentation":"

The number of checks that found non-compliant resources.

" + }, + "failedChecks":{ + "shape":"FailedChecksCount", + "documentation":"

The number of checks that failed.

" + }, + "canceledChecks":{ + "shape":"CanceledChecksCount", + "documentation":"

The number of checks that did not run because the audit was canceled.

" + } + }, + "documentation":"

Statistics for the checks performed during the audit.

" + }, "TemplateBody":{"type":"string"}, "TestAuthorizationRequest":{ "type":"structure", @@ -8493,9 +10295,10 @@ "error":{"httpStatusCode":429}, "exception":true }, + "Timestamp":{"type":"timestamp"}, "Token":{ "type":"string", - "max":1024, + "max":6144, "min":1 }, "TokenKeyName":{ @@ -8614,6 +10417,8 @@ }, "documentation":"

Describes a rule.

" }, + "TotalChecksCount":{"type":"integer"}, + "TotalResourcesCount":{"type":"long"}, "TransferAlreadyCompletedException":{ "type":"structure", "members":{ @@ -8713,6 +10518,32 @@ "exception":true }, "UndoDeprecate":{"type":"boolean"}, + "UnsignedLong":{ + "type":"long", + "min":0 + }, + "UpdateAccountAuditConfigurationRequest":{ + "type":"structure", + "members":{ + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the role that grants permission to AWS IoT to access information about your devices, policies, certificates and other items as necessary when performing an audit.

" + }, + "auditNotificationTargetConfigurations":{ + "shape":"AuditNotificationTargetConfigurations", + "documentation":"

Information about the targets to which audit notifications are sent.

" + }, + "auditCheckConfigurations":{ + "shape":"AuditCheckConfigurations", + "documentation":"

Specifies which audit checks are enabled and disabled for this account. Use DescribeAccountAuditConfiguration to see the list of all checks including those that are currently enabled.

Note that some data collection may begin immediately when certain checks are enabled. When a check is disabled, any data collected so far in relation to the check is deleted.

You cannot disable a check if it is used by any scheduled audit. You must first delete the check from the scheduled audit or delete the scheduled audit itself.

On the first call to UpdateAccountAuditConfiguration this parameter is required and must specify at least one enabled check.

" + } + } + }, + "UpdateAccountAuditConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateAuthorizerRequest":{ "type":"structure", "required":["authorizerName"], @@ -8870,6 +10701,110 @@ } } }, + "UpdateScheduledAuditRequest":{ + "type":"structure", + "required":["scheduledAuditName"], + "members":{ + "frequency":{ + "shape":"AuditFrequency", + "documentation":"

How often the scheduled audit takes place. Can be one of \"DAILY\", \"WEEKLY\", \"BIWEEKLY\" or \"MONTHLY\". The actual start time of each audit is determined by the system.

" + }, + "dayOfMonth":{ + "shape":"DayOfMonth", + "documentation":"

The day of the month on which the scheduled audit takes place. Can be \"1\" through \"31\" or \"LAST\". This field is required if the \"frequency\" parameter is set to \"MONTHLY\". If days 29-31 are specified, and the month does not have that many days, the audit takes place on the \"LAST\" day of the month.

" + }, + "dayOfWeek":{ + "shape":"DayOfWeek", + "documentation":"

The day of the week on which the scheduled audit takes place. Can be one of \"SUN\", \"MON\", \"TUE\", \"WED\", \"THU\", \"FRI\" or \"SAT\". This field is required if the \"frequency\" parameter is set to \"WEEKLY\" or \"BIWEEKLY\".

" + }, + "targetCheckNames":{ + "shape":"TargetAuditCheckNames", + "documentation":"

Which checks are performed during the scheduled audit. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)

" + }, + "scheduledAuditName":{ + "shape":"ScheduledAuditName", + "documentation":"

The name of the scheduled audit. (Max. 128 chars)

", + "location":"uri", + "locationName":"scheduledAuditName" + } + } + }, + "UpdateScheduledAuditResponse":{ + "type":"structure", + "members":{ + "scheduledAuditArn":{ + "shape":"ScheduledAuditArn", + "documentation":"

The ARN of the scheduled audit.

" + } + } + }, + "UpdateSecurityProfileRequest":{ + "type":"structure", + "required":["securityProfileName"], + "members":{ + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The name of the security profile you want to update.

", + "location":"uri", + "locationName":"securityProfileName" + }, + "securityProfileDescription":{ + "shape":"SecurityProfileDescription", + "documentation":"

A description of the security profile.

" + }, + "behaviors":{ + "shape":"Behaviors", + "documentation":"

Specifies the behaviors that, when violated by a device (thing), cause an alert.

" + }, + "alertTargets":{ + "shape":"AlertTargets", + "documentation":"

Where the alerts are sent. (Alerts are always sent to the console.)

" + }, + "expectedVersion":{ + "shape":"OptionalVersion", + "documentation":"

The expected version of the security profile. A new version is generated whenever the security profile is updated. If you specify a value that is different than the actual version, a VersionConflictException is thrown.

", + "location":"querystring", + "locationName":"expectedVersion" + } + } + }, + "UpdateSecurityProfileResponse":{ + "type":"structure", + "members":{ + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The name of the security profile that was updated.

" + }, + "securityProfileArn":{ + "shape":"SecurityProfileArn", + "documentation":"

The ARN of the security profile that was updated.

" + }, + "securityProfileDescription":{ + "shape":"SecurityProfileDescription", + "documentation":"

The description of the security profile.

" + }, + "behaviors":{ + "shape":"Behaviors", + "documentation":"

Specifies the behaviors that, when violated by a device (thing), cause an alert.

" + }, + "alertTargets":{ + "shape":"AlertTargets", + "documentation":"

Where the alerts are sent. (Alerts are always sent to the console.)

" + }, + "version":{ + "shape":"Version", + "documentation":"

The updated version of the security profile.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The time the security profile was created.

" + }, + "lastModifiedDate":{ + "shape":"Timestamp", + "documentation":"

The time the security profile was last modified.

" + } + } + }, "UpdateStreamRequest":{ "type":"structure", "required":["streamId"], @@ -9005,6 +10940,44 @@ "documentation":"

The output from the UpdateThing operation.

" }, "UseBase64":{"type":"boolean"}, + "Valid":{"type":"boolean"}, + "ValidateSecurityProfileBehaviorsRequest":{ + "type":"structure", + "required":["behaviors"], + "members":{ + "behaviors":{ + "shape":"Behaviors", + "documentation":"

Specifies the behaviors that, when violated by a device (thing), cause an alert.

" + } + } + }, + "ValidateSecurityProfileBehaviorsResponse":{ + "type":"structure", + "members":{ + "valid":{ + "shape":"Valid", + "documentation":"

True if the behaviors were valid.

" + }, + "validationErrors":{ + "shape":"ValidationErrors", + "documentation":"

The list of any errors found in the behaviors.

" + } + } + }, + "ValidationError":{ + "type":"structure", + "members":{ + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

The description of an error found in the behaviors.

" + } + }, + "documentation":"

Information about an error found in a behavior specification.

" + }, + "ValidationErrors":{ + "type":"list", + "member":{"shape":"ValidationError"} + }, "Value":{"type":"string"}, "Version":{"type":"long"}, "VersionConflictException":{ @@ -9032,9 +11005,62 @@ "error":{"httpStatusCode":409}, "exception":true }, + "ViolationEvent":{ + "type":"structure", + "members":{ + "violationId":{ + "shape":"ViolationId", + "documentation":"

The ID of the violation event.

" + }, + "thingName":{ + "shape":"ThingName", + "documentation":"

The name of the thing responsible for the violation event.

" + }, + "securityProfileName":{ + "shape":"SecurityProfileName", + "documentation":"

The name of the security profile whose behavior was violated.

" + }, + "behavior":{ + "shape":"Behavior", + "documentation":"

The behavior which was violated.

" + }, + "metricValue":{ + "shape":"MetricValue", + "documentation":"

The value of the metric (the measurement).

" + }, + "violationEventType":{ + "shape":"ViolationEventType", + "documentation":"

The type of violation event.

" + }, + "violationEventTime":{ + "shape":"Timestamp", + "documentation":"

The time the violation event occurred.

" + } + }, + "documentation":"

Information about a Device Defender security profile behavior violation.

" + }, + "ViolationEventType":{ + "type":"string", + "enum":[ + "in-alarm", + "alarm-cleared", + "alarm-invalidated" + ] + }, + "ViolationEvents":{ + "type":"list", + "member":{"shape":"ViolationEvent"} + }, + "ViolationId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9\\-]+" + }, + "WaitingForDataCollectionChecksCount":{"type":"integer"}, "errorMessage":{"type":"string"}, "resourceArn":{"type":"string"}, "resourceId":{"type":"string"} }, - "documentation":"AWS IoT

AWS IoT provides secure, bi-directional communication between Internet-connected devices (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. You can discover your custom IoT-Data endpoint to communicate with, configure rules for data processing and integration with other services, organize resources associated with each device (Registry), configure logging, and create and manage policies and credentials to authenticate devices.

For more information about how AWS IoT works, see the Developer Guide.

" + "documentation":"AWS IoT

AWS IoT provides secure, bi-directional communication between Internet-connected devices (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. You can discover your custom IoT-Data endpoint to communicate with, configure rules for data processing and integration with other services, organize resources associated with each device (Registry), configure logging, and create and manage policies and credentials to authenticate devices.

For more information about how AWS IoT works, see the Developer Guide.

For information about how to use the credentials provider for AWS IoT, see Authorizing Direct Calls to AWS Services.

" } diff --git a/botocore/data/iotanalytics/2017-11-27/service-2.json b/botocore/data/iotanalytics/2017-11-27/service-2.json index f0b04f31..a047fd12 100644 --- a/botocore/data/iotanalytics/2017-11-27/service-2.json +++ b/botocore/data/iotanalytics/2017-11-27/service-2.json @@ -761,6 +761,16 @@ "min":1, "pattern":"^[a-zA-Z0-9_]+$" }, + "ChannelStatistics":{ + "type":"structure", + "members":{ + "size":{ + "shape":"EstimatedResourceSize", + "documentation":"

The estimated size of the channel.

" + } + }, + "documentation":"

Statistics information about the channel.

" + }, "ChannelStatus":{ "type":"string", "enum":[ @@ -1162,6 +1172,16 @@ "min":1, "pattern":"^[a-zA-Z0-9_]+$" }, + "DatastoreStatistics":{ + "type":"structure", + "members":{ + "size":{ + "shape":"EstimatedResourceSize", + "documentation":"

The estimated size of the data store.

" + } + }, + "documentation":"

Statistics information about the data store.

" + }, "DatastoreStatus":{ "type":"string", "enum":[ @@ -1271,6 +1291,12 @@ "documentation":"

The name of the channel whose information is retrieved.

", "location":"uri", "locationName":"channelName" + }, + "includeStatistics":{ + "shape":"IncludeStatisticsFlag", + "documentation":"

If true, include statistics about the channel in the response.

", + "location":"querystring", + "locationName":"includeStatistics" } } }, @@ -1280,6 +1306,10 @@ "channel":{ "shape":"Channel", "documentation":"

An object that contains information about the channel.

" + }, + "statistics":{ + "shape":"ChannelStatistics", + "documentation":"

Statistics about the channel. Included if the 'includeStatistics' parameter is set to true in the request.

" } } }, @@ -1313,6 +1343,12 @@ "documentation":"

The name of the data store

", "location":"uri", "locationName":"datastoreName" + }, + "includeStatistics":{ + "shape":"IncludeStatisticsFlag", + "documentation":"

If true, include statistics about the data store in the response.

", + "location":"querystring", + "locationName":"includeStatistics" } } }, @@ -1322,6 +1358,10 @@ "datastore":{ "shape":"Datastore", "documentation":"

Information about the data store.

" + }, + "statistics":{ + "shape":"DatastoreStatistics", + "documentation":"

Statistics about the data store. Included if the 'includeStatistics' parameter is set to true in the request.

" } } }, @@ -1428,6 +1468,20 @@ "EntryName":{"type":"string"}, "ErrorCode":{"type":"string"}, "ErrorMessage":{"type":"string"}, + "EstimatedResourceSize":{ + "type":"structure", + "members":{ + "estimatedSizeInBytes":{ + "shape":"SizeInBytes", + "documentation":"

The estimated size of the resource in bytes.

" + }, + "estimatedOn":{ + "shape":"Timestamp", + "documentation":"

The time when the estimate of the size of the resource was made.

" + } + }, + "documentation":"

The estimated size of the resource.

" + }, "FilterActivity":{ "type":"structure", "required":[ @@ -1490,6 +1544,7 @@ } } }, + "IncludeStatisticsFlag":{"type":"boolean"}, "InternalFailureException":{ "type":"structure", "members":{ @@ -1776,7 +1831,7 @@ }, "payload":{ "shape":"MessagePayload", - "documentation":"

The payload of the message.

" + "documentation":"

The payload of the message. This may be a JSON string or a Base64-encoded string representing binary data (in which case you must decode it by means of a pipeline activity).

" } }, "documentation":"

Information about a message.

" @@ -2145,6 +2200,7 @@ "exception":true, "fault":true }, + "SizeInBytes":{"type":"double"}, "SqlQuery":{"type":"string"}, "SqlQueryDatasetAction":{ "type":"structure", diff --git a/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json b/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json index 91f02344..78176e37 100644 --- a/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json +++ b/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json @@ -11,6 +11,26 @@ "uid":"kinesis-video-archived-media-2017-09-30" }, "operations":{ + "GetHLSStreamingSessionURL":{ + "name":"GetHLSStreamingSessionURL", + "http":{ + "method":"POST", + "requestUri":"/getHLSStreamingSessionURL" + }, + "input":{"shape":"GetHLSStreamingSessionURLInput"}, + "output":{"shape":"GetHLSStreamingSessionURLOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ClientLimitExceededException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"UnsupportedStreamMediaTypeException"}, + {"shape":"NoDataRetentionException"}, + {"shape":"MissingCodecPrivateDataException"}, + {"shape":"InvalidCodecPrivateDataException"} + ], + "documentation":"

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

You must specify either the StreamName or the StreamARN.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF), rather than the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bit rates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video decoder and renderer. The media fragments contain H.264-encoded video frames and time stamps.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action, and additional metadata for the media player, including estimated bit rate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackMode of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackMode of LIVE.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream, which the media player needs to decode video frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's video frames and their time stamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data are excluded from the HLS media playlist. Therefore, the codec private data does not change between fragments in a session.

      Data retrieved with this action is billable. See Pricing for details.

The following restrictions apply to HLS sessions:

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

" + }, "GetMediaForFragmentList":{ "name":"GetMediaForFragmentList", "http":{ @@ -25,7 +45,7 @@ {"shape":"ClientLimitExceededException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Gets media for a list of fragments (specified by fragment number) from the archived data in a Kinesis video stream.

This operation is only available for the AWS SDK for Java. It is not supported in AWS SDKs for other languages.

The following limits apply when using the GetMediaForFragmentList API:

" + "documentation":"

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

The following limits apply when using the GetMediaForFragmentList API:

" }, "ListFragments":{ "name":"ListFragments", @@ -60,7 +80,19 @@ "min":1, "pattern":"^[a-zA-Z0-9_\\.\\-]+$" }, + "DiscontinuityMode":{ + "type":"string", + "enum":[ + "ALWAYS", + "NEVER" + ] + }, "ErrorMessage":{"type":"string"}, + "Expires":{ + "type":"integer", + "max":43200, + "min":300 + }, "Fragment":{ "type":"structure", "members":{ @@ -93,7 +125,9 @@ }, "FragmentNumberList":{ "type":"list", - "member":{"shape":"FragmentNumberString"} + "member":{"shape":"FragmentNumberString"}, + "max":1000, + "min":1 }, "FragmentNumberString":{ "type":"string", @@ -126,6 +160,48 @@ "SERVER_TIMESTAMP" ] }, + "GetHLSStreamingSessionURLInput":{ + "type":"structure", + "members":{ + "StreamName":{ + "shape":"StreamName", + "documentation":"

The name of the stream for which to retrieve the HLS master playlist URL.

You must specify either the StreamName or the StreamARN.

" + }, + "StreamARN":{ + "shape":"ResourceARN", + "documentation":"

The Amazon Resource Name (ARN) of the stream for which to retrieve the HLS master playlist URL.

You must specify either the StreamName or the StreamARN.

" + }, + "PlaybackMode":{ + "shape":"PlaybackMode", + "documentation":"

Whether to retrieve live or archived, on-demand data.

Features of the two types of session include the following:

In both playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start time stamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different time stamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.

The default is LIVE.

" + }, + "HLSFragmentSelector":{ + "shape":"HLSFragmentSelector", + "documentation":"

The time range of the requested fragment, and the source of the time stamps.

This parameter is required if PlaybackMode is ON_DEMAND. This parameter is optional if PlaybackMode is LIVE. If PlaybackMode is LIVE, the FragmentSelectorType can be set, but the TimestampRange should not be set. If PlaybackMode is ON_DEMAND, both FragmentSelectorType and TimestampRange must be set.

" + }, + "DiscontinuityMode":{ + "shape":"DiscontinuityMode", + "documentation":"

Specifies when flags marking discontinuities between fragments will be added to the media playlists. The default is ALWAYS when HLSFragmentSelector is SERVER_TIMESTAMP, and NEVER when it is PRODUCER_TIMESTAMP.

Media players typically build a timeline of media content to play, based on the time stamps of each fragment. This means that if there is any overlap between fragments (as is typical if HLSFragmentSelector is SERVER_TIMESTAMP), the media player timeline has small gaps between fragments in some places, and overwrites frames in other places. When there are discontinuity flags between fragments, the media player is expected to reset the timeline, resulting in the fragment being played immediately after the previous fragment. We recommend that you always have discontinuity flags between fragments if the fragment time stamps are not accurate or if fragments might be missing. If you want the player timeline to accurately map to the producer time stamps, do not place discontinuity flags between fragments.

" + }, + "Expires":{ + "shape":"Expires", + "documentation":"

The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).

When a session expires, no new calls to GetHLSMasterPlaylist, GetHLSMediaPlaylist, GetMP4InitFragment, or GetMP4MediaFragment can be made for that session.

The default is 300 (5 minutes).

" + }, + "MaxMediaPlaylistFragmentResults":{ + "shape":"PageLimit", + "documentation":"

The maximum number of fragments that are returned in the HLS media playlists.

When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.

When there are a higher number of fragments available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.

The default is 5 fragments if PlaybackMode is LIVE, and 1,000 if PlaybackMode is ON_DEMAND.

The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.

" + } + } + }, + "GetHLSStreamingSessionURLOutput":{ + "type":"structure", + "members":{ + "HLSStreamingSessionURL":{ + "shape":"HLSStreamingSessionURL", + "documentation":"

The URL (containing the session token) that a media player can use to retrieve the HLS master playlist.

" + } + } + }, "GetMediaForFragmentListInput":{ "type":"structure", "required":[ @@ -154,11 +230,47 @@ }, "Payload":{ "shape":"Payload", - "documentation":"

The payload that Kinesis Video Streams returns is a sequence of chunks from the specified stream. For information about the chunks, see PutMedia. The chunks that Kinesis Video Streams returns in the GetMediaForFragmentList call also include the following additional Matroska (MKV) tags:

The following tags will be included if an exception occurs:

" + "documentation":"

The payload that Kinesis Video Streams returns is a sequence of chunks from the specified stream. For information about the chunks, see PutMedia. The chunks that Kinesis Video Streams returns in the GetMediaForFragmentList call also include the following additional Matroska (MKV) tags:

The following tags will be included if an exception occurs:

" } }, "payload":"Payload" }, + "HLSFragmentSelector":{ + "type":"structure", + "members":{ + "FragmentSelectorType":{ + "shape":"HLSFragmentSelectorType", + "documentation":"

The source of the time stamps for the requested media.

When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is ON_DEMAND, the first fragment ingested with a producer time stamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In addition, the fragments with producer time stamps within the TimestampRange ingested immediately following the first fragment (up to the GetHLSStreamingSessionURLInput$MaxMediaPlaylistFragmentResults value) are included.

Fragments that have duplicate producer time stamps are deduplicated. This means that if producers are producing a stream of fragments with producer time stamps that are approximately equal to the true clock time, the HLS media playlists will contain all of the fragments within the requested time stamp range. If some fragments are ingested within the same time range but at very different points in time, only the oldest ingested collection of fragments is returned.

When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is LIVE, the producer time stamps are used in the MP4 fragments and for deduplication. But the most recently ingested fragments based on server time stamps are included in the HLS media playlist. This means that even if fragments ingested in the past have producer time stamps with values now, they are not included in the HLS media playlist.

The default is SERVER_TIMESTAMP.

" + }, + "TimestampRange":{ + "shape":"HLSTimestampRange", + "documentation":"

The start and end of the time stamp range for the requested media.

This value should not be present if PlaybackMode is LIVE.

" + } + }, + "documentation":"

Contains the range of time stamps for the requested media, and the source of the time stamps.

" + }, + "HLSFragmentSelectorType":{ + "type":"string", + "enum":[ + "PRODUCER_TIMESTAMP", + "SERVER_TIMESTAMP" + ] + }, + "HLSStreamingSessionURL":{"type":"string"}, + "HLSTimestampRange":{ + "type":"structure", + "members":{ + "StartTimestamp":{ + "shape":"Timestamp", + "documentation":"

The start of the time stamp range for the requested media.

If the HLSTimestampRange value is specified, the StartTimestamp value is required.

This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" + }, + "EndTimestamp":{ + "shape":"Timestamp", + "documentation":"

The end of the time stamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

If the HLSTimestampRange value is specified, the EndTimestamp value is required.

This value is inclusive. The EndTimestamp is compared to the (starting) time stamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" + } + }, + "documentation":"

The start and end of the time stamp range for the requested media.

This value should not be present if PlaybackMode is LIVE.

The values in the HLSTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.

" + }, "InvalidArgumentException":{ "type":"structure", "members":{ @@ -168,6 +280,15 @@ "error":{"httpStatusCode":400}, "exception":true }, + "InvalidCodecPrivateDataException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The Codec Private Data in the video stream is not valid for this operation.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "ListFragmentsInput":{ "type":"structure", "required":["StreamName"], @@ -204,6 +325,24 @@ } }, "Long":{"type":"long"}, + "MissingCodecPrivateDataException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

No Codec Private Data was found in the video stream.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoDataRetentionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

A PlaybackMode of ON_DEMAND was requested for a stream that does not retain data (that is, has a DataRetentionInHours of 0).

", + "error":{"httpStatusCode":400}, + "exception":true + }, "NotAuthorizedException":{ "type":"structure", "members":{ @@ -222,12 +361,25 @@ "type":"blob", "streaming":true }, + "PlaybackMode":{ + "type":"string", + "enum":[ + "LIVE", + "ON_DEMAND" + ] + }, + "ResourceARN":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:aws:kinesisvideo:[a-z0-9-]+:[0-9]+:[a-z]+/[a-zA-Z0-9_.-]+/[0-9]+" + }, "ResourceNotFoundException":{ "type":"structure", "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Kinesis Video Streams can't find the stream that you specified.

", + "documentation":"

GetMedia throws this error when Kinesis Video Streams can't find the stream that you specified.

GetHLSStreamingSessionURL throws this error if a session with a PlaybackMode of ON_DEMAND is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.

", "error":{"httpStatusCode":404}, "exception":true }, @@ -259,6 +411,15 @@ } }, "documentation":"

The range of time stamps for which to return fragments.

" + }, + "UnsupportedStreamMediaTypeException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

An HLS streaming session was requested for a stream with a media type that is not video/h264.

", + "error":{"httpStatusCode":400}, + "exception":true } }, "documentation":"

" diff --git a/botocore/data/kinesis/2013-12-02/service-2.json b/botocore/data/kinesis/2013-12-02/service-2.json index 012059af..21eb2c60 100644 --- a/botocore/data/kinesis/2013-12-02/service-2.json +++ b/botocore/data/kinesis/2013-12-02/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"kinesis", "jsonVersion":"1.1", "protocol":"json", + "protocolSettings":{"h2":"eventstream"}, "serviceAbbreviation":"Kinesis", "serviceFullName":"Amazon Kinesis", "serviceId":"Kinesis", @@ -26,7 +27,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Adds or updates tags for the specified Kinesis data stream. Each stream can have up to 10 tags.

If tags have already been assigned to the stream, AddTagsToStream overwrites any existing tags that correspond to the specified tag keys.

AddTagsToStream has a limit of five transactions per second per account.

" + "documentation":"

Adds or updates tags for the specified Kinesis data stream. Each time you invoke this operation, you can specify up to 10 tags. If you want to add more than 10 tags to your stream, you can invoke this operation multiple times. In total, each stream can have up to 50 tags.

If tags have already been assigned to the stream, AddTagsToStream overwrites any existing tags that correspond to the specified tag keys.

AddTagsToStream has a limit of five transactions per second per account.

" }, "CreateStream":{ "name":"CreateStream", @@ -66,10 +67,25 @@ "input":{"shape":"DeleteStreamInput"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"} ], "documentation":"

Deletes a Kinesis data stream and all its shards and data. You must shut down any applications that are operating on the stream before you delete the stream. If an application attempts to operate on a deleted stream, it receives the exception ResourceNotFoundException.

If the stream is in the ACTIVE state, you can delete it. After a DeleteStream request, the specified stream is in the DELETING state until Kinesis Data Streams completes the deletion.

Note: Kinesis Data Streams might continue to accept data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the stream deletion is complete.

When you delete a stream, any shards in that stream are also deleted, and any tags are dissociated from the stream.

You can use the DescribeStream operation to check the state of the stream, which is returned in StreamStatus.

DeleteStream has a limit of five transactions per second per account.

" }, + "DeregisterStreamConsumer":{ + "name":"DeregisterStreamConsumer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterStreamConsumerInput"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"} + ], + "documentation":"

To deregister a consumer, provide its ARN. Alternatively, you can provide the ARN of the data stream and the name you gave the consumer when you registered it. You may also provide all three parameters, as long as they don't conflict with each other. If you don't know the name or ARN of the consumer that you want to deregister, you can use the ListStreamConsumers operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. The description of a consumer contains its name and ARN.

This operation has a limit of five transactions per second per account.

" + }, "DescribeLimits":{ "name":"DescribeLimits", "http":{ @@ -97,6 +113,21 @@ ], "documentation":"

Describes the specified Kinesis data stream.

The information returned includes the stream name, Amazon Resource Name (ARN), creation time, enhanced metric configuration, and shard map. The shard map is an array of shard objects. For each shard object, there is the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played in a role in creating the shard. Every record ingested in the stream is identified by a sequence number, which is assigned when the record is put into the stream.

You can limit the number of shards returned by each call. For more information, see Retrieving Shards from a Stream in the Amazon Kinesis Data Streams Developer Guide.

There are no guarantees about the chronological order shards returned. To process shards in chronological order, use the ID of the parent shard to track the lineage to the oldest shard.

This operation has a limit of 10 transactions per second per account.
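A minimal sketch of walking the full shard map with the boto3 client, paging with ExclusiveStartShardId; the stream name is hypothetical.

    import boto3

    kinesis = boto3.client("kinesis")
    shards, last_shard_id = [], None
    while True:
        kwargs = {"StreamName": "example-stream", "Limit": 100}
        if last_shard_id:
            kwargs["ExclusiveStartShardId"] = last_shard_id
        description = kinesis.describe_stream(**kwargs)["StreamDescription"]
        shards.extend(description["Shards"])
        if not description["HasMoreShards"]:
            break
        last_shard_id = shards[-1]["ShardId"]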

" }, + "DescribeStreamConsumer":{ + "name":"DescribeStreamConsumer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStreamConsumerInput"}, + "output":{"shape":"DescribeStreamConsumerOutput"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"} + ], + "documentation":"

To get the description of a registered consumer, provide the ARN of the consumer. Alternatively, you can provide the ARN of the data stream and the name you gave the consumer when you registered it. You may also provide all three parameters, as long as they don't conflict with each other. If you don't know the name or ARN of the consumer that you want to describe, you can use the ListStreamConsumers operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream.

This operation has a limit of 20 transactions per second per account.

" + }, "DescribeStreamSummary":{ "name":"DescribeStreamSummary", "http":{ @@ -163,7 +194,7 @@ {"shape":"KMSOptInRequired"}, {"shape":"KMSThrottlingException"} ], - "documentation":"

Gets data records from a Kinesis data stream's shard.

Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. It might take multiple calls to get to a portion of the shard that contains records.

You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. If the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.

Each data record can be up to 1 MB in size, and each shard can read up to 2 MB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit.

The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MB. If a call returns this amount of data, subsequent calls made within the next five seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the stream, subsequent calls made within the next one second throw ProvisionedThroughputExceededException. GetRecords won't return any data when it throws an exception. For this reason, we recommend that you wait one second between calls to GetRecords; however, it's possible that the application will get exceptions for longer than 1 second.

To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Data Streams Developer Guide).

Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side time stamp, whereas a client-side time stamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time stamp accuracy, or that the time stamp is always increasing. For example, records in a shard or across a stream might have time stamps that are out of order.

" + "documentation":"

Gets data records from a Kinesis data stream's shard.

Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. It might take multiple calls to get to a portion of the shard that contains records.

You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. If the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.

Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit. The maximum number of records that can be returned per call is 10,000.

The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MiB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the stream, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException. GetRecords doesn't return any data when it throws an exception. For this reason, we recommend that you wait 1 second between calls to GetRecords. However, it's possible that the application will get exceptions for longer than 1 second.

To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Data Streams Developer Guide).

Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side time stamp, whereas a client-side time stamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time stamp accuracy, or that the time stamp is always increasing. For example, records in a shard or across a stream might have time stamps that are out of order.

This operation has a limit of five transactions per second per account.
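The read loop described above, sketched with the boto3 client under the assumption of a single open shard; the stream name, shard ID, and record handling are hypothetical.

    import time
    import boto3

    kinesis = boto3.client("kinesis")
    iterator = kinesis.get_shard_iterator(
        StreamName="example-stream",
        ShardId="shardId-000000000000",
        ShardIteratorType="TRIM_HORIZON",
    )["ShardIterator"]

    while iterator:
        response = kinesis.get_records(ShardIterator=iterator, Limit=1000)
        for record in response["Records"]:
            print(len(record["Data"]), "bytes")          # hypothetical processing
        iterator = response.get("NextShardIterator")      # None once the shard is closed
        time.sleep(1)                                      # recommended pause between calls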

" }, "GetShardIterator":{ "name":"GetShardIterator", @@ -178,7 +209,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Gets an Amazon Kinesis shard iterator. A shard iterator expires five minutes after it is returned to the requester.

A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards.

You must specify the shard iterator type. For example, you can set the ShardIteratorType parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type. Alternatively, the parameter can read right after the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in the shard in the system (the oldest data record in the shard), or LATEST so that you always read the most recent data in the shard.

When you read repeatedly from a stream, use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in NextShardIterator. A new shard iterator is returned by every GetRecords request in NextShardIterator, which you use in the ShardIterator parameter of the next GetRecords request.

If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Data Streams Developer Guide.

If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. A shard can be closed as a result of using SplitShard or MergeShards.

GetShardIterator has a limit of five transactions per second per account per open shard.

" + "documentation":"

Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is returned to the requester.

A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards.

You must specify the shard iterator type. For example, you can set the ShardIteratorType parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type. Alternatively, you can read right after a given sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, with sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can also specify the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in the shard (the oldest data record in the shard), or LATEST so that you always read the most recent data in the shard.

When you read repeatedly from a stream, use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request; for subsequent reads, use the shard iterator returned by the GetRecords request in NextShardIterator. Every GetRecords request returns a new shard iterator in NextShardIterator, which you use in the ShardIterator parameter of the next GetRecords request.

If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Data Streams Developer Guide.

If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. A shard can be closed as a result of using SplitShard or MergeShards.

GetShardIterator has a limit of five transactions per second per account per open shard.
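A sketch of the main iterator types through the boto3 client; the stream, shard, timestamp, and sequence number are hypothetical.

    import datetime
    import boto3

    kinesis = boto3.client("kinesis")
    base = {"StreamName": "example-stream", "ShardId": "shardId-000000000000"}

    # Oldest untrimmed record in the shard.
    oldest = kinesis.get_shard_iterator(ShardIteratorType="TRIM_HORIZON", **base)

    # Records from an arbitrary point in time.
    timed = kinesis.get_shard_iterator(
        ShardIteratorType="AT_TIMESTAMP",
        Timestamp=datetime.datetime(2018, 8, 16, 12, 0, 0),
        **base)

    # Resume right after a sequence number returned by an earlier call.
    resumed = kinesis.get_shard_iterator(
        ShardIteratorType="AFTER_SEQUENCE_NUMBER",
        StartingSequenceNumber="49584269716568139332153477389983890205920904349368320002",
        **base)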

" }, "IncreaseStreamRetentionPeriod":{ "name":"IncreaseStreamRetentionPeriod", @@ -210,7 +241,24 @@ {"shape":"ExpiredNextTokenException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Lists the shards in a stream and provides information about each shard.

This API is a new operation that is used by the Amazon Kinesis Client Library (KCL). If you have a fine-grained IAM policy that only allows specific operations, you must update your policy to allow calls to this API. For more information, see Controlling Access to Amazon Kinesis Data Streams Resources Using IAM.

" + "documentation":"

Lists the shards in a stream and provides information about each shard. This operation has a limit of 100 transactions per second per data stream.

This API is a new operation that is used by the Amazon Kinesis Client Library (KCL). If you have a fine-grained IAM policy that only allows specific operations, you must update your policy to allow calls to this API. For more information, see Controlling Access to Amazon Kinesis Data Streams Resources Using IAM.
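A hedged sketch of paging through ListShards with the boto3 client; because StreamName and NextToken are mutually exclusive in this model, only the first call names the (hypothetical) stream.

    import boto3

    kinesis = boto3.client("kinesis")
    shards, token = [], None
    while True:
        if token:
            response = kinesis.list_shards(NextToken=token)
        else:
            response = kinesis.list_shards(StreamName="example-stream")
        shards.extend(response["Shards"])
        token = response.get("NextToken")
        if not token:
            break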

" + }, + "ListStreamConsumers":{ + "name":"ListStreamConsumers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStreamConsumersInput"}, + "output":{"shape":"ListStreamConsumersOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ExpiredNextTokenException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Lists the consumers registered to receive data from a stream using enhanced fan-out, and provides information about each consumer.

This operation has a limit of 10 transactions per second per account.
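A minimal pagination sketch with the boto3 client; the stream ARN is hypothetical, and StreamCreationTimestamp is omitted because it cannot be combined with NextToken.

    import boto3

    kinesis = boto3.client("kinesis")
    stream_arn = "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream"
    consumers, token = [], None
    while True:
        kwargs = {"StreamARN": stream_arn, "MaxResults": 100}
        if token:
            kwargs["NextToken"] = token
        response = kinesis.list_stream_consumers(**kwargs)
        consumers.extend(response["Consumers"])
        token = response.get("NextToken")
        if not token:
            break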

" }, "ListStreams":{ "name":"ListStreams", @@ -297,6 +345,22 @@ ], "documentation":"

Writes multiple data records into a Kinesis data stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data ingestion and processing.

Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Kinesis Data Streams as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide.

Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Data Streams Developer Guide.

The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array.

The response Records array includes both successfully and unsuccessfully processed records. Kinesis Data Streams attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique to all records in the stream.

An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Data Streams Developer Guide.

By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
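The partial-failure handling described above, sketched with the boto3 client; the stream name and payloads are hypothetical, and only the records whose response entries carry an ErrorCode are retried.

    import json
    import boto3

    kinesis = boto3.client("kinesis")
    pending = [
        {"Data": json.dumps({"event_id": i}).encode(), "PartitionKey": str(i)}
        for i in range(100)
    ]

    while pending:
        response = kinesis.put_records(StreamName="example-stream", Records=pending)
        if response["FailedRecordCount"] == 0:
            break
        # Response entries line up one-to-one with the request entries, so keep
        # only the failed ones. A real producer would also back off here.
        pending = [
            record for record, result in zip(pending, response["Records"])
            if "ErrorCode" in result
        ]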

" }, + "RegisterStreamConsumer":{ + "name":"RegisterStreamConsumer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterStreamConsumerInput"}, + "output":{"shape":"RegisterStreamConsumerOutput"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Registers a consumer with a Kinesis data stream. When you use this operation, the consumer you register can read data from the stream at a rate of up to 2 MiB per second. This rate is unaffected by the total number of consumers that read from the same stream.

You can register up to 5 consumers per stream. A given consumer can only be registered with one stream.

This operation has a limit of five transactions per second per account.
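A sketch of registering an enhanced fan-out consumer and waiting for it to become readable, using the boto3 client; the stream ARN and consumer name are hypothetical.

    import time
    import boto3

    kinesis = boto3.client("kinesis")
    consumer = kinesis.register_stream_consumer(
        StreamARN="arn:aws:kinesis:us-east-1:111122223333:stream/example-stream",
        ConsumerName="example-consumer",
    )["Consumer"]

    # The consumer cannot read data while it is still CREATING.
    while consumer["ConsumerStatus"] != "ACTIVE":
        time.sleep(1)
        consumer = kinesis.describe_stream_consumer(
            ConsumerARN=consumer["ConsumerARN"]
        )["ConsumerDescription"]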

" + }, "RemoveTagsFromStream":{ "name":"RemoveTagsFromStream", "http":{ @@ -325,7 +389,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Splits a shard into two new shards in the Kinesis data stream, to increase the stream's capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested.

You can also use SplitShard when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard to increase stream capacity, so that more Kinesis Data Streams applications can simultaneously read data from the stream for real-time processing.

You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information, see Split a Shard in the Amazon Kinesis Data Streams Developer Guide.

You can use DescribeStream to determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey parameters that are specified in the SplitShard request.

SplitShard is an asynchronous operation. Upon receiving a SplitShard request, Kinesis Data Streams immediately returns a response and sets the stream status to UPDATING. After the operation is completed, Kinesis Data Streams sets the stream status to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

You can use DescribeStream to check the status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. If a stream is in CREATING or UPDATING or DELETING states, DescribeStream returns a ResourceInUseException.

If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException. If you try to create more shards than are authorized for your account, you receive a LimitExceededException.

For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact AWS Support.

If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.

SplitShard has a limit of five transactions per second per account.

" + "documentation":"

Splits a shard into two new shards in the Kinesis data stream, to increase the stream's capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested.

You can also use SplitShard when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard to increase stream capacity, so that more Kinesis Data Streams applications can simultaneously read data from the stream for real-time processing.

You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information, see Split a Shard in the Amazon Kinesis Data Streams Developer Guide.

You can use DescribeStream to determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey parameters that are specified in the SplitShard request.

SplitShard is an asynchronous operation. Upon receiving a SplitShard request, Kinesis Data Streams immediately returns a response and sets the stream status to UPDATING. After the operation is completed, Kinesis Data Streams sets the stream status to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

You can use DescribeStream to check the status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. If the stream is in the CREATING, UPDATING, or DELETING state, DescribeStream returns a ResourceInUseException.

If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException. If you try to create more shards than are authorized for your account, you receive a LimitExceededException.

For the default shard limit for an AWS account, see Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact AWS Support.

If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.

SplitShard has a limit of five transactions per second per account.
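A hedged sketch that splits the first shard of a stream at the midpoint of its hash key range, using the boto3 client; the stream name and the choice of shard are hypothetical.

    import boto3

    kinesis = boto3.client("kinesis")
    description = kinesis.describe_stream(StreamName="example-stream")["StreamDescription"]
    shard = description["Shards"][0]
    key_range = shard["HashKeyRange"]
    # A common choice: split at the midpoint of the shard's hash key range.
    midpoint = (int(key_range["StartingHashKey"]) + int(key_range["EndingHashKey"])) // 2

    kinesis.split_shard(
        StreamName="example-stream",
        ShardToSplit=shard["ShardId"],
        NewStartingHashKey=str(midpoint),
    )
    # The stream moves to UPDATING and returns to ACTIVE when the split completes.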

" }, "StartStreamEncryption":{ "name":"StartStreamEncryption", @@ -346,7 +410,7 @@ {"shape":"KMSOptInRequired"}, {"shape":"KMSThrottlingException"} ], - "documentation":"

Enables or updates server-side encryption using an AWS KMS key for a specified stream.

Starting encryption is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Updating or applying encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, encryption begins for records written to the stream.

API Limits: You can successfully apply a new AWS KMS key for server-side encryption 25 times in a rolling 24-hour period.

Note: It can take up to five seconds after the stream is in an ACTIVE status before all records written to the stream are encrypted. After you enable encryption, you can verify that encryption is applied by inspecting the API response from PutRecord or PutRecords.

" + "documentation":"

Enables or updates server-side encryption using an AWS KMS key for a specified stream.

Starting encryption is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Updating or applying encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, encryption begins for records written to the stream.

API Limits: You can successfully apply a new AWS KMS key for server-side encryption 25 times in a rolling 24-hour period.

Note: It can take up to 5 seconds after the stream is in an ACTIVE status before all records written to the stream are encrypted. After you enable encryption, you can verify that encryption is applied by inspecting the API response from PutRecord or PutRecords.
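A minimal sketch with the boto3 client; the stream name is hypothetical, and the AWS managed alias alias/aws/kinesis is used only as an example of an acceptable KeyId value.

    import boto3

    kinesis = boto3.client("kinesis")
    kinesis.start_stream_encryption(
        StreamName="example-stream",
        EncryptionType="KMS",
        KeyId="alias/aws/kinesis",
    )
    # Once the stream returns to ACTIVE (allow up to 5 extra seconds), new
    # PutRecord/PutRecords responses report EncryptionType "KMS".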

" }, "StopStreamEncryption":{ "name":"StopStreamEncryption", @@ -361,7 +425,23 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disables server-side encryption for a specified stream.

Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Stopping encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, records written to the stream are no longer encrypted by Kinesis Data Streams.

API Limits: You can successfully disable server-side encryption 25 times in a rolling 24-hour period.

Note: It can take up to five seconds after the stream is in an ACTIVE status before all records written to the stream are no longer subject to encryption. After you disabled encryption, you can verify that encryption is not applied by inspecting the API response from PutRecord or PutRecords.

" + "documentation":"

Disables server-side encryption for a specified stream.

Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Stopping encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, records written to the stream are no longer encrypted by Kinesis Data Streams.

API Limits: You can successfully disable server-side encryption 25 times in a rolling 24-hour period.

Note: It can take up to 5 seconds after the stream is in an ACTIVE status before all records written to the stream are no longer subject to encryption. After you disable encryption, you can verify that encryption is not applied by inspecting the API response from PutRecord or PutRecords.

" + }, + "SubscribeToShard":{ + "name":"SubscribeToShard", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubscribeToShardInput"}, + "output":{"shape":"SubscribeToShardOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Call this operation from your consumer after you call RegisterStreamConsumer to register the consumer with Kinesis Data Streams. If the call succeeds, your consumer starts receiving events of type SubscribeToShardEvent for up to 5 minutes, after which time you need to call SubscribeToShard again to renew the subscription if you want to continue to receive records.

You can make one call to SubscribeToShard per second per ConsumerARN. If your call succeeds, and then you call the operation again less than 5 seconds later, the second call generates a ResourceInUseException. If you call the operation a second time more than 5 seconds after the first call succeeds, the second call succeeds and the first connection gets shut down.
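A heavily hedged sketch of the renewal loop; it assumes a botocore/boto3 release new enough to drive this operation's HTTP/2 event stream (the version in this patch only adds the model), and the consumer ARN and shard ID are hypothetical.

    import boto3

    kinesis = boto3.client("kinesis")
    consumer_arn = "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream/consumer/example-consumer:1533696000"

    while True:  # each subscription lasts at most 5 minutes, then is renewed
        response = kinesis.subscribe_to_shard(
            ConsumerARN=consumer_arn,
            ShardId="shardId-000000000000",
            StartingPosition={"Type": "LATEST"},
        )
        for event in response["EventStream"]:
            records = event["SubscribeToShardEvent"]["Records"]
            print(len(records), "records received")  # hypothetical processing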

" }, "UpdateShardCount":{ "name":"UpdateShardCount", @@ -377,7 +457,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates the shard count of the specified stream to the specified number of shards.

Updating the shard count is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING.

To update the shard count, Kinesis Data Streams performs splits or merges on individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges.

This operation has the following limits. You cannot do the following:

For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To request an increase in the call rate limit, the shard limit for this API, or your overall shard limit, use the limits form.

" + "documentation":"

Updates the shard count of the specified stream to the specified number of shards.

Updating the shard count is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING.

To update the shard count, Kinesis Data Streams performs splits or merges on individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges.

This operation has the following default limits. By default, you cannot do the following:

For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To request an increase in the call rate limit, the shard limit for this API, or your overall shard limit, use the limits form.
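A minimal sketch that doubles the open shard count with uniform scaling, using the boto3 client; the stream name is hypothetical.

    import boto3

    kinesis = boto3.client("kinesis")
    summary = kinesis.describe_stream_summary(StreamName="example-stream")
    open_shards = summary["StreamDescriptionSummary"]["OpenShardCount"]

    # Doubling (or halving) keeps the number of splits/merges to a minimum.
    kinesis.update_shard_count(
        StreamName="example-stream",
        TargetShardCount=open_shards * 2,
        ScalingType="UNIFORM_SCALING",
    )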

" } }, "shapes":{ @@ -394,12 +474,102 @@ }, "Tags":{ "shape":"TagMap", - "documentation":"

The set of key-value pairs to use to create the tags.

" + "documentation":"

A set of up to 10 key-value pairs to use to create the tags.

" } }, "documentation":"

Represents the input for AddTagsToStream.

" }, "BooleanObject":{"type":"boolean"}, + "Consumer":{ + "type":"structure", + "required":[ + "ConsumerName", + "ConsumerARN", + "ConsumerStatus", + "ConsumerCreationTimestamp" + ], + "members":{ + "ConsumerName":{ + "shape":"ConsumerName", + "documentation":"

The name of the consumer is something you choose when you register the consumer.

" + }, + "ConsumerARN":{ + "shape":"ConsumerARN", + "documentation":"

When you register a consumer, Kinesis Data Streams generates an ARN for it. You need this ARN to be able to call SubscribeToShard.

If you delete a consumer and then create a new one with the same name, it won't have the same ARN. That's because consumer ARNs contain the creation timestamp. This is important to keep in mind if you have IAM policies that reference consumer ARNs.

" + }, + "ConsumerStatus":{ + "shape":"ConsumerStatus", + "documentation":"

A consumer can't read data while in the CREATING or DELETING states.

" + }, + "ConsumerCreationTimestamp":{ + "shape":"Timestamp", + "documentation":"

" + } + }, + "documentation":"

An object that represents the details of the consumer you registered.

" + }, + "ConsumerARN":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(arn):aws.*:kinesis:.*:\\d{12}:.*stream\\/[a-zA-Z0-9_.-]+\\/consumer\\/[a-zA-Z0-9_.-]+:[0-9]+" + }, + "ConsumerCountObject":{ + "type":"integer", + "max":1000000, + "min":0 + }, + "ConsumerDescription":{ + "type":"structure", + "required":[ + "ConsumerName", + "ConsumerARN", + "ConsumerStatus", + "ConsumerCreationTimestamp", + "StreamARN" + ], + "members":{ + "ConsumerName":{ + "shape":"ConsumerName", + "documentation":"

The name of the consumer is something you choose when you register the consumer.

" + }, + "ConsumerARN":{ + "shape":"ConsumerARN", + "documentation":"

When you register a consumer, Kinesis Data Streams generates an ARN for it. You need this ARN to be able to call SubscribeToShard.

If you delete a consumer and then create a new one with the same name, it won't have the same ARN. That's because consumer ARNs contain the creation timestamp. This is important to keep in mind if you have IAM policies that reference consumer ARNs.

" + }, + "ConsumerStatus":{ + "shape":"ConsumerStatus", + "documentation":"

A consumer can't read data while in the CREATING or DELETING states.

" + }, + "ConsumerCreationTimestamp":{ + "shape":"Timestamp", + "documentation":"

" + }, + "StreamARN":{ + "shape":"StreamARN", + "documentation":"

The ARN of the stream with which you registered the consumer.

" + } + }, + "documentation":"

An object that represents the details of a registered consumer.

" + }, + "ConsumerList":{ + "type":"list", + "member":{"shape":"Consumer"} + }, + "ConsumerName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "ConsumerStatus":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "ACTIVE" + ] + }, "CreateStreamInput":{ "type":"structure", "required":[ @@ -448,10 +618,31 @@ "StreamName":{ "shape":"StreamName", "documentation":"

The name of the stream to delete.

" + }, + "EnforceConsumerDeletion":{ + "shape":"BooleanObject", + "documentation":"

If this parameter is unset (null) or if you set it to false, and the stream has registered consumers, the call to DeleteStream fails with a ResourceInUseException.

" } }, "documentation":"

Represents the input for DeleteStream.

" }, + "DeregisterStreamConsumerInput":{ + "type":"structure", + "members":{ + "StreamARN":{ + "shape":"StreamARN", + "documentation":"

The ARN of the Kinesis data stream that the consumer is registered with. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + }, + "ConsumerName":{ + "shape":"ConsumerName", + "documentation":"

The name that you gave to the consumer.

" + }, + "ConsumerARN":{ + "shape":"ConsumerARN", + "documentation":"

The ARN returned by Kinesis Data Streams when you registered the consumer. If you don't know the ARN of the consumer that you want to deregister, you can use the ListStreamConsumers operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. The description of a consumer contains its ARN.

" + } + } + }, "DescribeLimitsInput":{ "type":"structure", "members":{ @@ -474,6 +665,33 @@ } } }, + "DescribeStreamConsumerInput":{ + "type":"structure", + "members":{ + "StreamARN":{ + "shape":"StreamARN", + "documentation":"

The ARN of the Kinesis data stream that the consumer is registered with. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + }, + "ConsumerName":{ + "shape":"ConsumerName", + "documentation":"

The name that you gave to the consumer.

" + }, + "ConsumerARN":{ + "shape":"ConsumerARN", + "documentation":"

The ARN returned by Kinesis Data Streams when you registered the consumer.

" + } + } + }, + "DescribeStreamConsumerOutput":{ + "type":"structure", + "required":["ConsumerDescription"], + "members":{ + "ConsumerDescription":{ + "shape":"ConsumerDescription", + "documentation":"

An object that represents the details of the consumer.

" + } + } + }, "DescribeStreamInput":{ "type":"structure", "required":["StreamName"], @@ -622,7 +840,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The pagination token passed to the ListShards operation is expired. For more information, see ListShardsInput$NextToken.

", + "documentation":"

The pagination token passed to the operation is expired.

", "exception":true }, "GetRecordsInput":{ @@ -745,6 +963,14 @@ }, "documentation":"

Represents the input for IncreaseStreamRetentionPeriod.

" }, + "InternalFailureException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, "InvalidArgumentException":{ "type":"structure", "members":{ @@ -851,7 +1077,7 @@ }, "ExclusiveStartShardId":{ "shape":"ShardId", - "documentation":"

The ID of the shard to start the list with.

If you don't specify this parameter, the default behavior is for ListShards to list the shards starting with the first one in the stream.

You cannot specify this parameter if you specify NextToken.

" + "documentation":"

Specify this parameter to indicate that you want to list the shards starting with the shard whose ID immediately follows ExclusiveStartShardId.

If you don't specify this parameter, the default behavior is for ListShards to list the shards starting with the first one in the stream.

You cannot specify this parameter if you specify NextToken.

" }, "MaxResults":{ "shape":"ListShardsInputLimit", @@ -881,6 +1107,46 @@ } } }, + "ListStreamConsumersInput":{ + "type":"structure", + "required":["StreamARN"], + "members":{ + "StreamARN":{ + "shape":"StreamARN", + "documentation":"

The ARN of the Kinesis data stream for which you want to list the registered consumers. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

When the number of consumers that are registered with the data stream is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of consumers that are registered with the data stream, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListStreamConsumers to list the next set of registered consumers.

Don't specify StreamName or StreamCreationTimestamp if you specify NextToken because the latter unambiguously identifies the stream.

You can optionally specify a value for the MaxResults parameter when you specify NextToken. If you specify a MaxResults value that is less than the number of consumers that the operation returns if you don't specify MaxResults, the response will contain a new NextToken value. You can use the new NextToken value in a subsequent call to the ListStreamConsumers operation to list the next set of consumers.

Tokens expire after 300 seconds. When you obtain a value for NextToken in the response to a call to ListStreamConsumers, you have 300 seconds to use that value. If you specify an expired token in a call to ListStreamConsumers, you get ExpiredNextTokenException.

" + }, + "MaxResults":{ + "shape":"ListStreamConsumersInputLimit", + "documentation":"

The maximum number of consumers that you want a single call of ListStreamConsumers to return.

" + }, + "StreamCreationTimestamp":{ + "shape":"Timestamp", + "documentation":"

Specify this input parameter to distinguish data streams that have the same name. For example, if you create a data stream and then delete it, and you later create another data stream with the same name, you can use this input parameter to specify which of the two streams you want to list the consumers for.

You can't specify this parameter if you specify the NextToken parameter.

" + } + } + }, + "ListStreamConsumersInputLimit":{ + "type":"integer", + "max":10000, + "min":1 + }, + "ListStreamConsumersOutput":{ + "type":"structure", + "members":{ + "Consumers":{ + "shape":"ConsumerList", + "documentation":"

An array of JSON objects. Each object represents one registered consumer.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

When the number of consumers that are registered with the data stream is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of registered consumers, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListStreamConsumers to list the next set of registered consumers. For more information about the use of this pagination token when calling the ListStreamConsumers operation, see ListStreamConsumersInput$NextToken.

Tokens expire after 300 seconds. When you obtain a value for NextToken in the response to a call to ListStreamConsumers, you have 300 seconds to use that value. If you specify an expired token in a call to ListStreamConsumers, you get ExpiredNextTokenException.

" + } + } + }, "ListStreamsInput":{ "type":"structure", "members":{ @@ -939,7 +1205,7 @@ }, "ListTagsForStreamInputLimit":{ "type":"integer", - "max":10, + "max":50, "min":1 }, "ListTagsForStreamOutput":{ @@ -1213,6 +1479,33 @@ "type":"list", "member":{"shape":"Record"} }, + "RegisterStreamConsumerInput":{ + "type":"structure", + "required":[ + "StreamARN", + "ConsumerName" + ], + "members":{ + "StreamARN":{ + "shape":"StreamARN", + "documentation":"

The ARN of the Kinesis data stream that you want to register the consumer with. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + }, + "ConsumerName":{ + "shape":"ConsumerName", + "documentation":"

For a given Kinesis data stream, each consumer must have a unique name. However, consumer names don't have to be unique across data streams.

" + } + } + }, + "RegisterStreamConsumerOutput":{ + "type":"structure", + "required":["Consumer"], + "members":{ + "Consumer":{ + "shape":"Consumer", + "documentation":"

An object that represents the details of the consumer you registered. When you register a consumer, it gets an ARN that is generated by Kinesis Data Streams.

" + } + } + }, "RemoveTagsFromStreamInput":{ "type":"structure", "required":[ @@ -1387,6 +1680,15 @@ } } }, + "StartingPosition":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{"shape":"ShardIteratorType"}, + "SequenceNumber":{"shape":"SequenceNumber"}, + "Timestamp":{"shape":"Timestamp"} + } + }, "StopStreamEncryptionInput":{ "type":"structure", "required":[ @@ -1409,7 +1711,12 @@ } } }, - "StreamARN":{"type":"string"}, + "StreamARN":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:aws.*:kinesis:.*:\\d{12}:stream/.*" + }, "StreamDescription":{ "type":"structure", "required":[ @@ -1513,6 +1820,10 @@ "OpenShardCount":{ "shape":"ShardCountObject", "documentation":"

The number of open shards in the stream.

" + }, + "ConsumerCount":{ + "shape":"ConsumerCountObject", + "documentation":"

The number of enhanced fan-out consumers registered with the stream.

" } }, "documentation":"

Represents the output for DescribeStreamSummary.

" @@ -1536,6 +1847,76 @@ "UPDATING" ] }, + "SubscribeToShardEvent":{ + "type":"structure", + "required":[ + "Records", + "ContinuationSequenceNumber", + "MillisBehindLatest" + ], + "members":{ + "Records":{ + "shape":"RecordList", + "documentation":"

" + }, + "ContinuationSequenceNumber":{ + "shape":"SequenceNumber", + "documentation":"

Use this as StartingSequenceNumber in the next call to SubscribeToShard.

" + }, + "MillisBehindLatest":{ + "shape":"MillisBehindLatest", + "documentation":"

The number of milliseconds the read records are from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates that record processing is caught up, and there are no new records to process at this moment.

" + } + }, + "documentation":"

After you call SubscribeToShard, Kinesis Data Streams sends events of this type to your consumer.

", + "event":true + }, + "SubscribeToShardEventStream":{ + "type":"structure", + "required":["SubscribeToShardEvent"], + "members":{ + "SubscribeToShardEvent":{"shape":"SubscribeToShardEvent"}, + "ResourceNotFoundException":{"shape":"ResourceNotFoundException"}, + "ResourceInUseException":{"shape":"ResourceInUseException"}, + "KMSDisabledException":{"shape":"KMSDisabledException"}, + "KMSInvalidStateException":{"shape":"KMSInvalidStateException"}, + "KMSAccessDeniedException":{"shape":"KMSAccessDeniedException"}, + "KMSNotFoundException":{"shape":"KMSNotFoundException"}, + "KMSOptInRequired":{"shape":"KMSOptInRequired"}, + "KMSThrottlingException":{"shape":"KMSThrottlingException"}, + "InternalFailureException":{"shape":"InternalFailureException"} + }, + "eventstream":true + }, + "SubscribeToShardInput":{ + "type":"structure", + "required":[ + "ConsumerARN", + "ShardId", + "StartingPosition" + ], + "members":{ + "ConsumerARN":{ + "shape":"ConsumerARN", + "documentation":"

For this parameter, use the value you obtained when you called RegisterStreamConsumer.

" + }, + "ShardId":{ + "shape":"ShardId", + "documentation":"

The ID of the shard you want to subscribe to. To see a list of all the shards for a given stream, use ListShards.

" + }, + "StartingPosition":{"shape":"StartingPosition"} + } + }, + "SubscribeToShardOutput":{ + "type":"structure", + "required":["EventStream"], + "members":{ + "EventStream":{ + "shape":"SubscribeToShardEventStream", + "documentation":"

The event stream that your consumer can use to read records from the shard.

" + } + } + }, "Tag":{ "type":"structure", "required":["Key"], @@ -1559,7 +1940,7 @@ "TagKeyList":{ "type":"list", "member":{"shape":"TagKey"}, - "max":10, + "max":50, "min":1 }, "TagList":{ @@ -1571,7 +1952,7 @@ "type":"map", "key":{"shape":"TagKey"}, "value":{"shape":"TagValue"}, - "max":10, + "max":50, "min":1 }, "TagValue":{ diff --git a/botocore/data/kinesisvideo/2017-09-30/service-2.json b/botocore/data/kinesisvideo/2017-09-30/service-2.json index 85c61d1b..98504cc0 100644 --- a/botocore/data/kinesisvideo/2017-09-30/service-2.json +++ b/botocore/data/kinesisvideo/2017-09-30/service-2.json @@ -187,7 +187,8 @@ "PUT_MEDIA", "GET_MEDIA", "LIST_FRAGMENTS", - "GET_MEDIA_FOR_FRAGMENT_LIST" + "GET_MEDIA_FOR_FRAGMENT_LIST", + "GET_HLS_STREAMING_SESSION_URL" ] }, "AccountStreamLimitExceededException":{ @@ -234,7 +235,7 @@ }, "DataRetentionInHours":{ "shape":"DataRetentionInHours", - "documentation":"

The number of hours that you want to retain the data in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream.

The default value is 0, indicating that the stream does not persist data.

" + "documentation":"

The number of hours that you want to retain the data in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream.

The default value is 0, indicating that the stream does not persist data.

When the DataRetentionInHours value is 0, consumers can still consume the fragments that remain in the service host buffer, which has a retention time limit of 5 minutes and a retention memory limit of 200 MB. Fragments are removed from the buffer when either limit is reached.
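A short sketch of creating a video stream with non-zero retention through the boto3 kinesisvideo client; the stream name and retention value are hypothetical.

    import boto3

    kinesisvideo = boto3.client("kinesisvideo")
    # With DataRetentionInHours=0 only the service host buffer (about 5 minutes
    # or 200 MB) would be readable, so persist fragments for a day instead.
    kinesisvideo.create_stream(
        StreamName="example-video-stream",
        MediaType="video/h264",
        DataRetentionInHours=24,
    )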

" } } }, @@ -699,7 +700,7 @@ }, "MediaType":{ "shape":"MediaType", - "documentation":"

The stream's media type. Use MediaType to specify the type of content that the stream contains to the consumers of the stream. For more information about media types, see Media Types. If you choose to specify the MediaType, see Naming Requirements.

To play video on the console, you must specify the correct video type. For example, if the video in the stream is H.264, specify video/h264 as the MediaType.

" + "documentation":"

The stream's media type. Use MediaType to specify the type of content that the stream contains to the consumers of the stream. For more information about media types, see Media Types. If you choose to specify the MediaType, see Naming Requirements.

To play video on the console, you must specify the correct video type. For example, if the video in the stream is H.264, specify video/h264 as the MediaType.

" } } }, @@ -719,7 +720,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The stream version that you specified is not the latest version. To get the latest version, use the DescribeStream API.

", + "documentation":"

The stream version that you specified is not the latest version. To get the latest version, use the DescribeStream API.

", "error":{"httpStatusCode":400}, "exception":true } diff --git a/botocore/data/kms/2014-11-01/service-2.json b/botocore/data/kms/2014-11-01/service-2.json index 79be1560..3fcccccd 100644 --- a/botocore/data/kms/2014-11-01/service-2.json +++ b/botocore/data/kms/2014-11-01/service-2.json @@ -28,7 +28,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Cancels the deletion of a customer master key (CMK). When this operation is successful, the CMK is set to the Disabled state. To enable a CMK, use EnableKey. You cannot perform this operation on a CMK in a different AWS account.

For more information about scheduling and canceling deletion of a CMK, see Deleting Customer Master Keys in the AWS Key Management Service Developer Guide.

" + "documentation":"

Cancels the deletion of a customer master key (CMK). When this operation is successful, the CMK is set to the Disabled state. To enable a CMK, use EnableKey. You cannot perform this operation on a CMK in a different AWS account.

For more information about scheduling and canceling deletion of a CMK, see Deleting Customer Master Keys in the AWS Key Management Service Developer Guide.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.
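A minimal sketch of canceling a scheduled deletion and re-enabling the key with the boto3 KMS client; the key ID is hypothetical.

    import boto3

    kms = boto3.client("kms")
    key_id = "1234abcd-12ab-34cd-56ef-1234567890ab"
    kms.cancel_key_deletion(KeyId=key_id)
    # The CMK comes back in the Disabled state, so enable it explicitly.
    kms.enable_key(KeyId=key_id)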

" }, "CreateAlias":{ "name":"CreateAlias", @@ -46,7 +46,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Creates a display name for a customer master key (CMK). You can use an alias to identify a CMK in selected operations, such as Encrypt and GenerateDataKey.

Each CMK can have multiple aliases, but each alias points to only one CMK. The alias name must be unique in the AWS account and region. To simplify code that runs in multiple regions, use the same alias name, but point it to a different CMK in each region.

Because an alias is not a property of a CMK, you can delete and change the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all CMKs, use the ListAliases operation.

An alias must start with the word alias followed by a forward slash (alias/). The alias name can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). Alias names cannot begin with aws; that alias name prefix is reserved by Amazon Web Services (AWS).

The alias and the CMK it is mapped to must be in the same AWS account and the same region. You cannot perform this operation on an alias in a different AWS account.

To map an existing alias to a different CMK, call UpdateAlias.

" + "documentation":"

Creates a display name for a customer-managed customer master key (CMK). You can use an alias to identify a CMK in selected operations, such as Encrypt and GenerateDataKey.

Each CMK can have multiple aliases, but each alias points to only one CMK. The alias name must be unique in the AWS account and region. To simplify code that runs in multiple regions, use the same alias name, but point it to a different CMK in each region.

Because an alias is not a property of a CMK, you can delete and change the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all CMKs, use the ListAliases operation.

The alias name can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). Alias names cannot begin with aws/. That alias name prefix is reserved for AWS managed CMKs.

The alias and the CMK it is mapped to must be in the same AWS account and the same region. You cannot perform this operation on an alias in a different AWS account.

To map an existing alias to a different CMK, call UpdateAlias.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.
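A brief sketch with the boto3 KMS client; the alias and key IDs are hypothetical, and the alias deliberately avoids the reserved alias/aws/ prefix.

    import boto3

    kms = boto3.client("kms")
    kms.create_alias(
        AliasName="alias/example-app",
        TargetKeyId="1234abcd-12ab-34cd-56ef-1234567890ab",
    )
    # Repointing the same alias at a different CMK later:
    kms.update_alias(
        AliasName="alias/example-app",
        TargetKeyId="0987dcba-09fe-87dc-65ba-ba0987654321",
    )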

" }, "CreateGrant":{ "name":"CreateGrant", @@ -66,7 +66,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Adds a grant to a customer master key (CMK). The grant specifies who can use the CMK and under what conditions. When setting permissions, grants are an alternative to key policies.

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter. For more information about grants, see Grants in the AWS Key Management Service Developer Guide.

" + "documentation":"

Adds a grant to a customer master key (CMK). The grant specifies who can use the CMK and under what conditions. When setting permissions, grants are an alternative to key policies.

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter. For more information about grants, see Grants in the AWS Key Management Service Developer Guide.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "CreateKey":{ "name":"CreateKey", @@ -85,7 +85,7 @@ {"shape":"LimitExceededException"}, {"shape":"TagException"} ], - "documentation":"

Creates a customer master key (CMK) in the caller's AWS account.

You can use a CMK to encrypt small amounts of data (4 KiB or less) directly, but CMKs are more commonly used to encrypt data encryption keys (DEKs), which are used to encrypt raw data. For more information about DEKs and the difference between CMKs and DEKs, see the following:

You cannot use this operation to create a CMK in a different AWS account.

" + "documentation":"

Creates a customer master key (CMK) in the caller's AWS account.

You can use a CMK to encrypt small amounts of data (4 KiB or less) directly. But CMKs are more commonly used to encrypt data encryption keys (DEKs), which are used to encrypt raw data. For more information about DEKs and the difference between CMKs and DEKs, see the following:

You cannot use this operation to create a CMK in a different AWS account.
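A sketch of the CMK/DEK split with the boto3 KMS client: the CMK created here never leaves KMS, while GenerateDataKey returns a data key whose plaintext encrypts data locally and whose ciphertext is stored alongside it. The description string is hypothetical.

    import boto3

    kms = boto3.client("kms")
    key_id = kms.create_key(Description="example CMK")["KeyMetadata"]["KeyId"]

    data_key = kms.generate_data_key(KeyId=key_id, KeySpec="AES_256")
    plaintext_key = data_key["Plaintext"]       # use locally, then discard
    encrypted_key = data_key["CiphertextBlob"]  # persist next to the data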

" }, "Decrypt":{ "name":"Decrypt", @@ -105,7 +105,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Decrypts ciphertext. Ciphertext is plaintext that has been previously encrypted by using any of the following operations:

Note that if a caller has been granted access permissions to all keys (through, for example, IAM user policies that grant Decrypt permission on all resources), then ciphertext encrypted by using keys in other accounts where the key grants access to the caller can be decrypted. To remedy this, we recommend that you do not grant Decrypt access in an IAM user policy. Instead grant Decrypt access only in key policies. If you must grant Decrypt access in an IAM user policy, you should scope the resource to specific keys or to specific trusted accounts.

" + "documentation":"

Decrypts ciphertext. Ciphertext is plaintext that has been previously encrypted by using any of the following operations:

Whenever possible, use key policies to give users permission to call the Decrypt operation on the CMK, instead of IAM policies. Otherwise, you might create an IAM user policy that gives the user Decrypt permission on all CMKs. This user could decrypt ciphertext that was encrypted by CMKs in other accounts if the key policy for the cross-account CMK permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular CMKs or particular trusted accounts.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.
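A minimal botocore sketch of the Decrypt call described above, assuming a us-east-1 client and a locally stored ciphertext file (both placeholders):

    import botocore.session

    session = botocore.session.get_session()
    kms = session.create_client('kms', region_name='us-east-1')

    # Ciphertext previously returned by Encrypt, GenerateDataKey, or ReEncrypt.
    with open('my-secret.bin', 'rb') as f:
        ciphertext = f.read()

    # Decrypt takes no KeyId; KMS determines the CMK from the ciphertext blob itself.
    response = kms.decrypt(CiphertextBlob=ciphertext)
    plaintext = response['Plaintext']   # bytes
    print(response['KeyId'])            # ARN of the CMK that was used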

" }, "DeleteAlias":{ "name":"DeleteAlias", @@ -137,7 +137,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Deletes key material that you previously imported. This operation makes the specified customer master key (CMK) unusable. For more information about importing key material into AWS KMS, see Importing Key Material in the AWS Key Management Service Developer Guide. You cannot perform this operation on a CMK in a different AWS account.

When the specified CMK is in the PendingDeletion state, this operation does not change the CMK's state. Otherwise, it changes the CMK's state to PendingImport.

After you delete key material, you can use ImportKeyMaterial to reimport the same key material into the CMK.

" + "documentation":"

Deletes key material that you previously imported. This operation makes the specified customer master key (CMK) unusable. For more information about importing key material into AWS KMS, see Importing Key Material in the AWS Key Management Service Developer Guide. You cannot perform this operation on a CMK in a different AWS account.

When the specified CMK is in the PendingDeletion state, this operation does not change the CMK's state. Otherwise, it changes the CMK's state to PendingImport.

After you delete key material, you can use ImportKeyMaterial to reimport the same key material into the CMK.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "DescribeKey":{ "name":"DescribeKey", @@ -153,7 +153,7 @@ {"shape":"DependencyTimeoutException"}, {"shape":"KMSInternalException"} ], - "documentation":"

Provides detailed information about the specified customer master key (CMK).

To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.

" + "documentation":"

Provides detailed information about the specified customer master key (CMK).

You can use DescribeKey on a predefined AWS alias, that is, an AWS alias with no key ID. When you do, AWS KMS associates the alias with an AWS managed CMK and returns its KeyId and Arn in the response.

To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.
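For illustration, a short botocore sketch of DescribeKey called with a predefined AWS alias; alias/aws/s3 and the region are example assumptions:

    import botocore.session

    kms = botocore.session.get_session().create_client('kms', region_name='us-east-1')

    # An AWS alias with no key ID; KMS resolves it to the AWS managed CMK.
    response = kms.describe_key(KeyId='alias/aws/s3')
    metadata = response['KeyMetadata']
    print(metadata['KeyId'], metadata['Arn'], metadata['KeyManager'])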

" }, "DisableKey":{ "name":"DisableKey", @@ -169,7 +169,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Sets the state of a customer master key (CMK) to disabled, thereby preventing its use for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

For more information about how key state affects the use of a CMK, see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Sets the state of a customer master key (CMK) to disabled, thereby preventing its use for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

For more information about how key state affects the use of a CMK, see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "DisableKeyRotation":{ "name":"DisableKeyRotation", @@ -187,7 +187,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

Disables automatic rotation of the key material for the specified customer master key (CMK). You cannot perform this operation on a CMK in a different AWS account.

" + "documentation":"

Disables automatic rotation of the key material for the specified customer master key (CMK). You cannot perform this operation on a CMK in a different AWS account.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "EnableKey":{ "name":"EnableKey", @@ -204,7 +204,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Sets the state of a customer master key (CMK) to enabled, thereby permitting its use for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

" + "documentation":"

Sets the state of a customer master key (CMK) to enabled, thereby permitting its use for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "EnableKeyRotation":{ "name":"EnableKeyRotation", @@ -222,7 +222,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

Enables automatic rotation of the key material for the specified customer master key (CMK). You cannot perform this operation on a CMK in a different AWS account.

" + "documentation":"

Enables automatic rotation of the key material for the specified customer master key (CMK). You cannot perform this operation on a CMK in a different AWS account.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "Encrypt":{ "name":"Encrypt", @@ -242,7 +242,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Encrypts plaintext into ciphertext by using a customer master key (CMK). The Encrypt operation has two primary use cases:

To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Unless you are moving encrypted data from one region to another, you don't use this operation to encrypt a generated data key within a region. To get data keys that are already encrypted, call the GenerateDataKey or GenerateDataKeyWithoutPlaintext operation. Data keys don't need to be encrypted again by calling Encrypt.

To encrypt data locally in your application, use the GenerateDataKey operation to return a plaintext data encryption key and a copy of the key encrypted under the CMK of your choosing.

" + "documentation":"

Encrypts plaintext into ciphertext by using a customer master key (CMK). The Encrypt operation has two primary use cases:

You don't need to use this operation to encrypt a data key within a region. The GenerateDataKey and GenerateDataKeyWithoutPlaintext operations return an encrypted data key.

Also, you don't need to use this operation to encrypt data in your application. You can use the plaintext and encrypted data keys that the GenerateDataKey operation returns.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.
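A hedged botocore sketch of the first use case, encrypting a small secret (4 KiB or less) directly under a CMK; the alias, plaintext, and region are placeholders:

    import botocore.session

    kms = botocore.session.get_session().create_client('kms', region_name='us-east-1')

    response = kms.encrypt(
        KeyId='alias/ExampleAlias',                  # key ID, key ARN, alias name, or alias ARN
        Plaintext=b'sensitive configuration value',  # up to 4 KiB of data
    )
    ciphertext = response['CiphertextBlob']          # store or move this; recover it later with Decrypt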

" }, "GenerateDataKey":{ "name":"GenerateDataKey", @@ -262,7 +262,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Returns a data encryption key that you can use in your application to encrypt data locally.

You must specify the customer master key (CMK) under which to generate the data key. You must also specify the length of the data key using either the KeySpec or NumberOfBytes field. You must specify one field or the other, but not both. For common key lengths (128-bit and 256-bit symmetric keys), we recommend that you use KeySpec. To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.

This operation returns a plaintext copy of the data key in the Plaintext field of the response, and an encrypted copy of the data key in the CiphertextBlob field. The data key is encrypted under the CMK specified in the KeyId field of the request.

We recommend that you use the following pattern to encrypt data locally in your application:

  1. Use this operation (GenerateDataKey) to get a data encryption key.

  2. Use the plaintext data encryption key (returned in the Plaintext field of the response) to encrypt data locally, then erase the plaintext data key from memory.

  3. Store the encrypted data key (returned in the CiphertextBlob field of the response) alongside the locally encrypted data.

To decrypt data locally:

  1. Use the Decrypt operation to decrypt the encrypted data key into a plaintext copy of the data key.

  2. Use the plaintext data key to decrypt data locally, then erase the plaintext data key from memory.

To return only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To return a random byte string that is cryptographically secure, use GenerateRandom.

If you use the optional EncryptionContext field, you must store at least enough information to be able to reconstruct the full encryption context when you later send the ciphertext to the Decrypt operation. It is a good practice to choose an encryption context that you can reconstruct on the fly to better secure the ciphertext. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

" + "documentation":"

Returns a data encryption key that you can use in your application to encrypt data locally.

You must specify the customer master key (CMK) under which to generate the data key. You must also specify the length of the data key using either the KeySpec or NumberOfBytes field. You must specify one field or the other, but not both. For common key lengths (128-bit and 256-bit symmetric keys), we recommend that you use KeySpec. To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.

This operation returns a plaintext copy of the data key in the Plaintext field of the response, and an encrypted copy of the data key in the CiphertextBlob field. The data key is encrypted under the CMK specified in the KeyId field of the request.

We recommend that you use the following pattern to encrypt data locally in your application:

  1. Use this operation (GenerateDataKey) to get a data encryption key.

  2. Use the plaintext data encryption key (returned in the Plaintext field of the response) to encrypt data locally, then erase the plaintext data key from memory.

  3. Store the encrypted data key (returned in the CiphertextBlob field of the response) alongside the locally encrypted data.

To decrypt data locally:

  1. Use the Decrypt operation to decrypt the encrypted data key into a plaintext copy of the data key.

  2. Use the plaintext data key to decrypt data locally, then erase the plaintext data key from memory.

To return only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To return a random byte string that is cryptographically secure, use GenerateRandom.

If you use the optional EncryptionContext field, you must store at least enough information to be able to reconstruct the full encryption context when you later send the ciphertext to the Decrypt operation. It is a good practice to choose an encryption context that you can reconstruct on the fly to better secure the ciphertext. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.
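The numbered pattern above maps onto a short botocore sketch; the alias, region, and payload are placeholders, and the local cipher step assumes the third-party cryptography package:

    import os
    import botocore.session
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM  # third-party, local step only

    kms = botocore.session.get_session().create_client('kms', region_name='us-east-1')

    # 1. Get a data key: a plaintext copy for local use and an encrypted copy to store.
    dk = kms.generate_data_key(KeyId='alias/ExampleAlias', KeySpec='AES_256')

    # 2. Encrypt locally with the plaintext key, then erase the plaintext key from memory.
    nonce = os.urandom(12)
    ciphertext = AESGCM(dk['Plaintext']).encrypt(nonce, b'large payload', None)
    del dk['Plaintext']

    # 3. Store the encrypted data key alongside the locally encrypted data.
    record = {'nonce': nonce, 'ciphertext': ciphertext, 'encrypted_key': dk['CiphertextBlob']}

    # To decrypt later: Decrypt the stored data key, then decrypt the payload locally.
    key = kms.decrypt(CiphertextBlob=record['encrypted_key'])['Plaintext']
    plaintext = AESGCM(key).decrypt(record['nonce'], record['ciphertext'], None)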

" }, "GenerateDataKeyWithoutPlaintext":{ "name":"GenerateDataKeyWithoutPlaintext", @@ -282,7 +282,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Returns a data encryption key encrypted under a customer master key (CMK). This operation is identical to GenerateDataKey but returns only the encrypted copy of the data key.

To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.

This operation is useful in a system that has multiple components with different degrees of trust. For example, consider a system that stores encrypted data in containers. Each container stores the encrypted data and an encrypted copy of the data key. One component of the system, called the control plane, creates new containers. When it creates a new container, it uses this operation (GenerateDataKeyWithoutPlaintext) to get an encrypted data key and then stores it in the container. Later, a different component of the system, called the data plane, puts encrypted data into the containers. To do this, it passes the encrypted data key to the Decrypt operation, then uses the returned plaintext data key to encrypt data, and finally stores the encrypted data in the container. In this system, the control plane never sees the plaintext data key.

" + "documentation":"

Returns a data encryption key encrypted under a customer master key (CMK). This operation is identical to GenerateDataKey but returns only the encrypted copy of the data key.

To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.

This operation is useful in a system that has multiple components with different degrees of trust. For example, consider a system that stores encrypted data in containers. Each container stores the encrypted data and an encrypted copy of the data key. One component of the system, called the control plane, creates new containers. When it creates a new container, it uses this operation (GenerateDataKeyWithoutPlaintext) to get an encrypted data key and then stores it in the container. Later, a different component of the system, called the data plane, puts encrypted data into the containers. To do this, it passes the encrypted data key to the Decrypt operation. It then uses the returned plaintext data key to encrypt data and finally stores the encrypted data in the container. In this system, the control plane never sees the plaintext data key.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.
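A sketch of the control-plane/data-plane split described above, using botocore with placeholder names:

    import botocore.session

    kms = botocore.session.get_session().create_client('kms', region_name='us-east-1')

    # Control plane: create a container holding only the encrypted data key;
    # the plaintext key is never returned here.
    container = {
        'encrypted_key': kms.generate_data_key_without_plaintext(
            KeyId='alias/ExampleAlias', KeySpec='AES_256')['CiphertextBlob'],
        'items': [],
    }

    # Data plane: decrypt the data key only when data actually needs to be encrypted.
    plaintext_key = kms.decrypt(CiphertextBlob=container['encrypted_key'])['Plaintext']
    # ...use plaintext_key with a local symmetric cipher, then discard it...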

" }, "GenerateRandom":{ "name":"GenerateRandom", @@ -331,7 +331,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

Gets a Boolean value that indicates whether automatic rotation of the key material is enabled for the specified customer master key (CMK).

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter.

" + "documentation":"

Gets a Boolean value that indicates whether automatic rotation of the key material is enabled for the specified customer master key (CMK).

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter.

" }, "GetParametersForImport":{ "name":"GetParametersForImport", @@ -349,7 +349,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Returns the items you need in order to import key material into AWS KMS from your existing key management infrastructure. For more information about importing key material into AWS KMS, see Importing Key Material in the AWS Key Management Service Developer Guide.

You must specify the key ID of the customer master key (CMK) into which you will import key material. This CMK's Origin must be EXTERNAL. You must also specify the wrapping algorithm and type of wrapping key (public key) that you will use to encrypt the key material. You cannot perform this operation on a CMK in a different AWS account.

This operation returns a public key and an import token. Use the public key to encrypt the key material. Store the import token to send with a subsequent ImportKeyMaterial request. The public key and import token from the same response must be used together. These items are valid for 24 hours. When they expire, they cannot be used for a subsequent ImportKeyMaterial request. To get new ones, send another GetParametersForImport request.

" + "documentation":"

Returns the items you need in order to import key material into AWS KMS from your existing key management infrastructure. For more information about importing key material into AWS KMS, see Importing Key Material in the AWS Key Management Service Developer Guide.

You must specify the key ID of the customer master key (CMK) into which you will import key material. This CMK's Origin must be EXTERNAL. You must also specify the wrapping algorithm and type of wrapping key (public key) that you will use to encrypt the key material. You cannot perform this operation on a CMK in a different AWS account.

This operation returns a public key and an import token. Use the public key to encrypt the key material. Store the import token to send with a subsequent ImportKeyMaterial request. The public key and import token from the same response must be used together. These items are valid for 24 hours. When they expire, they cannot be used for a subsequent ImportKeyMaterial request. To get new ones, send another GetParametersForImport request.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "ImportKeyMaterial":{ "name":"ImportKeyMaterial", @@ -371,7 +371,7 @@ {"shape":"ExpiredImportTokenException"}, {"shape":"InvalidImportTokenException"} ], - "documentation":"

Imports key material into an existing AWS KMS customer master key (CMK) that was created without key material. You cannot perform this operation on a CMK in a different AWS account. For more information about creating CMKs with no key material and then importing key material, see Importing Key Material in the AWS Key Management Service Developer Guide.

Before using this operation, call GetParametersForImport. Its response includes a public key and an import token. Use the public key to encrypt the key material. Then, submit the import token from the same GetParametersForImport response.

When calling this operation, you must specify the following values:

When this operation is successful, the CMK's key state changes from PendingImport to Enabled, and you can use the CMK. After you successfully import key material into a CMK, you can reimport the same key material into that CMK, but you cannot import different key material.

" + "documentation":"

Imports key material into an existing AWS KMS customer master key (CMK) that was created without key material. You cannot perform this operation on a CMK in a different AWS account. For more information about creating CMKs with no key material and then importing key material, see Importing Key Material in the AWS Key Management Service Developer Guide.

Before using this operation, call GetParametersForImport. Its response includes a public key and an import token. Use the public key to encrypt the key material. Then, submit the import token from the same GetParametersForImport response.

When calling this operation, you must specify the following values:

When this operation is successful, the CMK's key state changes from PendingImport to Enabled, and you can use the CMK. After you successfully import key material into a CMK, you can reimport the same key material into that CMK, but you cannot import different key material.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.
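The GetParametersForImport/ImportKeyMaterial flow can be sketched with botocore as follows; the key ID is a placeholder, and the wrapping of the key material with the returned public key is assumed to happen in your own key management infrastructure:

    import botocore.session

    kms = botocore.session.get_session().create_client('kms', region_name='us-east-1')
    key_id = '1234abcd-12ab-34cd-56ef-1234567890ab'   # CMK created with Origin EXTERNAL (placeholder)

    # Step 1: get a public key and import token (valid together for 24 hours).
    params = kms.get_parameters_for_import(
        KeyId=key_id,
        WrappingAlgorithm='RSAES_OAEP_SHA_256',
        WrappingKeySpec='RSA_2048',
    )

    # Step 2 (outside AWS KMS, not shown): encrypt the key material with params['PublicKey']
    # using the chosen wrapping algorithm, and write it to a file.
    with open('EncryptedKeyMaterial.bin', 'rb') as f:
        encrypted_material = f.read()

    # Step 3: import the wrapped key material together with the matching import token.
    kms.import_key_material(
        KeyId=key_id,
        ImportToken=params['ImportToken'],
        EncryptedKeyMaterial=encrypted_material,
        ExpirationModel='KEY_MATERIAL_DOES_NOT_EXPIRE',
    )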

" }, "ListAliases":{ "name":"ListAliases", @@ -386,7 +386,7 @@ {"shape":"InvalidMarkerException"}, {"shape":"KMSInternalException"} ], - "documentation":"

Gets a list of all aliases in the caller's AWS account and region. You cannot list aliases in other accounts. For more information about aliases, see CreateAlias.

The response might include several aliases that do not have a TargetKeyId field because they are not associated with a CMK. These are predefined aliases that are reserved for CMKs managed by AWS services. If an alias is not associated with a CMK, the alias does not count against the alias limit for your account.

" + "documentation":"

Gets a list of aliases in the caller's AWS account and region. You cannot list aliases in other accounts. For more information about aliases, see CreateAlias.

By default, the ListAliases command returns all aliases in the account and region. To get only the aliases that point to a particular customer master key (CMK), use the KeyId parameter.

The ListAliases response can include aliases that you created and associated with your customer managed CMKs, and aliases that AWS created and associated with AWS managed CMKs in your account. You can recognize AWS aliases because their names have the format aws/<service-name>, such as aws/dynamodb.

The response might also include aliases that have no TargetKeyId field. These are predefined aliases that AWS has created but has not yet associated with a CMK. Aliases that AWS creates in your account, including predefined aliases, do not count against your AWS KMS aliases limit.
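A brief botocore sketch of ListAliases with and without the KeyId filter described above; the key ID and region are placeholders, and pagination via Marker/Truncated is omitted:

    import botocore.session

    kms = botocore.session.get_session().create_client('kms', region_name='us-east-1')

    # All aliases in the account and region, including aws/* aliases for AWS managed CMKs.
    for alias in kms.list_aliases()['Aliases']:
        print(alias['AliasName'], alias.get('TargetKeyId'))

    # Only the aliases that point to one CMK, using the KeyId parameter.
    scoped = kms.list_aliases(KeyId='1234abcd-12ab-34cd-56ef-1234567890ab')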

" }, "ListGrants":{ "name":"ListGrants", @@ -404,7 +404,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Gets a list of all grants for the specified customer master key (CMK).

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter.

" + "documentation":"

Gets a list of all grants for the specified customer master key (CMK).

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter.

" }, "ListKeyPolicies":{ "name":"ListKeyPolicies", @@ -509,7 +509,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Encrypts data on the server side with a new customer master key (CMK) without exposing the plaintext of the data on the client side. The data is first decrypted and then reencrypted. You can also use this operation to change the encryption context of a ciphertext.

You can reencrypt data using CMKs in different AWS accounts.

Unlike other operations, ReEncrypt is authorized twice, once as ReEncryptFrom on the source CMK and once as ReEncryptTo on the destination CMK. We recommend that you include the \"kms:ReEncrypt*\" permission in your key policies to permit reencryption from or to the CMK. This permission is automatically included in the key policy when you create a CMK through the console, but you must include it manually when you create a CMK programmatically or when you set a key policy with the PutKeyPolicy operation.

" + "documentation":"

Encrypts data on the server side with a new customer master key (CMK) without exposing the plaintext of the data on the client side. The data is first decrypted and then reencrypted. You can also use this operation to change the encryption context of a ciphertext.

You can reencrypt data using CMKs in different AWS accounts.

Unlike other operations, ReEncrypt is authorized twice, once as ReEncryptFrom on the source CMK and once as ReEncryptTo on the destination CMK. We recommend that you include the \"kms:ReEncrypt*\" permission in your key policies to permit reencryption from or to the CMK. This permission is automatically included in the key policy when you create a CMK through the console. But you must include it manually when you create a CMK programmatically or when you set a key policy with the PutKeyPolicy operation.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.
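A minimal ReEncrypt sketch with botocore, assuming a locally stored ciphertext file and a placeholder destination alias:

    import botocore.session

    kms = botocore.session.get_session().create_client('kms', region_name='us-east-1')

    with open('my-secret.bin', 'rb') as f:
        old_ciphertext = f.read()   # previously returned by Encrypt or GenerateDataKey

    # Re-wrap the ciphertext under a different CMK without exposing the plaintext client-side.
    response = kms.re_encrypt(
        CiphertextBlob=old_ciphertext,
        DestinationKeyId='alias/NewExampleAlias',
    )
    new_ciphertext = response['CiphertextBlob']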

" }, "RetireGrant":{ "name":"RetireGrant", @@ -544,7 +544,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Revokes the specified grant for the specified customer master key (CMK). You can revoke a grant to actively deny operations that depend on it.

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter.

" + "documentation":"

Revokes the specified grant for the specified customer master key (CMK). You can revoke a grant to actively deny operations that depend on it.

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter.

" }, "ScheduleKeyDeletion":{ "name":"ScheduleKeyDeletion", @@ -561,7 +561,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Schedules the deletion of a customer master key (CMK). You may provide a waiting period, specified in days, before deletion occurs. If you do not provide a waiting period, the default period of 30 days is used. When this operation is successful, the state of the CMK changes to PendingDeletion. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the CMK. After the waiting period ends, AWS KMS deletes the CMK and all AWS KMS data associated with it, including all aliases that refer to it.

You cannot perform this operation on a CMK in a different AWS account.

Deleting a CMK is a destructive and potentially dangerous operation. When a CMK is deleted, all data that was encrypted under the CMK is rendered unrecoverable. To restrict the use of a CMK without deleting it, use DisableKey.

For more information about scheduling a CMK for deletion, see Deleting Customer Master Keys in the AWS Key Management Service Developer Guide.

" + "documentation":"

Schedules the deletion of a customer master key (CMK). You may provide a waiting period, specified in days, before deletion occurs. If you do not provide a waiting period, the default period of 30 days is used. When this operation is successful, the state of the CMK changes to PendingDeletion. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the CMK. After the waiting period ends, AWS KMS deletes the CMK and all AWS KMS data associated with it, including all aliases that refer to it.

You cannot perform this operation on a CMK in a different AWS account.

Deleting a CMK is a destructive and potentially dangerous operation. When a CMK is deleted, all data that was encrypted under the CMK is rendered unrecoverable. To restrict the use of a CMK without deleting it, use DisableKey.

For more information about scheduling a CMK for deletion, see Deleting Customer Master Keys in the AWS Key Management Service Developer Guide.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.
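A short botocore sketch of scheduling deletion with a custom waiting period; the key ID is a placeholder:

    import botocore.session

    kms = botocore.session.get_session().create_client('kms', region_name='us-east-1')

    # Schedule deletion with a 7-day waiting period instead of the 30-day default.
    response = kms.schedule_key_deletion(
        KeyId='1234abcd-12ab-34cd-56ef-1234567890ab',
        PendingWindowInDays=7,
    )
    print(response['DeletionDate'])   # the CMK is now in the PendingDeletion state

    # Before DeletionDate you can still change your mind:
    # kms.cancel_key_deletion(KeyId='1234abcd-12ab-34cd-56ef-1234567890ab')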

" }, "TagResource":{ "name":"TagResource", @@ -578,7 +578,7 @@ {"shape":"LimitExceededException"}, {"shape":"TagException"} ], - "documentation":"

Adds or overwrites one or more tags for the specified customer master key (CMK). You cannot perform this operation on a CMK in a different AWS account.

Each tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty (null) strings.

You cannot use the same tag key more than once per CMK. For example, consider a CMK with one tag whose tag key is Purpose and tag value is Test. If you send a TagResource request for this CMK with a tag key of Purpose and a tag value of Prod, it does not create a second tag. Instead, the original tag is overwritten with the new tag value.

For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.

" + "documentation":"

Adds or edits tags for a customer master key (CMK). You cannot perform this operation on a CMK in a different AWS account.

Each tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty (null) strings.

You can only use a tag key once for each CMK. If you use the tag key again, AWS KMS replaces the current tag value with the specified value.

For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.
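The replace-on-reuse behavior of tag keys can be illustrated with botocore; key ID and tag values are placeholders:

    import botocore.session

    kms = botocore.session.get_session().create_client('kms', region_name='us-east-1')
    key_id = '1234abcd-12ab-34cd-56ef-1234567890ab'

    # The first call creates the tag; a second call with the same TagKey replaces its value.
    kms.tag_resource(KeyId=key_id, Tags=[{'TagKey': 'Purpose', 'TagValue': 'Test'}])
    kms.tag_resource(KeyId=key_id, Tags=[{'TagKey': 'Purpose', 'TagValue': 'Prod'}])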

" }, "UntagResource":{ "name":"UntagResource", @@ -594,7 +594,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"TagException"} ], - "documentation":"

Removes the specified tag or tags from the specified customer master key (CMK). You cannot perform this operation on a CMK in a different AWS account.

To remove a tag, you specify the tag key for each tag to remove. You do not specify the tag value. To overwrite the tag value for an existing tag, use TagResource.

" + "documentation":"

Removes the specified tags from the specified customer master key (CMK). You cannot perform this operation on a CMK in a different AWS account.

To remove a tag, specify the tag key. To change the tag value of an existing tag key, use TagResource.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "UpdateAlias":{ "name":"UpdateAlias", @@ -609,7 +609,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Associates an existing alias with a different customer master key (CMK). Each CMK can have multiple aliases, but the aliases must be unique within the account and region. You cannot perform this operation on an alias in a different AWS account.

This operation works only on existing aliases. To change the alias of a CMK to a new value, use CreateAlias to create a new alias and DeleteAlias to delete the old alias.

Because an alias is not a property of a CMK, you can create, update, and delete the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all CMKs in the account, use the ListAliases operation.

An alias name can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). An alias must start with the word alias followed by a forward slash (alias/). The alias name can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). Alias names cannot begin with aws; that alias name prefix is reserved by Amazon Web Services (AWS).

" + "documentation":"

Associates an existing alias with a different customer master key (CMK). Each CMK can have multiple aliases, but the aliases must be unique within the account and region. You cannot perform this operation on an alias in a different AWS account.

This operation works only on existing aliases. To change the alias of a CMK to a new value, use CreateAlias to create a new alias and DeleteAlias to delete the old alias.

Because an alias is not a property of a CMK, you can create, update, and delete the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all CMKs in the account, use the ListAliases operation.

An alias name can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). An alias must start with the word alias followed by a forward slash (alias/). Alias names cannot begin with aws; that alias name prefix is reserved by Amazon Web Services (AWS).

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "UpdateKeyDescription":{ "name":"UpdateKeyDescription", @@ -625,7 +625,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Updates the description of a customer master key (CMK). To see the decription of a CMK, use DescribeKey.

You cannot perform this operation on a CMK in a different AWS account.

" + "documentation":"

Updates the description of a customer master key (CMK). To see the description of a CMK, use DescribeKey.

You cannot perform this operation on a CMK in a different AWS account.

The result of this operation varies with the key state of the CMK. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" } }, "shapes":{ @@ -713,7 +713,7 @@ "members":{ "AliasName":{ "shape":"AliasNameType", - "documentation":"

String that contains the display name. The name must start with the word \"alias\" followed by a forward slash (alias/). Aliases that begin with \"alias/AWS\" are reserved.

" + "documentation":"

Specifies the alias name. This value must begin with alias/ followed by the alias name, such as alias/ExampleAlias. The alias name cannot begin with aws/. The alias/aws/ prefix is reserved for AWS managed CMKs.

" }, "TargetKeyId":{ "shape":"KeyIdType", @@ -755,7 +755,7 @@ }, "Name":{ "shape":"GrantNameType", - "documentation":"

A friendly name for identifying the grant. Use this value to prevent unintended creation of duplicate grants when retrying this request.

When this value is absent, all CreateGrant requests result in a new grant with a unique GrantId even if all the supplied parameters are identical. This can result in unintended duplicates when you retry the CreateGrant request.

When this value is present, you can retry a CreateGrant request with identical parameters; if the grant already exists, the original GrantId is returned without creating a new grant. Note that the returned grant token is unique with every CreateGrant request, even when a duplicate GrantId is returned. All grant tokens obtained in this way can be used interchangeably.

" + "documentation":"

A friendly name for identifying the grant. Use this value to prevent the unintended creation of duplicate grants when retrying this request.

When this value is absent, all CreateGrant requests result in a new grant with a unique GrantId even if all the supplied parameters are identical. This can result in unintended duplicates when you retry the CreateGrant request.

When this value is present, you can retry a CreateGrant request with identical parameters; if the grant already exists, the original GrantId is returned without creating a new grant. Note that the returned grant token is unique with every CreateGrant request, even when a duplicate GrantId is returned. All grant tokens obtained in this way can be used interchangeably.
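The idempotent-retry behavior of Name can be sketched with botocore; the key ID and grantee role ARN are placeholders:

    import botocore.session

    kms = botocore.session.get_session().create_client('kms', region_name='us-east-1')

    # Retrying with the same Name and identical parameters returns the original GrantId
    # instead of creating a duplicate grant.
    grant = kms.create_grant(
        KeyId='1234abcd-12ab-34cd-56ef-1234567890ab',
        GranteePrincipal='arn:aws:iam::111122223333:role/ExampleRole',
        Operations=['Encrypt', 'Decrypt', 'GenerateDataKey'],
        Name='example-app-grant',
    )
    print(grant['GrantId'], grant['GrantToken'])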

" } } }, @@ -777,7 +777,7 @@ "members":{ "Policy":{ "shape":"PolicyType", - "documentation":"

The key policy to attach to the CMK.

If you provide a key policy, it must meet the following criteria:

If you do not provide a key policy, AWS KMS attaches a default key policy to the CMK. For more information, see Default Key Policy in the AWS Key Management Service Developer Guide.

The key policy size limit is 32 kilobytes (32768 bytes).

" + "documentation":"

The key policy to attach to the CMK.

If you provide a key policy, it must meet the following criteria:

If you do not provide a key policy, AWS KMS attaches a default key policy to the CMK. For more information, see Default Key Policy in the AWS Key Management Service Developer Guide.

The key policy size limit is 32 kilobytes (32768 bytes).

" }, "Description":{ "shape":"DescriptionType", @@ -884,7 +884,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

A unique identifier for the customer master key (CMK).

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

Describes the specified customer master key (CMK).

If you specify a predefined AWS alias (an AWS alias with no key ID), KMS associates the alias with an AWS managed CMK and returns its KeyId and Arn in the response.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" }, "GrantTokens":{ "shape":"GrantTokenList", @@ -963,7 +963,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

A unique identifier for the customer master key (CMK).

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

A unique identifier for the customer master key (CMK).

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" }, "Plaintext":{ "shape":"PlaintextType", @@ -1021,7 +1021,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the CMK under which to generate and encrypt the data encryption key.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

The identifier of the CMK under which to generate and encrypt the data encryption key.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" }, "EncryptionContext":{ "shape":"EncryptionContextType", @@ -1064,7 +1064,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the customer master key (CMK) under which to generate and encrypt the data encryption key.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

The identifier of the customer master key (CMK) under which to generate and encrypt the data encryption key.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" }, "EncryptionContext":{ "shape":"EncryptionContextType", @@ -1174,7 +1174,7 @@ }, "WrappingAlgorithm":{ "shape":"AlgorithmSpec", - "documentation":"

The algorithm you will use to encrypt the key material before importing it with ImportKeyMaterial. For more information, see Encrypt the Key Material in the AWS Key Management Service Developer Guide.

" + "documentation":"

The algorithm you use to encrypt the key material before importing it with ImportKeyMaterial. For more information, see Encrypt the Key Material in the AWS Key Management Service Developer Guide.

" }, "WrappingKeySpec":{ "shape":"WrappingKeySpec", @@ -1215,7 +1215,7 @@ "documentation":"

A list of key-value pairs that must be present in the encryption context of certain subsequent operations that the grant allows. When certain subsequent operations allowed by the grant include encryption context that matches this list, the grant allows the operation. Otherwise, the grant does not allow the operation.

" } }, - "documentation":"

A structure that you can use to allow certain operations in the grant only when the desired encryption context is present. For more information about encryption context, see Encryption Context in the AWS Key Management Service Developer Guide.

Grant constraints apply only to operations that accept encryption context as input. For example, the DescribeKey operation does not accept encryption context as input. A grant that allows the DescribeKey operation does so regardless of the grant constraints. In constrast, the Encrypt operation accepts encryption context as input. A grant that allows the Encrypt operation does so only when the encryption context of the Encrypt operation satisfies the grant constraints.

" + "documentation":"

A structure that you can use to allow certain operations in the grant only when the preferred encryption context is present. For more information about encryption context, see Encryption Context in the AWS Key Management Service Developer Guide.

Grant constraints apply only to operations that accept encryption context as input. For example, the DescribeKey operation does not accept encryption context as input. A grant that allows the DescribeKey operation does so regardless of the grant constraints. In contrast, the Encrypt operation accepts encryption context as input. A grant that allows the Encrypt operation does so only when the encryption context of the Encrypt operation satisfies the grant constraints.

" }, "GrantIdType":{ "type":"string", @@ -1510,7 +1510,7 @@ }, "KeyManager":{ "shape":"KeyManagerType", - "documentation":"

The CMK's manager. CMKs are either customer-managed or AWS-managed. For more information about the difference, see Customer Master Keys in the AWS Key Management Service Developer Guide.

" + "documentation":"

The CMK's manager. CMKs are either customer managed or AWS managed. For more information about the difference, see Customer Master Keys in the AWS Key Management Service Developer Guide.

" } }, "documentation":"

Contains metadata about a customer master key (CMK).

This data type is used as a response element for the CreateKey and DescribeKey operations.

" @@ -1553,6 +1553,10 @@ "ListAliasesRequest":{ "type":"structure", "members":{ + "KeyId":{ + "shape":"KeyIdType", + "documentation":"

Lists only aliases that refer to the specified CMK. The value of this parameter can be the ID or Amazon Resource Name (ARN) of a CMK in the caller's account and region. You cannot use an alias name or alias ARN in this value.

This parameter is optional. If you omit it, ListAliases returns all aliases in the account and region.

" + }, "Limit":{ "shape":"LimitType", "documentation":"

Use this parameter to specify the maximum number of items to return. When this value is present, AWS KMS does not return more than the specified number of items, but it might return fewer.

This value is optional. If you include a value, it must be between 1 and 100, inclusive. If you do not include a value, it defaults to 50.

" @@ -1817,7 +1821,7 @@ }, "Policy":{ "shape":"PolicyType", - "documentation":"

The key policy to attach to the CMK.

The key policy must meet the following criteria:

The key policy size limit is 32 kilobytes (32768 bytes).

" + "documentation":"

The key policy to attach to the CMK.

The key policy must meet the following criteria:

The key policy size limit is 32 kilobytes (32768 bytes).

" }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BooleanType", @@ -1842,7 +1846,7 @@ }, "DestinationKeyId":{ "shape":"KeyIdType", - "documentation":"

A unique identifier for the CMK that is used to reencrypt the data.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

A unique identifier for the CMK that is used to reencrypt the data.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" }, "DestinationEncryptionContext":{ "shape":"EncryptionContextType", @@ -2057,5 +2061,5 @@ "enum":["RSA_2048"] } }, - "documentation":"AWS Key Management Service

AWS Key Management Service (AWS KMS) is an encryption and key management web service. This guide describes the AWS KMS operations that you can call programmatically. For general information about AWS KMS, see the AWS Key Management Service Developer Guide.

AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to AWS KMS and other AWS services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the AWS SDKs to make programmatic API calls to AWS KMS.

Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS 1.2. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

Signing Requests

Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your AWS account (root) access key ID and secret key for everyday work with AWS KMS. Instead, use the access key ID and secret access key for an IAM user, or you can use the AWS Security Token Service to generate temporary security credentials that you can use to sign requests.

All AWS KMS operations require Signature Version 4.

Logging API Requests

AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related events for your AWS account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to AWS KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

Additional Resources

For more information about credentials and request signing, see the following:

Commonly Used APIs

Of the APIs discussed in this guide, the following will prove the most useful for most applications. You will likely perform actions other than these, such as creating keys and assigning policies, by using the console.

" + "documentation":"AWS Key Management Service

AWS Key Management Service (AWS KMS) is an encryption and key management web service. This guide describes the AWS KMS operations that you can call programmatically. For general information about AWS KMS, see the AWS Key Management Service Developer Guide.

AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to AWS KMS and other AWS services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the AWS SDKs to make programmatic API calls to AWS KMS.

Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS 1.2. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

Signing Requests

Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your AWS account (root) access key ID and secret key for everyday work with AWS KMS. Instead, use the access key ID and secret access key for an IAM user. You can also use the AWS Security Token Service to generate temporary security credentials that you can use to sign requests.

All AWS KMS operations require Signature Version 4.

Logging API Requests

AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related events for your AWS account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to AWS KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

Additional Resources

For more information about credentials and request signing, see the following:

Commonly Used API Operations

Of the API operations discussed in this guide, the following will prove the most useful for most applications. You will likely perform operations other than these, such as creating keys and assigning policies, by using the console.

" } diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index 1a478350..4ff0125c 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -397,9 +397,10 @@ {"shape":"InvalidSequenceTokenException"}, {"shape":"DataAlreadyAcceptedException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"UnrecognizedClientException"} ], - "documentation":"

Uploads a batch of log events to the specified log stream.

You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token using DescribeLogStreams. If you call PutLogEvents twice within a narrow time period using the same value for sequenceToken, both calls may be successful, or one may be rejected.

The batch of events must satisfy the following constraints:

" + "documentation":"

Uploads a batch of log events to the specified log stream.

You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token using DescribeLogStreams. If you call PutLogEvents twice within a narrow time period using the same value for sequenceToken, both calls may be successful, or one may be rejected.

The batch of events must satisfy the following constraints:

If a call to PutLogEvents returns \"UnrecognizedClientException\", the most likely cause is an invalid AWS access key ID or secret key.
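The sequence-token handshake described above looks roughly like this in botocore; the log group and stream names are placeholders and are assumed to already exist:

    import time
    import botocore.session

    logs = botocore.session.get_session().create_client('logs', region_name='us-east-1')
    group, stream = 'example-group', 'example-stream'

    # Look up the current sequence token; a brand-new stream has none.
    streams = logs.describe_log_streams(logGroupName=group, logStreamNamePrefix=stream)
    token = streams['logStreams'][0].get('uploadSequenceToken')

    kwargs = {
        'logGroupName': group,
        'logStreamName': stream,
        'logEvents': [{'timestamp': int(time.time() * 1000), 'message': 'hello'}],
    }
    if token:
        kwargs['sequenceToken'] = token

    # Keep nextSequenceToken for the following PutLogEvents call on this stream.
    token = logs.put_log_events(**kwargs)['nextSequenceToken']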

" }, "PutMetricFilter":{ "name":"PutMetricFilter", @@ -430,7 +431,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates or updates a resource policy allowing other AWS services to put log events to this account, such as Amazon Route 53. An account can have up to 50 resource policies per region.

" + "documentation":"

Creates or updates a resource policy allowing other AWS services to put log events to this account, such as Amazon Route 53. An account can have up to 10 resource policies per region.

" }, "PutRetentionPolicy":{ "name":"PutRetentionPolicy", @@ -823,7 +824,7 @@ }, "logStreamNamePrefix":{ "shape":"LogStreamName", - "documentation":"

The prefix to match.

iIf orderBy is LastEventTime,you cannot specify this parameter.

" + "documentation":"

The prefix to match.

If orderBy is LastEventTime, you cannot specify this parameter.

" }, "orderBy":{ "shape":"OrderBy", @@ -872,10 +873,13 @@ "shape":"DescribeLimit", "documentation":"

The maximum number of items returned. If you don't specify a value, the default is up to 50 items.

" }, - "metricName":{"shape":"MetricName"}, + "metricName":{ + "shape":"MetricName", + "documentation":"

Filters results to include only those with the specified metric name. If you include this parameter in your request, you must also include the metricNamespace parameter.

" + }, "metricNamespace":{ "shape":"MetricNamespace", - "documentation":"

The namespace of the CloudWatch metric.

" + "documentation":"

Filters results to include only those in the specified namespace. If you include this parameter in your request, you must also include the metricName parameter.
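A small botocore sketch of DescribeMetricFilters using the paired parameters described above; the metric name and namespace are placeholder values:

    import botocore.session

    logs = botocore.session.get_session().create_client('logs', region_name='us-east-1')

    # metricName and metricNamespace must be supplied together.
    response = logs.describe_metric_filters(
        metricName='ErrorCount',
        metricNamespace='ExampleApp',
    )
    for f in response['metricFilters']:
        print(f['filterName'], f['logGroupName'])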

" } } }, @@ -1236,11 +1240,11 @@ }, "startTime":{ "shape":"Timestamp", - "documentation":"

The start of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a time stamp earlier than this time are not included.

" + "documentation":"

The start of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a time stamp equal to this time or later than this time are included. Events with a time stamp earlier than this time are not included.

" }, "endTime":{ "shape":"Timestamp", - "documentation":"

The end of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a time stamp later than this time are not included.

" + "documentation":"

The end of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a time stamp equal to or later than this time are not included.

" }, "nextToken":{ "shape":"NextToken", @@ -1265,11 +1269,11 @@ }, "nextForwardToken":{ "shape":"NextToken", - "documentation":"

The token for the next set of items in the forward direction. The token expires after 24 hours.

" + "documentation":"

The token for the next set of items in the forward direction. The token expires after 24 hours. If you have reached the end of the stream, it will return the same token you passed in.

" }, "nextBackwardToken":{ "shape":"NextToken", - "documentation":"

The token for the next set of items in the backward direction. The token expires after 24 hours.

" + "documentation":"

The token for the next set of items in the backward direction. The token expires after 24 hours. This token will never be null. If you have reached the end of the stream, it will return the same token you passed in.

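A sketch of how the forward token behavior described above is typically used to page through a stream until the token stops changing (group, stream, and region are placeholders):

    import botocore.session

    session = botocore.session.get_session()
    logs = session.create_client("logs", region_name="us-east-1")  # placeholder region

    kwargs = {
        "logGroupName": "my-log-group",     # hypothetical
        "logStreamName": "my-log-stream",   # hypothetical
        "startFromHead": True,
    }

    token = None
    while True:
        resp = logs.get_log_events(**kwargs)
        for event in resp["events"]:
            print(event["timestamp"], event["message"])
        # At the end of the stream the service returns the same token you passed in.
        if resp["nextForwardToken"] == token:
            break
        token = resp["nextForwardToken"]
        kwargs["nextToken"] = token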
" } } }, @@ -1282,7 +1286,7 @@ "members":{ "timestamp":{ "shape":"Timestamp", - "documentation":"

The time the event occurred, expressed as the number of milliseconds fter Jan 1, 1970 00:00:00 UTC.

" + "documentation":"

The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

" }, "message":{ "shape":"EventMessage", @@ -1715,7 +1719,7 @@ }, "policyDocument":{ "shape":"PolicyDocument", - "documentation":"

Details of the new policy, including the identity of the principal that is enabled to put logs to this account. This is formatted as a JSON string.

The following example creates a resource policy enabling the Route 53 service to put DNS query logs in to the specified log group. Replace \"logArn\" with the ARN of your CloudWatch Logs resource, such as a log group or log stream.

{ \"Version\": \"2012-10-17\" \"Statement\": [ { \"Sid\": \"Route53LogsToCloudWatchLogs\", \"Effect\": \"Allow\", \"Principal\": { \"Service\": [ \"route53.amazonaws.com\" ] }, \"Action\":\"logs:PutLogEvents\", \"Resource\": logArn } ] }

" + "documentation":"

Details of the new policy, including the identity of the principal that is enabled to put logs to this account. This is formatted as a JSON string.

The following example creates a resource policy enabling the Route 53 service to put DNS query logs into the specified log group. Replace \"logArn\" with the ARN of your CloudWatch Logs resource, such as a log group or log stream.

{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Sid\": \"Route53LogsToCloudWatchLogs\", \"Effect\": \"Allow\", \"Principal\": { \"Service\": [ \"route53.amazonaws.com\" ] }, \"Action\":\"logs:PutLogEvents\", \"Resource\": \"logArn\" } ] }

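Assuming botocore and a log group ARN of your own (the ARN and policy name below are placeholders), the corrected policy document above could be attached like this:

    import json
    import botocore.session

    session = botocore.session.get_session()
    logs = session.create_client("logs", region_name="us-east-1")  # placeholder region

    log_arn = "arn:aws:logs:us-east-1:123456789012:log-group:my-group:*"  # placeholder
    policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Sid": "Route53LogsToCloudWatchLogs",
            "Effect": "Allow",
            "Principal": {"Service": ["route53.amazonaws.com"]},
            "Action": "logs:PutLogEvents",
            "Resource": log_arn,
        }],
    }

    # An account is limited to the number of resource policies noted above.
    logs.put_resource_policy(
        policyName="route53-query-logging",   # hypothetical policy name
        policyDocument=json.dumps(policy),
    )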
" } } }, @@ -1980,6 +1984,13 @@ "min":0 }, "Token":{"type":"string"}, + "UnrecognizedClientException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The most likely cause is an invalid AWS access key ID or secret key.

", + "exception":true + }, "UntagLogGroupRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index af56c605..454631b0 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -1954,6 +1954,11 @@ "locationName": "minBufferTime", "documentation": "Minimum time of initially buffered media that is needed to ensure smooth playout." }, + "MinFinalSegmentLength": { + "shape": "__doubleMin0Max2147483647", + "locationName": "minFinalSegmentLength", + "documentation": "Keep this setting at the default value of 0, unless you are troubleshooting a problem with how devices play back the end of your video asset. If you know that player devices are hanging on the final segment of your video because the length of your final segment is too short, use this setting to specify a minimum final segment length, in seconds. Choose a value that is greater than or equal to 1 and less than your segment length. When you specify a value for this setting, the encoder will combine any final segment that is shorter than the length that you specify with the previous segment. For example, your segment length is 3 seconds and your final segment is .5 seconds without a minimum final segment length; when you set the minimum final segment length to 1, your final segment is 3.5 seconds." + }, "SegmentControl": { "shape": "CmafSegmentControl", "locationName": "segmentControl" @@ -3299,6 +3304,14 @@ "MAIN" ] }, + "H264DynamicSubGop": { + "type": "string", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "enum": [ + "ADAPTIVE", + "STATIC" + ] + }, "H264EntropyEncoding": { "type": "string", "documentation": "Entropy encoding mode. Use CABAC (must be in Main or High profile) or CAVLC.", @@ -3383,12 +3396,32 @@ "MULTI_PASS_HQ" ] }, + "H264QvbrSettings": { + "type": "structure", + "members": { + "MaxAverageBitrate": { + "shape": "__integerMin1000Max1152000000", + "locationName": "maxAverageBitrate", + "documentation": "Use this setting only when Rate control mode is QVBR and Quality tuning level is Multi-pass HQ. For Max average bitrate values suited to the complexity of your input video, the service limits the average bitrate of the video part of this output to the value you choose. That is, the total size of the video element is less than or equal to the value you set multiplied by the number of seconds of encoded output." + }, + "QvbrQualityLevel": { + "shape": "__integerMin1Max10", + "locationName": "qvbrQualityLevel", + "documentation": "Required when you use QVBR rate control mode. That is, when you specify qvbrSettings within h264Settings. Specify the target quality level for this output, from 1 to 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9." + } + }, + "documentation": "Settings for quality-defined variable bitrate encoding with the H.264 codec. Required when you set Rate control mode to QVBR. 
Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode.", + "required": [ + "QvbrQualityLevel" + ] + }, "H264RateControlMode": { "type": "string", - "documentation": "Use this setting to specify whether this output has a variable bitrate (VBR) or constant bitrate (CBR).", + "documentation": "Use this setting to specify whether this output has a variable bitrate (VBR), constant bitrate (CBR) or quality-defined variable bitrate (QVBR).", "enum": [ "VBR", - "CBR" + "CBR", + "QVBR" ] }, "H264RepeatPps": { @@ -3427,6 +3460,11 @@ "shape": "H264CodecProfile", "locationName": "codecProfile" }, + "DynamicSubGop": { + "shape": "H264DynamicSubGop", + "locationName": "dynamicSubGop", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames)." + }, "EntropyEncoding": { "shape": "H264EntropyEncoding", "locationName": "entropyEncoding" @@ -3492,7 +3530,7 @@ "MaxBitrate": { "shape": "__integerMin1000Max1152000000", "locationName": "maxBitrate", - "documentation": "Maximum bitrate in bits/second. For example, enter five megabits per second as 5000000." + "documentation": "Maximum bitrate in bits/second. For example, enter five megabits per second as 5000000. Required when Rate control mode is QVBR." }, "MinIInterval": { "shape": "__integerMin0Max30", @@ -3527,6 +3565,11 @@ "shape": "H264QualityTuningLevel", "locationName": "qualityTuningLevel" }, + "QvbrSettings": { + "shape": "H264QvbrSettings", + "locationName": "qvbrSettings", + "documentation": "Settings for quality-defined variable bitrate encoding with the H.264 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode." + }, "RateControlMode": { "shape": "H264RateControlMode", "locationName": "rateControlMode" @@ -3679,6 +3722,14 @@ "MAIN_422_10BIT_HIGH" ] }, + "H265DynamicSubGop": { + "type": "string", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "enum": [ + "ADAPTIVE", + "STATIC" + ] + }, "H265FlickerAdaptiveQuantization": { "type": "string", "documentation": "Adjust quantization within each frame to reduce flicker or 'pop' on I-frames.", @@ -3747,12 +3798,32 @@ "MULTI_PASS_HQ" ] }, + "H265QvbrSettings": { + "type": "structure", + "members": { + "MaxAverageBitrate": { + "shape": "__integerMin1000Max1466400000", + "locationName": "maxAverageBitrate", + "documentation": "Use this setting only when Rate control mode is QVBR and Quality tuning level is Multi-pass HQ. For Max average bitrate values suited to the complexity of your input video, the service limits the average bitrate of the video part of this output to the value you choose. 
That is, the total size of the video element is less than or equal to the value you set multiplied by the number of seconds of encoded output." + }, + "QvbrQualityLevel": { + "shape": "__integerMin1Max10", + "locationName": "qvbrQualityLevel", + "documentation": "Required when you use QVBR rate control mode. That is, when you specify qvbrSettings within h265Settings. Specify the target quality level for this output, from 1 to 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9." + } + }, + "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode.", + "required": [ + "QvbrQualityLevel" + ] + }, "H265RateControlMode": { "type": "string", - "documentation": "Use this setting to specify whether this output has a variable bitrate (VBR) or constant bitrate (CBR).", + "documentation": "Use this setting to specify whether this output has a variable bitrate (VBR), constant bitrate (CBR) or quality-defined variable bitrate (QVBR).", "enum": [ "VBR", - "CBR" + "CBR", + "QVBR" ] }, "H265SampleAdaptiveOffsetFilterMode": { @@ -3796,6 +3867,11 @@ "shape": "H265CodecProfile", "locationName": "codecProfile" }, + "DynamicSubGop": { + "shape": "H265DynamicSubGop", + "locationName": "dynamicSubGop", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames)." + }, "FlickerAdaptiveQuantization": { "shape": "H265FlickerAdaptiveQuantization", "locationName": "flickerAdaptiveQuantization" @@ -3853,7 +3929,7 @@ "MaxBitrate": { "shape": "__integerMin1000Max1466400000", "locationName": "maxBitrate", - "documentation": "Maximum bitrate in bits/second." + "documentation": "Maximum bitrate in bits/second. For example, enter five megabits per second as 5000000. Required when Rate control mode is QVBR." }, "MinIInterval": { "shape": "__integerMin0Max30", @@ -3888,6 +3964,11 @@ "shape": "H265QualityTuningLevel", "locationName": "qualityTuningLevel" }, + "QvbrSettings": { + "shape": "H265QvbrSettings", + "locationName": "qvbrSettings", + "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode." + }, "RateControlMode": { "shape": "H265RateControlMode", "locationName": "rateControlMode" @@ -4244,6 +4325,11 @@ "shape": "HlsManifestDurationFormat", "locationName": "manifestDurationFormat" }, + "MinFinalSegmentLength": { + "shape": "__doubleMin0Max2147483647", + "locationName": "minFinalSegmentLength", + "documentation": "Keep this setting at the default value of 0, unless you are troubleshooting a problem with how devices play back the end of your video asset. 
If you know that player devices are hanging on the final segment of your video because the length of your final segment is too short, use this setting to specify a minimum final segment length, in seconds. Choose a value that is greater than or equal to 1 and less than your segment length. When you specify a value for this setting, the encoder will combine any final segment that is shorter than the length that you specify with the previous segment. For example, your segment length is 3 seconds and your final segment is .5 seconds without a minimum final segment length; when you set the minimum final segment length to 1, your final segment is 3.5 seconds." + }, "MinSegmentLength": { "shape": "__integerMin0Max2147483647", "locationName": "minSegmentLength", @@ -4722,7 +4808,7 @@ "documentation": "An identifier for this resource that is unique within all of AWS." }, "CreatedAt": { - "shape": "__timestampIso8601", + "shape": "__timestampUnix", "locationName": "createdAt", "documentation": "The time, in Unix epoch format in seconds, when the job got created." }, @@ -4853,7 +4939,7 @@ "documentation": "An optional category you create to organize your job templates." }, "CreatedAt": { - "shape": "__timestampIso8601", + "shape": "__timestampUnix", "locationName": "createdAt", "documentation": "The timestamp in epoch seconds for Job template creation." }, @@ -4863,7 +4949,7 @@ "documentation": "An optional description you create for each job template." }, "LastUpdated": { - "shape": "__timestampIso8601", + "shape": "__timestampUnix", "locationName": "lastUpdated", "documentation": "The timestamp in epoch seconds when the Job template was last updated." }, @@ -5873,6 +5959,14 @@ "PROFILE_422" ] }, + "Mpeg2DynamicSubGop": { + "type": "string", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "enum": [ + "ADAPTIVE", + "STATIC" + ] + }, "Mpeg2FramerateControl": { "type": "string", "documentation": "If you are using the console, use the Framerate setting to specify the framerate for this output. If you want to keep the same framerate as the input video, choose Follow source. If you want to do framerate conversion, choose a framerate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your framerate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the framerate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Choose SPECIFIED if you want the service to use the framerate you specify in the settings FramerateNumerator and FramerateDenominator.", @@ -5971,6 +6065,11 @@ "shape": "Mpeg2CodecProfile", "locationName": "codecProfile" }, + "DynamicSubGop": { + "shape": "Mpeg2DynamicSubGop", + "locationName": "dynamicSubGop", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. 
This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames)." + }, "FramerateControl": { "shape": "Mpeg2FramerateControl", "locationName": "framerateControl" @@ -6486,7 +6585,7 @@ "documentation": "An optional category you create to organize your presets." }, "CreatedAt": { - "shape": "__timestampIso8601", + "shape": "__timestampUnix", "locationName": "createdAt", "documentation": "The timestamp in epoch seconds for preset creation." }, @@ -6496,7 +6595,7 @@ "documentation": "An optional description you create for each preset." }, "LastUpdated": { - "shape": "__timestampIso8601", + "shape": "__timestampUnix", "locationName": "lastUpdated", "documentation": "The timestamp in epoch seconds when the preset was last updated." }, @@ -6679,7 +6778,7 @@ "documentation": "An identifier for this resource that is unique within all of AWS." }, "CreatedAt": { - "shape": "__timestampIso8601", + "shape": "__timestampUnix", "locationName": "createdAt", "documentation": "The timestamp in epoch seconds for queue creation." }, @@ -6689,7 +6788,7 @@ "documentation": "An optional description you create for each queue." }, "LastUpdated": { - "shape": "__timestampIso8601", + "shape": "__timestampUnix", "locationName": "lastUpdated", "documentation": "The timestamp in epoch seconds when the queue was last updated." }, @@ -6743,24 +6842,24 @@ "type": "structure", "members": { "Height": { - "shape": "__integerMinNegative2147483648Max2147483647", + "shape": "__integerMin2Max2147483647", "locationName": "height", - "documentation": "Height of rectangle in pixels." + "documentation": "Height of rectangle in pixels. Specify only even numbers." }, "Width": { - "shape": "__integerMinNegative2147483648Max2147483647", + "shape": "__integerMin2Max2147483647", "locationName": "width", - "documentation": "Width of rectangle in pixels." + "documentation": "Width of rectangle in pixels. Specify only even numbers." }, "X": { - "shape": "__integerMinNegative2147483648Max2147483647", + "shape": "__integerMin0Max2147483647", "locationName": "x", - "documentation": "The distance, in pixels, between the rectangle and the left edge of the video frame." + "documentation": "The distance, in pixels, between the rectangle and the left edge of the video frame. Specify only even numbers." }, "Y": { - "shape": "__integerMinNegative2147483648Max2147483647", + "shape": "__integerMin0Max2147483647", "locationName": "y", - "documentation": "The distance, in pixels, between the rectangle and the top edge of the video frame." + "documentation": "The distance, in pixels, between the rectangle and the top edge of the video frame. Specify only even numbers." } }, "documentation": "Use Rectangle to identify a specific area of the video frame.", @@ -7046,17 +7145,17 @@ "type": "structure", "members": { "FinishTime": { - "shape": "__timestampIso8601", + "shape": "__timestampUnix", "locationName": "finishTime", "documentation": "The time, in Unix epoch format, that the transcoding job finished" }, "StartTime": { - "shape": "__timestampIso8601", + "shape": "__timestampUnix", "locationName": "startTime", "documentation": "The time, in Unix epoch format, that transcoding for the job began." 
}, "SubmitTime": { - "shape": "__timestampIso8601", + "shape": "__timestampUnix", "locationName": "submitTime", "documentation": "The time, in Unix epoch format, that you submitted the job." } @@ -7478,6 +7577,9 @@ "__doubleMin0": { "type": "double" }, + "__doubleMin0Max2147483647": { + "type": "double" + }, "__doubleMinNegative59Max0": { "type": "double" }, @@ -7645,6 +7747,11 @@ "min": 1, "max": 1 }, + "__integerMin1Max10": { + "type": "integer", + "min": 1, + "max": 10 + }, "__integerMin1Max100": { "type": "integer", "min": 1, @@ -7720,6 +7827,11 @@ "min": 25, "max": 2000 }, + "__integerMin2Max2147483647": { + "type": "integer", + "min": 2, + "max": 2147483647 + }, "__integerMin32000Max384000": { "type": "integer", "min": 32000, diff --git a/botocore/data/mediapackage/2017-10-12/service-2.json b/botocore/data/mediapackage/2017-10-12/service-2.json index 5f6d94f7..84093d0d 100644 --- a/botocore/data/mediapackage/2017-10-12/service-2.json +++ b/botocore/data/mediapackage/2017-10-12/service-2.json @@ -776,6 +776,11 @@ "locationName": "minUpdatePeriodSeconds", "shape": "__integer" }, + "PeriodTriggers": { + "documentation": "A list of triggers that controls when the outgoing Dynamic Adaptive Streaming over HTTP (DASH)\nMedia Presentation Description (MPD) will be partitioned into multiple periods. If empty, the content will not\nbe partitioned into more than one period. If the list contains \"ADS\", new periods will be created where\nthe Channel source contains SCTE-35 ad markers.\n", + "locationName": "periodTriggers", + "shape": "__listOf__PeriodTriggersElement" + }, "Profile": { "documentation": "The Dynamic Adaptive Streaming over HTTP (DASH) profile type. When set to \"HBBTV_1_5\", HbbTV 1.5 compliant output is enabled.", "locationName": "profile", @@ -1833,6 +1838,12 @@ }, "type": "structure" }, + "__PeriodTriggersElement": { + "enum": [ + "ADS" + ], + "type": "string" + }, "__boolean": { "type": "boolean" }, @@ -1872,6 +1883,12 @@ }, "type": "list" }, + "__listOf__PeriodTriggersElement": { + "member": { + "shape": "__PeriodTriggersElement" + }, + "type": "list" + }, "__listOf__string": { "member": { "shape": "__string" diff --git a/botocore/data/mq/2017-11-27/paginators-1.json b/botocore/data/mq/2017-11-27/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/mq/2017-11-27/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/mq/2017-11-27/service-2.json b/botocore/data/mq/2017-11-27/service-2.json index 982e59e4..75c419ac 100644 --- a/botocore/data/mq/2017-11-27/service-2.json +++ b/botocore/data/mq/2017-11-27/service-2.json @@ -28,12 +28,12 @@ "errors" : [ { "shape" : "BadRequestException", "documentation" : "HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it." - }, { - "shape" : "InternalServerErrorException", - "documentation" : "HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue." }, { "shape" : "UnauthorizedException", "documentation" : "HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated." + }, { + "shape" : "InternalServerErrorException", + "documentation" : "HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue." }, { "shape" : "ConflictException", "documentation" : "HTTP Status Code 409: Conflict. This Broker name already exists. Retry your request with another name." 
@@ -70,7 +70,7 @@ "shape" : "ForbiddenException", "documentation" : "HTTP Status Code 403: Access forbidden. Correct your input and then retry your request." } ], - "documentation" : "Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version). Note: If the configuration name already exists, Amazon MQ doesn't create a configuration." + "documentation" : "Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version)." }, "CreateUser" : { "name" : "CreateUser", @@ -518,12 +518,12 @@ "ErrorAttribute" : { "shape" : "__string", "locationName" : "errorAttribute", - "documentation" : "The error attribute." + "documentation" : "The attribute which caused the error." }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The error message." + "documentation" : "The explanation of the error." } }, "documentation" : "Returns information about an error.", @@ -541,16 +541,21 @@ "documentation" : "The URL of the broker's ActiveMQ Web Console." }, "Endpoints" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "endpoints", "documentation" : "The broker's wire-level protocol endpoints." + }, + "IpAddress" : { + "shape" : "__string", + "locationName" : "ipAddress", + "documentation" : "The IP address of the ENI attached to the broker." } }, "documentation" : "Returns information about all brokers." }, "BrokerState" : { "type" : "string", - "documentation" : "The status of the broker. Possible values: CREATION_IN_PROGRESS, CREATION_FAILED, DELETION_IN_PROGRESS, RUNNING, REBOOT_IN_PROGRESS", + "documentation" : "The status of the broker.", "enum" : [ "CREATION_IN_PROGRESS", "CREATION_FAILED", "DELETION_IN_PROGRESS", "RUNNING", "REBOOT_IN_PROGRESS" ] }, "BrokerSummary" : { @@ -574,24 +579,29 @@ "BrokerState" : { "shape" : "BrokerState", "locationName" : "brokerState", - "documentation" : "The status of the broker. Possible values: CREATION_IN_PROGRESS, CREATION_FAILED, DELETION_IN_PROGRESS, RUNNING, REBOOT_IN_PROGRESS" + "documentation" : "The status of the broker." + }, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "The time when the broker was created." }, "DeploymentMode" : { "shape" : "DeploymentMode", "locationName" : "deploymentMode", - "documentation" : "Required. The deployment mode of the broker. Possible values: SINGLE_INSTANCE, ACTIVE_STANDBY_MULTI_AZ SINGLE_INSTANCE creates a single-instance broker in a single Availability Zone. ACTIVE_STANDBY_MULTI_AZ creates an active/standby broker for high availability." + "documentation" : "Required. The deployment mode of the broker." }, "HostInstanceType" : { "shape" : "__string", "locationName" : "hostInstanceType", - "documentation" : "The broker's instance type. Possible values: mq.t2.micro, mq.m4.large" + "documentation" : "The broker's instance type." } }, "documentation" : "The Amazon Resource Name (ARN) of the broker." }, "ChangeType" : { "type" : "string", - "documentation" : "The type of change pending for the ActiveMQ user. Possible values: CREATE, UPDATE, DELETE", + "documentation" : "The type of change pending for the ActiveMQ user.", "enum" : [ "CREATE", "UPDATE", "DELETE" ] }, "Configuration" : { @@ -602,6 +612,11 @@ "locationName" : "arn", "documentation" : "Required. The ARN of the configuration." 
}, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "Required. The date and time of the configuration revision." + }, "Description" : { "shape" : "__string", "locationName" : "description", @@ -646,7 +661,7 @@ "Revision" : { "shape" : "__integer", "locationName" : "revision", - "documentation" : "The Universally Unique Identifier (UUID) of the request." + "documentation" : "The revision number of the configuration." } }, "documentation" : "A list of information about the configuration." @@ -654,6 +669,11 @@ "ConfigurationRevision" : { "type" : "structure", "members" : { + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "Required. The date and time of the configuration revision." + }, "Description" : { "shape" : "__string", "locationName" : "description", @@ -662,7 +682,7 @@ "Revision" : { "shape" : "__integer", "locationName" : "revision", - "documentation" : "Required. The revision of the configuration." + "documentation" : "Required. The revision number of the configuration." } }, "documentation" : "Returns information about the specified configuration revision." @@ -676,7 +696,7 @@ "documentation" : "The current configuration of the broker." }, "History" : { - "shape" : "ListOfConfigurationId", + "shape" : "__listOfConfigurationId", "locationName" : "history", "documentation" : "The history of configurations applied to the broker." }, @@ -694,12 +714,12 @@ "ErrorAttribute" : { "shape" : "__string", "locationName" : "errorAttribute", - "documentation" : "The error attribute." + "documentation" : "The attribute which caused the error." }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The error message." + "documentation" : "The explanation of the error." } }, "documentation" : "Returns information about an error.", @@ -735,7 +755,7 @@ "DeploymentMode" : { "shape" : "DeploymentMode", "locationName" : "deploymentMode", - "documentation" : "Required. The deployment mode of the broker. Possible values: SINGLE_INSTANCE, ACTIVE_STANDBY_MULTI_AZ SINGLE_INSTANCE creates a single-instance broker in a single Availability Zone. ACTIVE_STANDBY_MULTI_AZ creates an active/standby broker for high availability." + "documentation" : "Required. The deployment mode of the broker." }, "EngineType" : { "shape" : "EngineType", @@ -750,7 +770,12 @@ "HostInstanceType" : { "shape" : "__string", "locationName" : "hostInstanceType", - "documentation" : "Required. The broker's instance type. Possible values: mq.t2.micro, mq.m4.large" + "documentation" : "Required. The broker's instance type." + }, + "Logs" : { + "shape" : "Logs", + "locationName" : "logs", + "documentation" : "Enables Amazon CloudWatch logging for brokers." }, "MaintenanceWindowStartTime" : { "shape" : "WeeklyStartTime", @@ -763,17 +788,17 @@ "documentation" : "Required. Enables connections from applications outside of the VPC that hosts the broker's subnets." }, "SecurityGroups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "Required. The list of rules (1 minimum, 125 maximum) that authorize connections to brokers." + "documentation" : "The list of rules (1 minimum, 125 maximum) that authorize connections to brokers." }, "SubnetIds" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "subnetIds", - "documentation" : "Required. 
The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets." + "documentation" : "The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets." }, "Users" : { - "shape" : "ListOfUser", + "shape" : "__listOfUser", "locationName" : "users", "documentation" : "Required. The list of ActiveMQ users (persons or applications) who can access queues and topics. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." } @@ -823,7 +848,7 @@ "DeploymentMode" : { "shape" : "DeploymentMode", "locationName" : "deploymentMode", - "documentation" : "Required. The deployment mode of the broker. Possible values: SINGLE_INSTANCE, ACTIVE_STANDBY_MULTI_AZ SINGLE_INSTANCE creates a single-instance broker in a single Availability Zone. ACTIVE_STANDBY_MULTI_AZ creates an active/standby broker for high availability." + "documentation" : "Required. The deployment mode of the broker." }, "EngineType" : { "shape" : "EngineType", @@ -838,7 +863,12 @@ "HostInstanceType" : { "shape" : "__string", "locationName" : "hostInstanceType", - "documentation" : "Required. The broker's instance type. Possible values: mq.t2.micro, mq.m4.large" + "documentation" : "Required. The broker's instance type." + }, + "Logs" : { + "shape" : "Logs", + "locationName" : "logs", + "documentation" : "Enables Amazon CloudWatch logging for brokers." }, "MaintenanceWindowStartTime" : { "shape" : "WeeklyStartTime", @@ -851,17 +881,17 @@ "documentation" : "Required. Enables connections from applications outside of the VPC that hosts the broker's subnets." }, "SecurityGroups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "Required. The list of rules (1 minimum, 125 maximum) that authorize connections to brokers." + "documentation" : "The list of rules (1 minimum, 125 maximum) that authorize connections to brokers." }, "SubnetIds" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "subnetIds", - "documentation" : "Required. The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets." + "documentation" : "The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets." }, "Users" : { - "shape" : "ListOfUser", + "shape" : "__listOfUser", "locationName" : "users", "documentation" : "Required. The list of ActiveMQ users (persons or applications) who can access queues and topics. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." } @@ -902,7 +932,7 @@ "documentation" : "Required. The name of the configuration. 
This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long." } }, - "documentation" : "Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version). Note: If the configuration name already exists, Amazon MQ doesn't create a configuration." + "documentation" : "Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version)." }, "CreateConfigurationOutput" : { "type" : "structure", @@ -912,6 +942,11 @@ "locationName" : "arn", "documentation" : "Required. The Amazon Resource Name (ARN) of the configuration." }, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "Required. The date and time of the configuration." + }, "Id" : { "shape" : "__string", "locationName" : "id", @@ -949,7 +984,7 @@ "documentation" : "Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long." } }, - "documentation" : "Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version). Note: If the configuration name already exists, Amazon MQ doesn't create a configuration." + "documentation" : "Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version)." }, "CreateConfigurationResponse" : { "type" : "structure", @@ -959,6 +994,11 @@ "locationName" : "arn", "documentation" : "Required. The Amazon Resource Name (ARN) of the configuration." }, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "Required. The date and time of the configuration." + }, "Id" : { "shape" : "__string", "locationName" : "id", @@ -985,7 +1025,7 @@ "documentation" : "Enables access to the the ActiveMQ Web Console for the ActiveMQ user." }, "Groups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "groups", "documentation" : "The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." }, @@ -1012,7 +1052,7 @@ "documentation" : "Enables access to the the ActiveMQ Web Console for the ActiveMQ user." }, "Groups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "groups", "documentation" : "The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." }, @@ -1096,7 +1136,7 @@ }, "DeploymentMode" : { "type" : "string", - "documentation" : "The deployment mode of the broker. Possible values: SINGLE_INSTANCE, ACTIVE_STANDBY_MULTI_AZ SINGLE_INSTANCE creates a single-instance broker in a single Availability Zone. ACTIVE_STANDBY_MULTI_AZ creates an active/standby broker for high availability.", + "documentation" : "The deployment mode of the broker.", "enum" : [ "SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ" ] }, "DescribeBrokerOutput" : { @@ -1118,7 +1158,7 @@ "documentation" : "The unique ID that Amazon MQ generates for the broker." 
}, "BrokerInstances" : { - "shape" : "ListOfBrokerInstance", + "shape" : "__listOfBrokerInstance", "locationName" : "brokerInstances", "documentation" : "A list of information about allocated brokers." }, @@ -1130,17 +1170,22 @@ "BrokerState" : { "shape" : "BrokerState", "locationName" : "brokerState", - "documentation" : "The status of the broker. Possible values: CREATION_IN_PROGRESS, CREATION_FAILED, DELETION_IN_PROGRESS, RUNNING, REBOOT_IN_PROGRESS" + "documentation" : "The status of the broker." }, "Configurations" : { "shape" : "Configurations", "locationName" : "configurations", "documentation" : "The list of all revisions for the specified configuration." }, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "The time when the broker was created." + }, "DeploymentMode" : { "shape" : "DeploymentMode", "locationName" : "deploymentMode", - "documentation" : "Required. The deployment mode of the broker. Possible values: SINGLE_INSTANCE, ACTIVE_STANDBY_MULTI_AZ SINGLE_INSTANCE creates a single-instance broker in a single Availability Zone. ACTIVE_STANDBY_MULTI_AZ creates an active/standby broker for high availability." + "documentation" : "Required. The deployment mode of the broker." }, "EngineType" : { "shape" : "EngineType", @@ -1155,7 +1200,12 @@ "HostInstanceType" : { "shape" : "__string", "locationName" : "hostInstanceType", - "documentation" : "The broker's instance type. Possible values: mq.t2.micro, mq.m4.large" + "documentation" : "The broker's instance type." + }, + "Logs" : { + "shape" : "LogsSummary", + "locationName" : "logs", + "documentation" : "The list of information about logs currently enabled and pending to be deployed for the specified broker." }, "MaintenanceWindowStartTime" : { "shape" : "WeeklyStartTime", @@ -1168,17 +1218,17 @@ "documentation" : "Required. Enables connections from applications outside of the VPC that hosts the broker's subnets." }, "SecurityGroups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "securityGroups", "documentation" : "Required. The list of rules (1 minimum, 125 maximum) that authorize connections to brokers." }, "SubnetIds" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "subnetIds", "documentation" : "The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets." }, "Users" : { - "shape" : "ListOfUserSummary", + "shape" : "__listOfUserSummary", "locationName" : "users", "documentation" : "The list of all ActiveMQ usernames for the specified broker." } @@ -1216,7 +1266,7 @@ "documentation" : "The unique ID that Amazon MQ generates for the broker." }, "BrokerInstances" : { - "shape" : "ListOfBrokerInstance", + "shape" : "__listOfBrokerInstance", "locationName" : "brokerInstances", "documentation" : "A list of information about allocated brokers." }, @@ -1228,17 +1278,22 @@ "BrokerState" : { "shape" : "BrokerState", "locationName" : "brokerState", - "documentation" : "The status of the broker. Possible values: CREATION_IN_PROGRESS, CREATION_FAILED, DELETION_IN_PROGRESS, RUNNING, REBOOT_IN_PROGRESS" + "documentation" : "The status of the broker." }, "Configurations" : { "shape" : "Configurations", "locationName" : "configurations", "documentation" : "The list of all revisions for the specified configuration." 
}, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "The time when the broker was created." + }, "DeploymentMode" : { "shape" : "DeploymentMode", "locationName" : "deploymentMode", - "documentation" : "Required. The deployment mode of the broker. Possible values: SINGLE_INSTANCE, ACTIVE_STANDBY_MULTI_AZ SINGLE_INSTANCE creates a single-instance broker in a single Availability Zone. ACTIVE_STANDBY_MULTI_AZ creates an active/standby broker for high availability." + "documentation" : "Required. The deployment mode of the broker." }, "EngineType" : { "shape" : "EngineType", @@ -1253,7 +1308,12 @@ "HostInstanceType" : { "shape" : "__string", "locationName" : "hostInstanceType", - "documentation" : "The broker's instance type. Possible values: mq.t2.micro, mq.m4.large" + "documentation" : "The broker's instance type." + }, + "Logs" : { + "shape" : "LogsSummary", + "locationName" : "logs", + "documentation" : "The list of information about logs currently enabled and pending to be deployed for the specified broker." }, "MaintenanceWindowStartTime" : { "shape" : "WeeklyStartTime", @@ -1266,17 +1326,17 @@ "documentation" : "Required. Enables connections from applications outside of the VPC that hosts the broker's subnets." }, "SecurityGroups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "securityGroups", "documentation" : "Required. The list of rules (1 minimum, 125 maximum) that authorize connections to brokers." }, "SubnetIds" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "subnetIds", "documentation" : "The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets." }, "Users" : { - "shape" : "ListOfUserSummary", + "shape" : "__listOfUserSummary", "locationName" : "users", "documentation" : "The list of all ActiveMQ usernames for the specified broker." } @@ -1302,6 +1362,11 @@ "locationName" : "arn", "documentation" : "Required. The ARN of the configuration." }, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "Required. The date and time of the configuration revision." + }, "Description" : { "shape" : "__string", "locationName" : "description", @@ -1342,6 +1407,11 @@ "locationName" : "configurationId", "documentation" : "Required. The unique ID that Amazon MQ generates for the configuration." }, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "Required. The date and time of the configuration." + }, "Data" : { "shape" : "__string", "locationName" : "data", @@ -1381,6 +1451,11 @@ "locationName" : "configurationId", "documentation" : "Required. The unique ID that Amazon MQ generates for the configuration." }, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "Required. The date and time of the configuration." + }, "Data" : { "shape" : "__string", "locationName" : "data", @@ -1407,7 +1482,7 @@ "documentation" : "Enables access to the the ActiveMQ Web Console for the ActiveMQ user." }, "Groups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "groups", "documentation" : "The list of groups (20 maximum) to which the ActiveMQ user belongs. 
This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." }, @@ -1456,7 +1531,7 @@ "documentation" : "Enables access to the the ActiveMQ Web Console for the ActiveMQ user." }, "Groups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "groups", "documentation" : "The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." }, @@ -1483,12 +1558,12 @@ "ErrorAttribute" : { "shape" : "__string", "locationName" : "errorAttribute", - "documentation" : "The error attribute." + "documentation" : "The attribute which caused the error." }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The error message." + "documentation" : "The explanation of the error." } }, "documentation" : "Returns information about an error." @@ -1499,12 +1574,12 @@ "ErrorAttribute" : { "shape" : "__string", "locationName" : "errorAttribute", - "documentation" : "The error attribute." + "documentation" : "The attribute which caused the error." }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The error message." + "documentation" : "The explanation of the error." } }, "documentation" : "Returns information about an error.", @@ -1519,12 +1594,12 @@ "ErrorAttribute" : { "shape" : "__string", "locationName" : "errorAttribute", - "documentation" : "The error attribute." + "documentation" : "The attribute which caused the error." }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The error message." + "documentation" : "The explanation of the error." } }, "documentation" : "Returns information about an error.", @@ -1537,7 +1612,7 @@ "type" : "structure", "members" : { "BrokerSummaries" : { - "shape" : "ListOfBrokerSummary", + "shape" : "__listOfBrokerSummary", "locationName" : "brokerSummaries", "documentation" : "A list of information about all brokers." }, @@ -1570,7 +1645,7 @@ "type" : "structure", "members" : { "BrokerSummaries" : { - "shape" : "ListOfBrokerSummary", + "shape" : "__listOfBrokerSummary", "locationName" : "brokerSummaries", "documentation" : "A list of information about all brokers." }, @@ -1600,7 +1675,7 @@ "documentation" : "The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty." }, "Revisions" : { - "shape" : "ListOfConfigurationRevision", + "shape" : "__listOfConfigurationRevision", "locationName" : "revisions", "documentation" : "The list of all revisions for the specified configuration." } @@ -1650,7 +1725,7 @@ "documentation" : "The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty." }, "Revisions" : { - "shape" : "ListOfConfigurationRevision", + "shape" : "__listOfConfigurationRevision", "locationName" : "revisions", "documentation" : "The list of all revisions for the specified configuration." } @@ -1660,7 +1735,7 @@ "type" : "structure", "members" : { "Configurations" : { - "shape" : "ListOfConfiguration", + "shape" : "__listOfConfiguration", "locationName" : "configurations", "documentation" : "The list of all revisions for the specified configuration." 
}, @@ -1698,7 +1773,7 @@ "type" : "structure", "members" : { "Configurations" : { - "shape" : "ListOfConfiguration", + "shape" : "__listOfConfiguration", "locationName" : "configurations", "documentation" : "The list of all revisions for the specified configuration." }, @@ -1714,60 +1789,6 @@ } } }, - "ListOfBrokerInstance" : { - "type" : "list", - "member" : { - "shape" : "BrokerInstance" - } - }, - "ListOfBrokerSummary" : { - "type" : "list", - "member" : { - "shape" : "BrokerSummary" - } - }, - "ListOfConfiguration" : { - "type" : "list", - "member" : { - "shape" : "Configuration" - } - }, - "ListOfConfigurationId" : { - "type" : "list", - "member" : { - "shape" : "ConfigurationId" - } - }, - "ListOfConfigurationRevision" : { - "type" : "list", - "member" : { - "shape" : "ConfigurationRevision" - } - }, - "ListOfSanitizationWarning" : { - "type" : "list", - "member" : { - "shape" : "SanitizationWarning" - } - }, - "ListOfUser" : { - "type" : "list", - "member" : { - "shape" : "User" - } - }, - "ListOfUserSummary" : { - "type" : "list", - "member" : { - "shape" : "UserSummary" - } - }, - "ListOf__string" : { - "type" : "list", - "member" : { - "shape" : "__string" - } - }, "ListUsersOutput" : { "type" : "structure", "members" : { @@ -1777,7 +1798,7 @@ "documentation" : "Required. The unique ID that Amazon MQ generates for the broker." }, "MaxResults" : { - "shape" : "__integer", + "shape" : "__integerMin5Max100", "locationName" : "maxResults", "documentation" : "Required. The maximum number of ActiveMQ users that can be returned per page (20 by default). This value must be an integer from 5 to 100." }, @@ -1787,7 +1808,7 @@ "documentation" : "The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty." }, "Users" : { - "shape" : "ListOfUserSummary", + "shape" : "__listOfUserSummary", "locationName" : "users", "documentation" : "Required. The list of all ActiveMQ usernames for the specified broker." } @@ -1827,7 +1848,7 @@ "documentation" : "Required. The unique ID that Amazon MQ generates for the broker." }, "MaxResults" : { - "shape" : "__integer", + "shape" : "__integerMin5Max100", "locationName" : "maxResults", "documentation" : "Required. The maximum number of ActiveMQ users that can be returned per page (20 by default). This value must be an integer from 5 to 100." }, @@ -1837,12 +1858,59 @@ "documentation" : "The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty." }, "Users" : { - "shape" : "ListOfUserSummary", + "shape" : "__listOfUserSummary", "locationName" : "users", "documentation" : "Required. The list of all ActiveMQ usernames for the specified broker." } } }, + "Logs" : { + "type" : "structure", + "members" : { + "Audit" : { + "shape" : "__boolean", + "locationName" : "audit", + "documentation" : "Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged." + }, + "General" : { + "shape" : "__boolean", + "locationName" : "general", + "documentation" : "Enables general logging." + } + }, + "documentation" : "The list of information about logs to be enabled for the specified broker." + }, + "LogsSummary" : { + "type" : "structure", + "members" : { + "Audit" : { + "shape" : "__boolean", + "locationName" : "audit", + "documentation" : "Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged." 
+ }, + "AuditLogGroup" : { + "shape" : "__string", + "locationName" : "auditLogGroup", + "documentation" : "Location of CloudWatch Log group where audit logs will be sent." + }, + "General" : { + "shape" : "__boolean", + "locationName" : "general", + "documentation" : "Enables general logging." + }, + "GeneralLogGroup" : { + "shape" : "__string", + "locationName" : "generalLogGroup", + "documentation" : "Location of CloudWatch Log group where general logs will be sent." + }, + "Pending" : { + "shape" : "PendingLogs", + "locationName" : "pending", + "documentation" : "The list of information about logs pending to be deployed for the specified broker." + } + }, + "documentation" : "The list of information about logs currently enabled and pending to be deployed for the specified broker." + }, "MaxResults" : { "type" : "integer", "min" : 1, @@ -1854,12 +1922,12 @@ "ErrorAttribute" : { "shape" : "__string", "locationName" : "errorAttribute", - "documentation" : "The error attribute." + "documentation" : "The attribute which caused the error." }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The error message." + "documentation" : "The explanation of the error." } }, "documentation" : "Returns information about an error.", @@ -1868,6 +1936,22 @@ "httpStatusCode" : 404 } }, + "PendingLogs" : { + "type" : "structure", + "members" : { + "Audit" : { + "shape" : "__boolean", + "locationName" : "audit", + "documentation" : "Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged." + }, + "General" : { + "shape" : "__boolean", + "locationName" : "general", + "documentation" : "Enables general logging." + } + }, + "documentation" : "The list of information about logs to be enabled for the specified broker." + }, "RebootBrokerRequest" : { "type" : "structure", "members" : { @@ -1900,14 +1984,14 @@ "Reason" : { "shape" : "SanitizationWarningReason", "locationName" : "reason", - "documentation" : "Required. The reason for which the XML elements or attributes were sanitized. Possible values: DISALLOWED_ELEMENT_REMOVED, DISALLOWED_ATTRIBUTE_REMOVED, INVALID_ATTRIBUTE_VALUE_REMOVED DISALLOWED_ELEMENT_REMOVED shows that the provided element isn't allowed and has been removed. DISALLOWED_ATTRIBUTE_REMOVED shows that the provided attribute isn't allowed and has been removed. INVALID_ATTRIBUTE_VALUE_REMOVED shows that the provided value for the attribute isn't allowed and has been removed." + "documentation" : "Required. The reason for which the XML elements or attributes were sanitized." } }, "documentation" : "Returns information about the XML element or attribute that was sanitized in the configuration." }, "SanitizationWarningReason" : { "type" : "string", - "documentation" : "The reason for which the XML elements or attributes were sanitized. Possible values: DISALLOWED_ELEMENT_REMOVED, DISALLOWED_ATTRIBUTE_REMOVED, INVALID_ATTRIBUTE_VALUE_REMOVED DISALLOWED_ELEMENT_REMOVED shows that the provided element isn't allowed and has been removed. DISALLOWED_ATTRIBUTE_REMOVED shows that the provided attribute isn't allowed and has been removed. 
INVALID_ATTRIBUTE_VALUE_REMOVED shows that the provided value for the attribute isn't allowed and has been removed.", + "documentation" : "The reason for which the XML elements or attributes were sanitized.", "enum" : [ "DISALLOWED_ELEMENT_REMOVED", "DISALLOWED_ATTRIBUTE_REMOVED", "INVALID_ATTRIBUTE_VALUE_REMOVED" ] }, "UnauthorizedException" : { @@ -1916,12 +2000,12 @@ "ErrorAttribute" : { "shape" : "__string", "locationName" : "errorAttribute", - "documentation" : "The error attribute." + "documentation" : "The attribute which caused the error." }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The error message." + "documentation" : "The explanation of the error." } }, "documentation" : "Returns information about an error.", @@ -1937,6 +2021,11 @@ "shape" : "ConfigurationId", "locationName" : "configuration", "documentation" : "A list of information about the configuration." + }, + "Logs" : { + "shape" : "Logs", + "locationName" : "logs", + "documentation" : "Enables Amazon CloudWatch logging for brokers." } }, "documentation" : "Updates the broker using the specified properties." @@ -1953,6 +2042,11 @@ "shape" : "ConfigurationId", "locationName" : "configuration", "documentation" : "The ID of the updated configuration." + }, + "Logs" : { + "shape" : "Logs", + "locationName" : "logs", + "documentation" : "The list of information about logs to be enabled for the specified broker." } }, "documentation" : "Returns information about the updated broker." @@ -1970,6 +2064,11 @@ "shape" : "ConfigurationId", "locationName" : "configuration", "documentation" : "A list of information about the configuration." + }, + "Logs" : { + "shape" : "Logs", + "locationName" : "logs", + "documentation" : "Enables Amazon CloudWatch logging for brokers." } }, "documentation" : "Updates the broker using the specified properties.", @@ -1987,6 +2086,11 @@ "shape" : "ConfigurationId", "locationName" : "configuration", "documentation" : "The ID of the updated configuration." + }, + "Logs" : { + "shape" : "Logs", + "locationName" : "logs", + "documentation" : "The list of information about logs to be enabled for the specified broker." } } }, @@ -2014,6 +2118,11 @@ "locationName" : "arn", "documentation" : "Required. The Amazon Resource Name (ARN) of the configuration." }, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "Required. The date and time of the configuration." + }, "Id" : { "shape" : "__string", "locationName" : "id", @@ -2030,7 +2139,7 @@ "documentation" : "Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long." }, "Warnings" : { - "shape" : "ListOfSanitizationWarning", + "shape" : "__listOfSanitizationWarning", "locationName" : "warnings", "documentation" : "The list of the first 20 warnings about the configuration XML elements or attributes that were sanitized." } @@ -2068,6 +2177,11 @@ "locationName" : "arn", "documentation" : "Required. The Amazon Resource Name (ARN) of the configuration." }, + "Created" : { + "shape" : "__timestampIso8601", + "locationName" : "created", + "documentation" : "Required. The date and time of the configuration." + }, "Id" : { "shape" : "__string", "locationName" : "id", @@ -2084,7 +2198,7 @@ "documentation" : "Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). 
This value must be 1-150 characters long." }, "Warnings" : { - "shape" : "ListOfSanitizationWarning", + "shape" : "__listOfSanitizationWarning", "locationName" : "warnings", "documentation" : "The list of the first 20 warnings about the configuration XML elements or attributes that were sanitized." } @@ -2099,7 +2213,7 @@ "documentation" : "Enables access to the the ActiveMQ Web Console for the ActiveMQ user." }, "Groups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "groups", "documentation" : "The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." }, @@ -2126,7 +2240,7 @@ "documentation" : "Enables access to the the ActiveMQ Web Console for the ActiveMQ user." }, "Groups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "groups", "documentation" : "The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." }, @@ -2158,7 +2272,7 @@ "documentation" : "Enables access to the the ActiveMQ Web Console for the ActiveMQ user." }, "Groups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "groups", "documentation" : "The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." }, @@ -2184,14 +2298,14 @@ "documentation" : "Enables access to the the ActiveMQ Web Console for the ActiveMQ user." }, "Groups" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "groups", "documentation" : "The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." }, "PendingChange" : { "shape" : "ChangeType", "locationName" : "pendingChange", - "documentation" : "Required. The type of change pending for the ActiveMQ user. Possible values: CREATE, UPDATE, DELETE" + "documentation" : "Required. The type of change pending for the ActiveMQ user." } }, "documentation" : "Returns information about the status of the changes pending for the ActiveMQ user." @@ -2202,7 +2316,7 @@ "PendingChange" : { "shape" : "ChangeType", "locationName" : "pendingChange", - "documentation" : "The type of change pending for the ActiveMQ user. Possible values: CREATE, UPDATE, DELETE" + "documentation" : "The type of change pending for the ActiveMQ user." }, "Username" : { "shape" : "__string", @@ -2218,7 +2332,7 @@ "DayOfWeek" : { "shape" : "DayOfWeek", "locationName" : "dayOfWeek", - "documentation" : "Required. The day of the week. Possible values: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY" + "documentation" : "Required. The day of the week." 
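The hunks above add a Logs member to the UpdateBroker request and response shapes, described as enabling Amazon CloudWatch logging for brokers. A minimal sketch of exercising the new field through boto3 once this model ships; the broker ID is a placeholder, and the General and Audit booleans are assumed to be the members of the Logs shape defined elsewhere in this diff:

import boto3

# Hypothetical broker ID; replace with a real Amazon MQ broker.
BROKER_ID = "b-1234a5b6-78cd-901e-2fgh-3i45j6k178l9"

mq = boto3.client("mq", region_name="us-east-1")

# Enable general and audit logging for the broker. The Logs structure
# (General/Audit flags) is assumed from the shape referenced in this diff.
response = mq.update_broker(
    BrokerId=BROKER_ID,
    Logs={"General": True, "Audit": True},
)
print(response["BrokerId"], response.get("Logs"))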
}, "TimeOfDay" : { "shape" : "__string", @@ -2242,11 +2356,78 @@ "__integer" : { "type" : "integer" }, + "__integerMin5Max100" : { + "type" : "integer", + "min" : 5, + "max" : 100 + }, + "__listOfBrokerInstance" : { + "type" : "list", + "member" : { + "shape" : "BrokerInstance" + } + }, + "__listOfBrokerSummary" : { + "type" : "list", + "member" : { + "shape" : "BrokerSummary" + } + }, + "__listOfConfiguration" : { + "type" : "list", + "member" : { + "shape" : "Configuration" + } + }, + "__listOfConfigurationId" : { + "type" : "list", + "member" : { + "shape" : "ConfigurationId" + } + }, + "__listOfConfigurationRevision" : { + "type" : "list", + "member" : { + "shape" : "ConfigurationRevision" + } + }, + "__listOfSanitizationWarning" : { + "type" : "list", + "member" : { + "shape" : "SanitizationWarning" + } + }, + "__listOfUser" : { + "type" : "list", + "member" : { + "shape" : "User" + } + }, + "__listOfUserSummary" : { + "type" : "list", + "member" : { + "shape" : "UserSummary" + } + }, + "__listOf__string" : { + "type" : "list", + "member" : { + "shape" : "__string" + } + }, + "__long" : { + "type" : "long" + }, "__string" : { "type" : "string" }, - "__timestamp" : { - "type" : "timestamp" + "__timestampIso8601" : { + "type" : "timestamp", + "timestampFormat" : "iso8601" + }, + "__timestampUnix" : { + "type" : "timestamp", + "timestampFormat" : "unixTimestamp" } }, "documentation" : "Amazon MQ is a managed message broker service for Apache ActiveMQ that makes it easy to set up and operate message brokers in the cloud. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols." diff --git a/botocore/data/pinpoint/2016-12-01/service-2.json b/botocore/data/pinpoint/2016-12-01/service-2.json index 1ac19d12..31192a3c 100644 --- a/botocore/data/pinpoint/2016-12-01/service-2.json +++ b/botocore/data/pinpoint/2016-12-01/service-2.json @@ -710,7 +710,7 @@ "shape" : "TooManyRequestsException", "documentation" : "429 response" } ], - "documentation" : "Deletes endpoints associated with an user id." + "documentation" : "Deletes endpoints that are associated with a User ID." }, "GetAdmChannel" : { "name" : "GetAdmChannel", @@ -1795,7 +1795,7 @@ "shape" : "TooManyRequestsException", "documentation" : "429 response" } ], - "documentation" : "Returns information about the endpoints associated with an user id." + "documentation" : "Returns information about the endpoints that are associated with a User ID." }, "PhoneNumberValidate" : { "name" : "PhoneNumberValidate", @@ -1867,6 +1867,41 @@ } ], "documentation" : "Use to create or update the event stream for an app." 
}, + "PutEvents" : { + "name" : "PutEvents", + "http" : { + "method" : "POST", + "requestUri" : "/v1/apps/{application-id}/events", + "responseCode" : 202 + }, + "input" : { + "shape" : "PutEventsRequest" + }, + "output" : { + "shape" : "PutEventsResponse", + "documentation" : "202 response" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "400 response" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "500 response" + }, { + "shape" : "ForbiddenException", + "documentation" : "403 response" + }, { + "shape" : "NotFoundException", + "documentation" : "404 response" + }, { + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "429 response" + } ], + "documentation" : "Use to record events for endpoints. This method creates events and creates or updates the endpoints that those events are associated with." + }, "RemoveAttributes" : { "name" : "RemoveAttributes", "http" : { @@ -1935,7 +1970,7 @@ "shape" : "TooManyRequestsException", "documentation" : "429 response" } ], - "documentation" : "Use this resource to send a direct message, which is a one time message that you send to a limited audience without creating a campaign. \n\nYou can send the message to up to 100 recipients. You cannot use the message to engage a segment. When you send the message, Amazon Pinpoint delivers it immediately, and you cannot schedule the delivery. To engage a user segment, and to schedule the message delivery, create a campaign instead of sending a direct message.\n\nYou can send a direct message as a push notification to your mobile app or as an SMS message to SMS-enabled devices." + "documentation" : "Used to send a direct message." }, "SendUsersMessages" : { "name" : "SendUsersMessages", @@ -1970,7 +2005,7 @@ "shape" : "TooManyRequestsException", "documentation" : "429 response" } ], - "documentation" : "Use this resource to message a list of users. Amazon Pinpoint sends the message to all of the endpoints that are associated with each user.\n\nA user represents an individual who is assigned a unique user ID, and this ID is assigned to one or more endpoints. For example, if an individual uses your app on multiple devices, your app could assign that person's user ID to the endpoint for each device.\n\nWith the users-messages resource, you specify the message recipients as user IDs. For each user ID, Amazon Pinpoint delivers the message to all of the user's endpoints. Within the body of your request, you can specify a default message, and you can tailor your message for different channels, including those for mobile push and SMS.\n\nWith this resource, you send a direct message, which is a one time message that you send to a limited audience without creating a campaign. You can send the message to up to 100 users per request. You cannot use the message to engage a segment. When you send the message, Amazon Pinpoint delivers it immediately, and you cannot schedule the delivery. To engage a user segment, and to schedule the message delivery, create a campaign instead of using the users-messages resource." + "documentation" : "Used to send a message to a list of users." }, "UpdateAdmChannel" : { "name" : "UpdateAdmChannel", @@ -2425,7 +2460,7 @@ "shape" : "TooManyRequestsException", "documentation" : "429 response" } ], - "documentation" : "Use to update a segment." + "documentation" : "Used to update a segment." 
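The PutEvents operation added above records events and creates or updates the endpoints those events are associated with. A minimal sketch of calling it through boto3, assuming the BatchItem layout described later in this file (endpoint IDs as keys, each holding a PublicEndpoint plus a map of event IDs to Event objects); the application, endpoint, and event IDs are placeholders:

import datetime
import boto3

pinpoint = boto3.client("pinpoint", region_name="us-east-1")

now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

# BatchItem maps endpoint IDs to an EventsBatch: the endpoint itself plus
# a map of event IDs to Event objects (EventType and Timestamp at minimum).
response = pinpoint.put_events(
    ApplicationId="1234567890abcdef1234567890abcdef",  # placeholder app ID
    EventsRequest={
        "BatchItem": {
            "example-endpoint-id": {
                "Endpoint": {"ChannelType": "APNS", "Address": "device-token"},
                "Events": {
                    "example-event-id": {
                        "EventType": "item.viewed",
                        "Timestamp": now,
                        "Attributes": {"screen": "home"},
                    }
                },
            }
        }
    },
)
print(response["EventsResponse"]["Results"])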
}, "UpdateSmsChannel" : { "name" : "UpdateSmsChannel", @@ -2469,15 +2504,15 @@ "members" : { "ClientId" : { "shape" : "__string", - "documentation" : "Client ID as gotten from Amazon" + "documentation" : "The Client ID that you obtained from the Amazon App Distribution Portal." }, "ClientSecret" : { "shape" : "__string", - "documentation" : "Client secret as gotten from Amazon" + "documentation" : "The Client Secret that you obtained from the Amazon App Distribution Portal." }, "Enabled" : { "shape" : "__boolean", - "documentation" : "If the channel is enabled for sending messages." + "documentation" : "Indicates whether or not the channel is enabled for sending messages." } }, "documentation" : "Amazon Device Messaging channel definition.", @@ -2492,11 +2527,11 @@ }, "CreationDate" : { "shape" : "__string", - "documentation" : "When was this segment created" + "documentation" : "The date and time when this channel was created." }, "Enabled" : { "shape" : "__boolean", - "documentation" : "If the channel is enabled for sending messages." + "documentation" : "Indicates whether or not the channel is enabled for sending messages." }, "HasCredential" : { "shape" : "__boolean", @@ -2504,27 +2539,27 @@ }, "Id" : { "shape" : "__string", - "documentation" : "Channel ID. Not used, only for backwards compatibility." + "documentation" : "(Deprecated) An identifier for the channel. Retained for backwards compatibility." }, "IsArchived" : { "shape" : "__boolean", - "documentation" : "Is this channel archived" + "documentation" : "Indicates whether or not the channel is archived." }, "LastModifiedBy" : { "shape" : "__string", - "documentation" : "Who last updated this entry" + "documentation" : "The user who last updated this channel." }, "LastModifiedDate" : { "shape" : "__string", - "documentation" : "Last date this was updated" + "documentation" : "The date and time when this channel was last modified." }, "Platform" : { "shape" : "__string", - "documentation" : "Platform type. Will be \"ADM\"" + "documentation" : "The platform type. For this channel, the value is always \"ADM.\"" }, "Version" : { "shape" : "__integer", - "documentation" : "Version of channel" + "documentation" : "The channel version." } }, "documentation" : "Amazon Device Messaging channel definition.", @@ -2539,7 +2574,7 @@ }, "Body" : { "shape" : "__string", - "documentation" : "The message body of the notification, the email body or the text message." + "documentation" : "The message body of the notification." }, "ConsolidationKey" : { "shape" : "__string", @@ -2644,11 +2679,11 @@ "members" : { "ApplicationId" : { "shape" : "__string", - "documentation" : "The ID of the application to which the channel applies." + "documentation" : "The ID of the application that the channel applies to." }, "CreationDate" : { "shape" : "__string", - "documentation" : "When was this segment created" + "documentation" : "The date and time when this channel was created." }, "DefaultAuthenticationMethod" : { "shape" : "__string", @@ -2668,27 +2703,27 @@ }, "Id" : { "shape" : "__string", - "documentation" : "Channel ID. Not used. Present only for backwards compatibility." + "documentation" : "(Deprecated) An identifier for the channel. Retained for backwards compatibility." }, "IsArchived" : { "shape" : "__boolean", - "documentation" : "Is this channel archived" + "documentation" : "Indicates whether or not the channel is archived." 
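The ADM channel request above takes the Client ID and Client Secret obtained from the Amazon App Distribution Portal. A minimal sketch of enabling the channel through boto3; the application ID and credentials are placeholders:

import boto3

pinpoint = boto3.client("pinpoint", region_name="us-east-1")

# Enable Amazon Device Messaging for an app using credentials obtained
# from the Amazon App Distribution Portal (placeholder values below).
response = pinpoint.update_adm_channel(
    ApplicationId="1234567890abcdef1234567890abcdef",
    ADMChannelRequest={
        "ClientId": "amzn1.application-oa2-client.example",
        "ClientSecret": "example-client-secret",
        "Enabled": True,
    },
)
print(response["ADMChannelResponse"]["Version"])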
}, "LastModifiedBy" : { "shape" : "__string", - "documentation" : "Who last updated this entry" + "documentation" : "The user who last updated this channel." }, "LastModifiedDate" : { "shape" : "__string", - "documentation" : "Last date this was updated" + "documentation" : "The date and time when this channel was last modified." }, "Platform" : { "shape" : "__string", - "documentation" : "The platform type. Will be APNS." + "documentation" : "The platform type. For this channel, the value is always \"ADM.\"" }, "Version" : { "shape" : "__integer", - "documentation" : "Version of channel" + "documentation" : "The channel version." } }, "documentation" : "Apple Distribution Push Notification Service channel definition.", @@ -2707,7 +2742,7 @@ }, "Body" : { "shape" : "__string", - "documentation" : "The message body of the notification, the email body or the text message." + "documentation" : "The message body of the notification." }, "Category" : { "shape" : "__string", @@ -3060,6 +3095,10 @@ "Item" : { "shape" : "ListOfActivityResponse", "documentation" : "List of campaign activities" + }, + "NextToken" : { + "shape" : "__string", + "documentation" : "The string that you use in a subsequent request to get the next page of results in a paginated response." } }, "documentation" : "Activities for campaign.", @@ -3345,7 +3384,7 @@ }, "Body" : { "shape" : "__string", - "documentation" : "The message body of the notification, the email body or the text message." + "documentation" : "The message body of the notification." }, "Data" : { "shape" : "MapOf__string", @@ -3799,7 +3838,7 @@ "documentation" : "Default message substitutions. Can be overridden by individual address substitutions." } }, - "documentation" : "Default Message across push notification, email, and sms." + "documentation" : "The default message to use across all channels." }, "DefaultPushNotificationMessage" : { "type" : "structure", @@ -3810,7 +3849,7 @@ }, "Body" : { "shape" : "__string", - "documentation" : "The message body of the notification, the email body or the text message." + "documentation" : "The message body of the notification." }, "Data" : { "shape" : "MapOf__string", @@ -4327,7 +4366,7 @@ "members" : { "Address" : { "shape" : "__string", - "documentation" : "The destination for messages that you send to this endpoint. The address varies by channel. For mobile push channels, use the token provided by the push notification service, such as the APNs device token or the FCM registration token. For the SMS channel, use a phone number in E.164 format, such as +1206XXX5550100. For the email channel, use an email address." + "documentation" : "The destination for messages that you send to this endpoint. The address varies by channel. For mobile push channels, use the token provided by the push notification service, such as the APNs device token or the FCM registration token. For the SMS channel, use a phone number in E.164 format, such as +12065550100. For the email channel, use an email address." }, "Attributes" : { "shape" : "MapOfListOf__string", @@ -4400,30 +4439,44 @@ }, "Make" : { "shape" : "__string", - "documentation" : "The endpoint make, such as such as Apple or Samsung." + "documentation" : "The manufacturer of the endpoint device, such as Apple or Samsung." }, "Model" : { "shape" : "__string", - "documentation" : "The endpoint model, such as iPhone." + "documentation" : "The model name or number of the endpoint device, such as iPhone." 
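The Address and EndpointDemographic documentation above describes how an endpoint is addressed per channel (device token for push, E.164 phone number for SMS, email address for email) and which demographic fields it carries. A minimal sketch of registering such an endpoint with boto3; the application ID, endpoint ID, and field values are placeholders:

import boto3

pinpoint = boto3.client("pinpoint", region_name="us-east-1")

# Register or update an SMS endpoint. The address format depends on the
# channel: E.164 phone number for SMS, device token for push, email address
# for the email channel.
response = pinpoint.update_endpoint(
    ApplicationId="1234567890abcdef1234567890abcdef",  # placeholder
    EndpointId="example-endpoint-id",
    EndpointRequest={
        "ChannelType": "SMS",
        "Address": "+12065550100",
        "Demographic": {"Make": "Apple", "Model": "iPhone", "Platform": "ios"},
        "Location": {"Country": "US"},
    },
)
print(response)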
}, "ModelVersion" : { "shape" : "__string", - "documentation" : "The endpoint model version." + "documentation" : "The model version of the endpoint device." }, "Platform" : { "shape" : "__string", - "documentation" : "The endpoint platform, such as ios or android." + "documentation" : "The platform of the endpoint device, such as iOS or Android." }, "PlatformVersion" : { "shape" : "__string", - "documentation" : "The endpoint platform version." + "documentation" : "The platform version of the endpoint device." }, "Timezone" : { "shape" : "__string", "documentation" : "The timezone of the endpoint. Specified as a tz database value, such as Americas/Los_Angeles." } }, - "documentation" : "Endpoint demographic data" + "documentation" : "Demographic information about the endpoint." + }, + "EndpointItemResponse" : { + "type" : "structure", + "members" : { + "Message" : { + "shape" : "__string", + "documentation" : "A custom message associated with the registration of an endpoint when issuing a response." + }, + "StatusCode" : { + "shape" : "__integer", + "documentation" : "The status code to respond with for a particular endpoint id after endpoint registration" + } + }, + "documentation" : "The responses that are returned after you create or update an endpoint and record an event." }, "EndpointLocation" : { "type" : "structure", @@ -4434,15 +4487,15 @@ }, "Country" : { "shape" : "__string", - "documentation" : "Country according to ISO 3166-1 Alpha-2 codes. For example, US." + "documentation" : "The two-letter code for the country or region of the endpoint. Specified as an ISO 3166-1 Alpha-2 code, such as \"US\" for the United States." }, "Latitude" : { "shape" : "__double", - "documentation" : "The latitude of the endpoint location. Rounded to one decimal (Roughly corresponding to a mile)." + "documentation" : "The latitude of the endpoint location, rounded to one decimal place." }, "Longitude" : { "shape" : "__double", - "documentation" : "The longitude of the endpoint location. Rounded to one decimal (Roughly corresponding to a mile)." + "documentation" : "The longitude of the endpoint location, rounded to one decimal place." }, "PostalCode" : { "shape" : "__string", @@ -4450,10 +4503,10 @@ }, "Region" : { "shape" : "__string", - "documentation" : "The region of the endpoint location. For example, corresponds to a state in US." + "documentation" : "The region of the endpoint location. For example, in the United States, this corresponds to a state." } }, - "documentation" : "Endpoint location data" + "documentation" : "Location data for the endpoint." }, "EndpointMessageResult" : { "type" : "structure", @@ -4464,7 +4517,7 @@ }, "DeliveryStatus" : { "shape" : "DeliveryStatus", - "documentation" : "Delivery status of message." + "documentation" : "The delivery status of the message. Possible values:\n\nSUCCESS - The message was successfully delivered to the endpoint.\n\nTRANSIENT_FAILURE - A temporary error occurred. Amazon Pinpoint will attempt to deliver the message again later.\n\nFAILURE_PERMANENT - An error occurred when delivering the message to the endpoint. 
Amazon Pinpoint won't attempt to send the message again.\n\nTIMEOUT - The message couldn't be sent within the timeout period.\n\nQUIET_TIME - The local time for the endpoint was within the Quiet Hours for the campaign.\n\nDAILY_CAP - The endpoint has received the maximum number of messages it can receive within a 24-hour period.\n\nHOLDOUT - The endpoint was in a hold out treatment for the campaign.\n\nTHROTTLED - Amazon Pinpoint throttled sending to this endpoint.\n\nEXPIRED - The endpoint address is expired.\n\nCAMPAIGN_CAP - The endpoint received the maximum number of messages allowed by the campaign.\n\nSERVICE_FAILURE - A service-level failure prevented Amazon Pinpoint from delivering the message.\n\nUNKNOWN - An unknown error occurred." }, "MessageId" : { "shape" : "__string", @@ -4491,7 +4544,7 @@ "members" : { "Address" : { "shape" : "__string", - "documentation" : "The destination for messages that you send to this endpoint. The address varies by channel. For mobile push channels, use the token provided by the push notification service, such as the APNs device token or the FCM registration token. For the SMS channel, use a phone number in E.164 format, such as +1206XXX5550100. For the email channel, use an email address." + "documentation" : "The destination for messages that you send to this endpoint. The address varies by channel. For mobile push channels, use the token provided by the push notification service, such as the APNs device token or the FCM registration token. For the SMS channel, use a phone number in E.164 format, such as +12065550100. For the email channel, use an email address." }, "Attributes" : { "shape" : "MapOfListOf__string", @@ -4503,11 +4556,11 @@ }, "Demographic" : { "shape" : "EndpointDemographic", - "documentation" : "The endpoint demographic attributes." + "documentation" : "Demographic attributes for the endpoint." }, "EffectiveDate" : { "shape" : "__string", - "documentation" : "The last time the endpoint was updated. Provided in ISO 8601 format." + "documentation" : "The date and time when the endpoint was updated, shown in ISO 8601 format." }, "EndpointStatus" : { "shape" : "__string", @@ -4541,15 +4594,15 @@ "members" : { "Address" : { "shape" : "__string", - "documentation" : "The address or token of the endpoint as provided by your push provider (e.g. DeviceToken or RegistrationId)." + "documentation" : "The address of the endpoint as provided by your push provider. For example, the DeviceToken or RegistrationId." }, "ApplicationId" : { "shape" : "__string", - "documentation" : "The ID of the application associated with the endpoint." + "documentation" : "The ID of the application that is associated with the endpoint." }, "Attributes" : { "shape" : "MapOfListOf__string", - "documentation" : "Custom attributes that describe the endpoint by associating a name with an array of values. For example, an attribute named \"interests\" might have the values [\"science\", \"politics\", \"travel\"]. You can use these attributes as selection criteria when you create a segment of users to engage with a messaging campaign.\n\nThe following characters are not recommended in attribute names: # : ? \\ /. The Amazon Pinpoint console does not display attributes that include these characters in the name. This limitation does not apply to attribute values." + "documentation" : "Custom attributes that describe the endpoint by associating a name with an array of values. 
For example, an attribute named \"interests\" might have the following values: [\"science\", \"politics\", \"travel\"]. You can use these attributes as selection criteria when you create segments.\n\nThe Amazon Pinpoint console can't display attribute names that include the following characters: hash/pound sign (#), colon (:), question mark (?), backslash (\\), and forward slash (/). For this reason, you should avoid using these characters in the names of custom attributes." }, "ChannelType" : { "shape" : "ChannelType", @@ -4557,11 +4610,11 @@ }, "CohortId" : { "shape" : "__string", - "documentation" : "A number from 0 - 99 that represents the cohort the endpoint is assigned to. Endpoints are grouped into cohorts randomly, and each cohort contains approximately 1 percent of the endpoints for an app. Amazon Pinpoint assigns cohorts to the holdout or treatment allocations for a campaign." + "documentation" : "A number from 0-99 that represents the cohort the endpoint is assigned to. Endpoints are grouped into cohorts randomly, and each cohort contains approximately 1 percent of the endpoints for an app. Amazon Pinpoint assigns cohorts to the holdout or treatment allocations for a campaign." }, "CreationDate" : { "shape" : "__string", - "documentation" : "The last time the endpoint was created. Provided in ISO 8601 format." + "documentation" : "The date and time when the endpoint was created, shown in ISO 8601 format." }, "Demographic" : { "shape" : "EndpointDemographic", @@ -4569,7 +4622,7 @@ }, "EffectiveDate" : { "shape" : "__string", - "documentation" : "The last time the endpoint was updated. Provided in ISO 8601 format." + "documentation" : "The date and time when the endpoint was last updated, shown in ISO 8601 format." }, "EndpointStatus" : { "shape" : "__string", @@ -4577,7 +4630,7 @@ }, "Id" : { "shape" : "__string", - "documentation" : "The unique ID that you assigned to the endpoint. The ID should be a globally unique identifier (GUID) to ensure that it is unique compared to all other endpoints for the application." + "documentation" : "The unique ID that you assigned to the endpoint. The ID should be a globally unique identifier (GUID) to ensure that it doesn't conflict with other endpoint IDs associated with the application." }, "Location" : { "shape" : "EndpointLocation", @@ -4633,7 +4686,7 @@ "members" : { "UserAttributes" : { "shape" : "MapOfListOf__string", - "documentation" : "Custom attributes that describe an end user by associating a name with an array of values. For example, an attribute named \"interests\" might have the values [\"science\", \"politics\", \"travel\"]. You can use these attributes as selection criteria when you create a segment of users to engage with a messaging campaign.\n\nThe following characters are not recommended in attribute names: # : ? \\ /. The Amazon Pinpoint console does not display attributes that include these characters in the name. This limitation does not apply to attribute values." + "documentation" : "Custom attributes that describe the user by associating a name with an array of values. For example, an attribute named \"interests\" might have the following values: [\"science\", \"politics\", \"travel\"]. You can use these attributes as selection criteria when you create segments.\n\nThe Amazon Pinpoint console can't display attribute names that include the following characters: hash/pound sign (#), colon (:), question mark (?), backslash (\\), and forward slash (/). 
For this reason, you should avoid using these characters in the names of custom attributes." }, "UserId" : { "shape" : "__string", @@ -4653,6 +4706,51 @@ "documentation" : "List of endpoints", "required" : [ ] }, + "Event" : { + "type" : "structure", + "members" : { + "Attributes" : { + "shape" : "MapOf__string", + "documentation" : "Custom attributes that are associated with the event you're adding or updating." + }, + "ClientSdkVersion" : { + "shape" : "__string", + "documentation" : "The version of the SDK that's running on the client device." + }, + "EventType" : { + "shape" : "__string", + "documentation" : "The name of the custom event that you're recording." + }, + "Metrics" : { + "shape" : "MapOf__double", + "documentation" : "Event metrics" + }, + "Session" : { + "shape" : "Session", + "documentation" : "The session" + }, + "Timestamp" : { + "shape" : "__string", + "documentation" : "The date and time when the event occurred, in ISO 8601 format." + } + }, + "documentation" : "Model for creating or updating events.", + "required" : [ ] + }, + "EventItemResponse" : { + "type" : "structure", + "members" : { + "Message" : { + "shape" : "__string", + "documentation" : "A custom message that is associated with the processing of an event." + }, + "StatusCode" : { + "shape" : "__integer", + "documentation" : "The status code to respond with for a particular event id" + } + }, + "documentation" : "The responses that are returned after you record an event." + }, "EventStream" : { "type" : "structure", "members" : { @@ -4666,7 +4764,7 @@ }, "ExternalId" : { "shape" : "__string", - "documentation" : "DEPRECATED. Your AWS account ID, which you assigned to the ExternalID key in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This requirement is removed, and external IDs are not recommended for IAM roles assumed by Amazon Pinpoint." + "documentation" : "(Deprecated) Your AWS account ID, which you assigned to the ExternalID key in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This requirement is removed, and external IDs are not recommended for IAM roles assumed by Amazon Pinpoint." }, "LastModifiedDate" : { "shape" : "__string", @@ -4684,6 +4782,42 @@ "documentation" : "Model for an event publishing subscription export.", "required" : [ ] }, + "EventsBatch" : { + "type" : "structure", + "members" : { + "Endpoint" : { + "shape" : "PublicEndpoint", + "documentation" : "Endpoint information" + }, + "Events" : { + "shape" : "MapOfEvent", + "documentation" : "Events" + } + }, + "documentation" : "Events batch definition", + "required" : [ ] + }, + "EventsRequest" : { + "type" : "structure", + "members" : { + "BatchItem" : { + "shape" : "MapOfEventsBatch", + "documentation" : "Batch of events with endpoint id as the key and an object of EventsBatch as value. The EventsBatch object has the PublicEndpoint and a map of event Id's to events" + } + }, + "documentation" : "Put Events request", + "required" : [ ] + }, + "EventsResponse" : { + "type" : "structure", + "members" : { + "Results" : { + "shape" : "MapOfItemResponse", + "documentation" : "A map containing a multi part response for each endpoint, with the endpoint id as the key and item response as the value" + } + }, + "documentation" : "The results from processing a put events request" + }, "ExportJobRequest" : { "type" : "structure", "members" : { @@ -4735,7 +4869,7 @@ "members" : { "ApplicationId" : { "shape" : "__string", - "documentation" : "The unique ID of the application to which the job applies." 
+ "documentation" : "The unique ID of the application associated with the export job." }, "CompletedPieces" : { "shape" : "__integer", @@ -4905,7 +5039,7 @@ }, "Body" : { "shape" : "__string", - "documentation" : "The message body of the notification, the email body or the text message." + "documentation" : "The message body of the notification." }, "CollapseKey" : { "shape" : "__string", @@ -5894,7 +6028,7 @@ }, "ExternalId" : { "shape" : "__string", - "documentation" : "DEPRECATED. Your AWS account ID, which you assigned to the ExternalID key in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This requirement is removed, and external IDs are not recommended for IAM roles assumed by Amazon Pinpoint." + "documentation" : "(Deprecated) Your AWS account ID, which you assigned to the ExternalID key in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This requirement is removed, and external IDs are not recommended for IAM roles assumed by Amazon Pinpoint." }, "Format" : { "shape" : "Format", @@ -5910,7 +6044,7 @@ }, "S3Url" : { "shape" : "__string", - "documentation" : "A URL that points to the location within an Amazon S3 bucket that contains the endpoints to import. The location can be a folder or a single file.\nThe URL should follow this format: s3://bucket-name/folder-name/file-name\n\nAmazon Pinpoint will import endpoints from this location and any subfolders it contains." + "documentation" : "The URL of the S3 bucket that contains the segment information to import. The location can be a folder or a single file. The URL should use the following format: s3://bucket-name/folder-name/file-name\n\nAmazon Pinpoint imports endpoints from this location and any subfolders it contains." }, "SegmentId" : { "shape" : "__string", @@ -5933,7 +6067,7 @@ }, "ExternalId" : { "shape" : "__string", - "documentation" : "DEPRECATED. Your AWS account ID, which you assigned to the ExternalID key in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This requirement is removed, and external IDs are not recommended for IAM roles assumed by Amazon Pinpoint." + "documentation" : "(Deprecated) Your AWS account ID, which you assigned to the ExternalID key in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This requirement is removed, and external IDs are not recommended for IAM roles assumed by Amazon Pinpoint." }, "Format" : { "shape" : "Format", @@ -5949,7 +6083,7 @@ }, "S3Url" : { "shape" : "__string", - "documentation" : "A URL that points to the location within an Amazon S3 bucket that contains the endpoints to import. The location can be a folder or a single file.\nThe URL should follow this format: s3://bucket-name/folder-name/file-name\n\nAmazon Pinpoint will import endpoints from this location and any subfolders it contains." + "documentation" : "The URL of the S3 bucket that contains the segment information to import. The location can be a folder or a single file. The URL should use the following format: s3://bucket-name/folder-name/file-name\n\nAmazon Pinpoint imports endpoints from this location and any subfolders it contains." 
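The import-job documentation above spells out the expected S3 URL format (s3://bucket-name/folder-name/file-name) and notes that subfolders are included. A minimal sketch of starting an endpoint import with boto3; the application ID, role ARN, and bucket path are placeholders, and RoleArn is assumed to be the IAM role that lets Amazon Pinpoint read from the bucket:

import boto3

pinpoint = boto3.client("pinpoint", region_name="us-east-1")

# Import endpoints from a CSV file in S3. The URL may point to a folder or
# a single file; subfolders are included.
response = pinpoint.create_import_job(
    ApplicationId="1234567890abcdef1234567890abcdef",  # placeholder
    ImportJobRequest={
        "Format": "CSV",
        "RoleArn": "arn:aws:iam::123456789012:role/pinpoint-import",  # placeholder
        "S3Url": "s3://bucket-name/folder-name/endpoints.csv",
        "RegisterEndpoints": True,
    },
)
print(response["ImportJobResponse"]["Id"])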
}, "SegmentId" : { "shape" : "__string", @@ -6059,6 +6193,20 @@ "httpStatusCode" : 500 } }, + "ItemResponse" : { + "type" : "structure", + "members" : { + "EndpointItemResponse" : { + "shape" : "EndpointItemResponse", + "documentation" : "Endpoint item response after endpoint registration" + }, + "EventsItemResponse" : { + "shape" : "MapOfEventItemResponse", + "documentation" : "Events item response is a multipart response object per event Id, with eventId as the key and EventItemResponse object as the value" + } + }, + "documentation" : "The endpoint and events combined response definition" + }, "JobStatus" : { "type" : "string", "enum" : [ "CREATED", "INITIALIZING", "PROCESSING", "COMPLETING", "COMPLETED", "FAILING", "FAILED" ] @@ -6184,6 +6332,10 @@ "MessageConfiguration" : { "shape" : "DirectMessageConfiguration", "documentation" : "Message configuration." + }, + "TraceId" : { + "shape" : "__string", + "documentation" : "A unique ID that you can use to trace a message. This ID is visible to recipients." } }, "documentation" : "Send message request.", @@ -6217,7 +6369,7 @@ "members" : { "DeliveryStatus" : { "shape" : "DeliveryStatus", - "documentation" : "Delivery status of message." + "documentation" : "The delivery status of the message. Possible values:\n\nSUCCESS - The message was successfully delivered to the endpoint.\n\nTRANSIENT_FAILURE - A temporary error occurred. Amazon Pinpoint will attempt to deliver the message again later.\n\nFAILURE_PERMANENT - An error occurred when delivering the message to the endpoint. Amazon Pinpoint won't attempt to send the message again.\n\nTIMEOUT - The message couldn't be sent within the timeout period.\n\nQUIET_TIME - The local time for the endpoint was within the Quiet Hours for the campaign.\n\nDAILY_CAP - The endpoint has received the maximum number of messages it can receive within a 24-hour period.\n\nHOLDOUT - The endpoint was in a hold out treatment for the campaign.\n\nTHROTTLED - Amazon Pinpoint throttled sending to this endpoint.\n\nEXPIRED - The endpoint address is expired.\n\nCAMPAIGN_CAP - The endpoint received the maximum number of messages allowed by the campaign.\n\nSERVICE_FAILURE - A service-level failure prevented Amazon Pinpoint from delivering the message.\n\nUNKNOWN - An unknown error occurred." }, "MessageId" : { "shape" : "__string", @@ -6302,11 +6454,11 @@ "members" : { "IsoCountryCode" : { "shape" : "__string", - "documentation" : "(Optional) The two-character ISO country code for the country where the phone number was originally registered." + "documentation" : "(Optional) The two-character ISO country code for the country or region where the phone number was originally registered." }, "PhoneNumber" : { "shape" : "__string", - "documentation" : "The phone number to get information about." + "documentation" : "The phone number to get information about. The phone number that you provide should include a country code. If the number doesn't include a valid country code, the operation might result in an error." } }, "documentation" : "Phone Number Information request." @@ -6316,7 +6468,7 @@ "members" : { "Carrier" : { "shape" : "__string", - "documentation" : "The carrier that the phone number is registered with." + "documentation" : "The carrier or servive provider that the phone number is currently registered with." }, "City" : { "shape" : "__string", @@ -6324,23 +6476,23 @@ }, "CleansedPhoneNumberE164" : { "shape" : "__string", - "documentation" : "The cleansed (standardized) phone number in E.164 format." 
+ "documentation" : "The cleansed phone number, shown in E.164 format." }, "CleansedPhoneNumberNational" : { "shape" : "__string", - "documentation" : "The cleansed phone number in national format." + "documentation" : "The cleansed phone number, shown in the local phone number format." }, "Country" : { "shape" : "__string", - "documentation" : "The country where the phone number was originally registered." + "documentation" : "The country or region where the phone number was originally registered." }, "CountryCodeIso2" : { "shape" : "__string", - "documentation" : "The two-character ISO country code for the country where the phone number was originally registered." + "documentation" : "The two-character ISO code for the country or region where the phone number was originally registered." }, "CountryCodeNumeric" : { "shape" : "__string", - "documentation" : "The numeric country code for the country where the phone number was originally registered." + "documentation" : "The numeric code for the country or region where the phone number was originally registered." }, "County" : { "shape" : "__string", @@ -6348,7 +6500,7 @@ }, "OriginalCountryCodeIso2" : { "shape" : "__string", - "documentation" : "The two-character ISO country code that was included in the request body." + "documentation" : "The two-character ISO code for the country or region that you included in the request body." }, "OriginalPhoneNumber" : { "shape" : "__string", @@ -6356,11 +6508,11 @@ }, "PhoneType" : { "shape" : "__string", - "documentation" : "A description of the phone type. Possible values include MOBILE, LANDLINE, VOIP, INVALID, and OTHER." + "documentation" : "A description of the phone type. Possible values are MOBILE, LANDLINE, VOIP, INVALID, PREPAID, and OTHER." }, "PhoneTypeCode" : { "shape" : "__integer", - "documentation" : "The phone type as an integer. Possible values include 0 (MOBILE), 1 (LANDLINE), 2 (VOIP), 3 (INVALID), and 4 (OTHER)." + "documentation" : "The phone type, represented by an integer. Possible values include 0 (MOBILE), 1 (LANDLINE), 2 (VOIP), 3 (INVALID), 4 (OTHER), and 5 (PREPAID)." }, "Timezone" : { "shape" : "__string", @@ -6368,7 +6520,7 @@ }, "ZipCode" : { "shape" : "__string", - "documentation" : "The zip code for the location where the phone number was originally registered." + "documentation" : "The postal code for the location where the phone number was originally registered." } }, "documentation" : "Phone Number Information response." @@ -6393,6 +6545,56 @@ "required" : [ "NumberValidateResponse" ], "payload" : "NumberValidateResponse" }, + "PublicEndpoint" : { + "type" : "structure", + "members" : { + "Address" : { + "shape" : "__string", + "documentation" : "The unique identifier for the recipient. For example, an address could be a device token or an endpoint ID." + }, + "Attributes" : { + "shape" : "MapOfListOf__string", + "documentation" : "Custom attributes that your app reports to Amazon Pinpoint. You can use these attributes as selection criteria when you create a segment." + }, + "ChannelType" : { + "shape" : "ChannelType", + "documentation" : "The channel type.\n\nValid values: APNS, GCM" + }, + "Demographic" : { + "shape" : "EndpointDemographic", + "documentation" : "The endpoint demographic attributes." + }, + "EffectiveDate" : { + "shape" : "__string", + "documentation" : "The date and time when the endpoint was last updated." + }, + "EndpointStatus" : { + "shape" : "__string", + "documentation" : "The status of the endpoint. 
If the update fails, the value is INACTIVE. If the endpoint is updated successfully, the value is ACTIVE." + }, + "Location" : { + "shape" : "EndpointLocation", + "documentation" : "The endpoint location attributes." + }, + "Metrics" : { + "shape" : "MapOf__double", + "documentation" : "Custom metrics that your app reports to Amazon Pinpoint." + }, + "OptOut" : { + "shape" : "__string", + "documentation" : "Indicates whether a user has opted out of receiving messages with one of the following values:\n\nALL - User has opted out of all messages.\n\nNONE - Users has not opted out and receives all messages." + }, + "RequestId" : { + "shape" : "__string", + "documentation" : "A unique identifier that is generated each time the endpoint is updated." + }, + "User" : { + "shape" : "EndpointUser", + "documentation" : "Custom user-specific attributes that your app reports to Amazon Pinpoint." + } + }, + "documentation" : "Public endpoint attributes." + }, "PutEventStreamRequest" : { "type" : "structure", "members" : { @@ -6419,6 +6621,32 @@ "required" : [ "EventStream" ], "payload" : "EventStream" }, + "PutEventsRequest" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." + }, + "EventsRequest" : { + "shape" : "EventsRequest" + } + }, + "required" : [ "ApplicationId", "EventsRequest" ], + "payload" : "EventsRequest" + }, + "PutEventsResponse" : { + "type" : "structure", + "members" : { + "EventsResponse" : { + "shape" : "EventsResponse" + } + }, + "required" : [ "EventsResponse" ], + "payload" : "EventsResponse" + }, "QuietTime" : { "type" : "structure", "members" : { @@ -6706,15 +6934,15 @@ }, "SourceSegments" : { "shape" : "ListOfSegmentReference", - "documentation" : "Segments that define the source of this segment. Currently a maximum of 1 import segment is supported." + "documentation" : "The base segment that you build your segment on. The source segment defines the starting \"universe\" of endpoints. When you add dimensions to the segment, it filters the source segment based on the dimensions that you specify. You can specify more than one dimensional segment. You can only specify one imported segment." }, "SourceType" : { "shape" : "SourceType", - "documentation" : "Include or exclude the source." + "documentation" : "Specify how to handle multiple source segments. For example, if you specify three source segments, should the resulting segment be based on any or all of the segments? Acceptable values: ANY or ALL." }, "Type" : { "shape" : "Type", - "documentation" : "How should the dimensions be applied for the result" + "documentation" : "Specify how to handle multiple segment dimensions. For example, if you specify three dimensions, should the resulting segment include endpoints that are matched by all, any, or none of the dimensions? Acceptable values: ALL, ANY, or NONE." } }, "documentation" : "Segment group definition.", @@ -6725,11 +6953,11 @@ "members" : { "Groups" : { "shape" : "ListOfSegmentGroup", - "documentation" : "List of dimension groups to evaluate." + "documentation" : "A set of segment criteria to evaluate." }, "Include" : { "shape" : "Include", - "documentation" : "How should the groups be applied for the result" + "documentation" : "Specify how to handle multiple segment groups. 
For example, if the segment includes three segment groups, should the resulting segment include endpoints that are matched by all, any, or none of the segment groups you created. Acceptable values: ALL, ANY, or NONE." } }, "documentation" : "Segment group definition.", @@ -6740,11 +6968,11 @@ "members" : { "ChannelCounts" : { "shape" : "MapOf__integer", - "documentation" : "Channel type counts" + "documentation" : "The number of channel types in the imported segment." }, "ExternalId" : { "shape" : "__string", - "documentation" : "DEPRECATED. Your AWS account ID, which you assigned to the ExternalID key in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This requirement is removed, and external IDs are not recommended for IAM roles assumed by Amazon Pinpoint." + "documentation" : "(Deprecated) Your AWS account ID, which you assigned to the ExternalID key in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This requirement is removed, and external IDs are not recommended for IAM roles assumed by Amazon Pinpoint." }, "Format" : { "shape" : "Format", @@ -6756,7 +6984,7 @@ }, "S3Url" : { "shape" : "__string", - "documentation" : "A URL that points to the Amazon S3 location from which the endpoints for this segment were imported." + "documentation" : "The URL of the S3 bucket that the segment was imported from." }, "Size" : { "shape" : "__integer", @@ -6785,7 +7013,7 @@ "members" : { "Id" : { "shape" : "__string", - "documentation" : "Segment Id." + "documentation" : "A unique identifier for the segment." }, "Version" : { "shape" : "__integer", @@ -6799,11 +7027,11 @@ "members" : { "ApplicationId" : { "shape" : "__string", - "documentation" : "The ID of the application to which the segment applies." + "documentation" : "The ID of the application that the segment applies to." }, "CreationDate" : { "shape" : "__string", - "documentation" : "The date the segment was created in ISO 8601 format." + "documentation" : "The date and time when the segment was created." }, "Dimensions" : { "shape" : "SegmentDimensions", @@ -6819,15 +7047,15 @@ }, "LastModifiedDate" : { "shape" : "__string", - "documentation" : "The date the segment was last updated in ISO 8601 format." + "documentation" : "The date and time when the segment was last modified." }, "Name" : { "shape" : "__string", - "documentation" : "The name of segment" + "documentation" : "The name of the segment." }, "SegmentGroups" : { "shape" : "SegmentGroupList", - "documentation" : "Segment definition groups. We currently only support one. If specified Dimensions must be empty." + "documentation" : "A segment group, which consists of zero or more source segments, plus dimensions that are applied to those source segments." }, "SegmentType" : { "shape" : "SegmentType", @@ -6897,6 +7125,10 @@ "shape" : "DirectMessageConfiguration", "documentation" : "Message definitions for the default message and any messages that are tailored for specific channels." }, + "TraceId" : { + "shape" : "__string", + "documentation" : "A unique ID that you can use to trace a message. This ID is visible to recipients." + }, "Users" : { "shape" : "MapOfEndpointSendConfiguration", "documentation" : "A map that associates user IDs with EndpointSendConfiguration objects. Within an EndpointSendConfiguration object, you can tailor the message for a user by specifying message overrides or substitutions." 
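The SendUsersMessageRequest above gains a TraceId member and keys its recipients by user ID, with Amazon Pinpoint delivering to every endpoint associated with each user. A minimal sketch of sending a default message to one user through boto3; the application ID and user ID are placeholders:

import uuid
import boto3

pinpoint = boto3.client("pinpoint", region_name="us-east-1")

# Send a direct message to every endpoint associated with a user ID.
# TraceId is the field added in this revision; it is visible to recipients
# and can be used to trace the message.
response = pinpoint.send_users_messages(
    ApplicationId="1234567890abcdef1234567890abcdef",  # placeholder
    SendUsersMessageRequest={
        "MessageConfiguration": {"DefaultMessage": {"Body": "Hello from Pinpoint"}},
        "Users": {"example-user-id": {}},  # empty EndpointSendConfiguration
        "TraceId": str(uuid.uuid4()),
    },
)
print(response["SendUsersMessageResponse"])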
@@ -6950,6 +7182,29 @@ "required" : [ "SendUsersMessageResponse" ], "payload" : "SendUsersMessageResponse" }, + "Session" : { + "type" : "structure", + "members" : { + "Duration" : { + "shape" : "__integer", + "documentation" : "Session duration in millis" + }, + "Id" : { + "shape" : "__string", + "documentation" : "A unique identifier for the session." + }, + "StartTimestamp" : { + "shape" : "__string", + "documentation" : "The date and time when the session began." + }, + "StopTimestamp" : { + "shape" : "__string", + "documentation" : "The date and time when the session ended." + } + }, + "documentation" : "Information about a session.", + "required" : [ ] + }, "SetDimension" : { "type" : "structure", "members" : { @@ -6967,7 +7222,7 @@ }, "SourceType" : { "type" : "string", - "enum" : [ "ALL", "ANY" ] + "enum" : [ "ALL", "ANY", "NONE" ] }, "TooManyRequestsException" : { "type" : "structure", @@ -7526,7 +7781,7 @@ }, "SegmentGroups" : { "shape" : "SegmentGroupList", - "documentation" : "Segment definition groups. We currently only support one. If specified Dimensions must be empty." + "documentation" : "A segment group, which consists of zero or more source segments, plus dimensions that are applied to those source segments. Your request can only include one segment group. Your request can include either a SegmentGroups object or a Dimensions object, but not both." } }, "documentation" : "Segment definition.", @@ -7700,6 +7955,42 @@ "shape" : "EndpointSendConfiguration" } }, + "MapOfEvent" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "Event" + } + }, + "MapOfEventItemResponse" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "EventItemResponse" + } + }, + "MapOfEventsBatch" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "EventsBatch" + } + }, + "MapOfItemResponse" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "ItemResponse" + } + }, "MapOfMessageResult" : { "type" : "map", "key" : { diff --git a/botocore/data/polly/2016-06-10/service-2.json b/botocore/data/polly/2016-06-10/service-2.json index 22ef5905..8d78aed4 100644 --- a/botocore/data/polly/2016-06-10/service-2.json +++ b/botocore/data/polly/2016-06-10/service-2.json @@ -55,6 +55,22 @@ ], "documentation":"
Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons.
" }, + "GetSpeechSynthesisTask":{ + "name":"GetSpeechSynthesisTask", + "http":{ + "method":"GET", + "requestUri":"/v1/synthesisTasks/{TaskId}", + "responseCode":200 + }, + "input":{"shape":"GetSpeechSynthesisTaskInput"}, + "output":{"shape":"GetSpeechSynthesisTaskOutput"}, + "errors":[ + {"shape":"InvalidTaskIdException"}, + {"shape":"ServiceFailureException"}, + {"shape":"SynthesisTaskNotFoundException"} + ], + "documentation":"
Retrieves a specific SpeechSynthesisTask object based on its TaskID. This object contains information about the given speech synthesis task, including the status of the task, and a link to the S3 bucket containing the output of the task.
" + }, "ListLexicons":{ "name":"ListLexicons", "http":{ @@ -70,6 +86,21 @@ ], "documentation":"
Returns a list of pronunciation lexicons stored in an AWS Region. For more information, see Managing Lexicons.
" }, + "ListSpeechSynthesisTasks":{ + "name":"ListSpeechSynthesisTasks", + "http":{ + "method":"GET", + "requestUri":"/v1/synthesisTasks", + "responseCode":200 + }, + "input":{"shape":"ListSpeechSynthesisTasksInput"}, + "output":{"shape":"ListSpeechSynthesisTasksOutput"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"
Returns a list of SpeechSynthesisTask objects ordered by their creation date. This operation can filter the tasks by their status, for example, allowing users to list only tasks that are completed.
" + }, "PutLexicon":{ "name":"PutLexicon", "http":{ @@ -90,6 +121,30 @@ ], "documentation":"
Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency, therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation. For more information, see Managing Lexicons.
" }, + "StartSpeechSynthesisTask":{ + "name":"StartSpeechSynthesisTask", + "http":{ + "method":"POST", + "requestUri":"/v1/synthesisTasks", + "responseCode":200 + }, + "input":{"shape":"StartSpeechSynthesisTaskInput"}, + "output":{"shape":"StartSpeechSynthesisTaskOutput"}, + "errors":[ + {"shape":"TextLengthExceededException"}, + {"shape":"InvalidS3BucketException"}, + {"shape":"InvalidS3KeyException"}, + {"shape":"InvalidSampleRateException"}, + {"shape":"InvalidSnsTopicArnException"}, + {"shape":"InvalidSsmlException"}, + {"shape":"LexiconNotFoundException"}, + {"shape":"ServiceFailureException"}, + {"shape":"MarksNotSupportedForFormatException"}, + {"shape":"SsmlMarksNotSupportedForTextTypeException"}, + {"shape":"LanguageNotSupportedException"} + ], + "documentation":"
Allows the creation of an asynchronous synthesis task, by starting a new SpeechSynthesisTask. This operation requires all the standard information needed for speech synthesis, plus the name of an Amazon S3 bucket for the service to store the output of the synthesis task and two optional parameters (OutputS3KeyPrefix and SnsTopicArn). Once the synthesis task is created, this operation will return a SpeechSynthesisTask object, which will include an identifier of this task as well as the current status.
" + }, "SynthesizeSpeech":{ "name":"SynthesizeSpeech", "http":{ @@ -106,7 +161,8 @@ {"shape":"LexiconNotFoundException"}, {"shape":"ServiceFailureException"}, {"shape":"MarksNotSupportedForFormatException"}, - {"shape":"SsmlMarksNotSupportedForTextTypeException"} + {"shape":"SsmlMarksNotSupportedForTextTypeException"}, + {"shape":"LanguageNotSupportedException"} ], "documentation":"
Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML input must be valid, well-formed SSML. Some alphabets might not be available with all the voices (for example, Cyrillic might not be read at all by English voices) unless phoneme mapping is used. For more information, see How it Works.
" } @@ -118,6 +174,7 @@ "streaming":true }, "ContentType":{"type":"string"}, + "DateTime":{"type":"timestamp"}, "DeleteLexiconInput":{ "type":"structure", "required":["Name"], @@ -144,6 +201,12 @@ "location":"querystring", "locationName":"LanguageCode" }, + "IncludeAdditionalLanguageCodes":{ + "shape":"IncludeAdditionalLanguageCodes", + "documentation":"
Boolean value indicating whether to return any bilingual voices that use the specified language as an additional language. For instance, if you request all languages that use US English (en-US), and there is an Italian voice that speaks both Italian (it-IT) and US English, that voice will be included if you specify yes but not if you specify no.
", + "location":"querystring", + "locationName":"IncludeAdditionalLanguageCodes" + }, "NextToken":{ "shape":"NextToken", "documentation":"
An opaque pagination token returned from the previous DescribeVoices operation. If present, this indicates where to continue the listing.
", @@ -198,6 +261,28 @@ } } }, + "GetSpeechSynthesisTaskInput":{ + "type":"structure", + "required":["TaskId"], + "members":{ + "TaskId":{ + "shape":"TaskId", + "documentation":"
The Amazon Polly generated identifier for a speech synthesis task.
", + "location":"uri", + "locationName":"TaskId" + } + } + }, + "GetSpeechSynthesisTaskOutput":{ + "type":"structure", + "members":{ + "SynthesisTask":{ + "shape":"SynthesisTask", + "documentation":"
SynthesisTask object that provides information from the requested task, including output format, creation time, task status, and so on.
" + } + } + }, + "IncludeAdditionalLanguageCodes":{"type":"boolean"}, "InvalidLexiconException":{ "type":"structure", "members":{ @@ -216,6 +301,24 @@ "error":{"httpStatusCode":400}, "exception":true }, + "InvalidS3BucketException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"
The provided Amazon S3 bucket name is invalid. Please check your input with S3 bucket naming requirements and try again.
", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidS3KeyException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"
The provided Amazon S3 key prefix is invalid. Please provide a valid S3 object key name.
", + "error":{"httpStatusCode":400}, + "exception":true + }, "InvalidSampleRateException":{ "type":"structure", "members":{ @@ -225,6 +328,15 @@ "error":{"httpStatusCode":400}, "exception":true }, + "InvalidSnsTopicArnException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"
The provided SNS topic ARN is invalid. Please provide a valid SNS topic ARN and try again.
", + "error":{"httpStatusCode":400}, + "exception":true + }, "InvalidSsmlException":{ "type":"structure", "members":{ @@ -234,6 +346,15 @@ "error":{"httpStatusCode":400}, "exception":true }, + "InvalidTaskIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"
The provided Task ID is not valid. Please provide a valid Task ID and try again.
", + "error":{"httpStatusCode":400}, + "exception":true + }, "LanguageCode":{ "type":"string", "enum":[ @@ -251,8 +372,9 @@ "fr-FR", "is-IS", "it-IT", - "ko-KR", "ja-JP", + "hi-IN", + "ko-KR", "nb-NO", "nl-NL", "pl-PL", @@ -264,7 +386,20 @@ "tr-TR" ] }, + "LanguageCodeList":{ + "type":"list", + "member":{"shape":"LanguageCode"} + }, "LanguageName":{"type":"string"}, + "LanguageNotSupportedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The language specified is not currently supported by Amazon Polly in this capacity.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "LastModified":{"type":"timestamp"}, "LexemesCount":{"type":"integer"}, "Lexicon":{ @@ -383,6 +518,42 @@ } } }, + "ListSpeechSynthesisTasksInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Maximum number of speech synthesis tasks returned in a List operation.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token to use in the next request to continue the listing of speech synthesis tasks.

", + "location":"querystring", + "locationName":"NextToken" + }, + "Status":{ + "shape":"TaskStatus", + "documentation":"

Status of the speech synthesis tasks returned in a List operation.

", + "location":"querystring", + "locationName":"Status" + } + } + }, + "ListSpeechSynthesisTasksOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token returned from the previous List operation in this request. If present, this indicates where to continue the listing.

" + }, + "SynthesisTasks":{ + "shape":"SynthesisTasks", + "documentation":"

List of SynthesisTask objects that provide information about the tasks returned in the list request, including output format, creation time, task status, and so on.
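A minimal sketch of paging through these results with a client built from this model; the status filter and region are assumptions for the example:

import botocore.session

session = botocore.session.get_session()
polly = session.create_client('polly', region_name='us-east-1')

# Page through completed tasks; the model caps MaxResults at 100.
kwargs = {'Status': 'completed', 'MaxResults': 100}
while True:
    page = polly.list_speech_synthesis_tasks(**kwargs)
    for task in page.get('SynthesisTasks', []):
        print(task['TaskId'], task['TaskStatus'], task.get('OutputUri'))
    if 'NextToken' not in page:
        break
    kwargs['NextToken'] = page['NextToken']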

" + } + } + }, "MarksNotSupportedForFormatException":{ "type":"structure", "members":{ @@ -410,6 +581,11 @@ "error":{"httpStatusCode":400}, "exception":true }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, "NextToken":{"type":"string"}, "OutputFormat":{ "type":"string", @@ -420,6 +596,15 @@ "pcm" ] }, + "OutputS3BucketName":{ + "type":"string", + "pattern":"^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$" + }, + "OutputS3KeyPrefix":{ + "type":"string", + "pattern":"^[0-9a-zA-Z\\/\\!\\-_\\.\\*\\'\\(\\)]{0,800}$" + }, + "OutputUri":{"type":"string"}, "PutLexiconInput":{ "type":"structure", "required":[ @@ -457,6 +642,10 @@ "fault":true }, "Size":{"type":"integer"}, + "SnsTopicArn":{ + "type":"string", + "pattern":"^arn:aws(-(cn|iso(-b)?|us-gov))?:sns:.*:\\w{12}:.+$" + }, "SpeechMarkType":{ "type":"string", "enum":[ @@ -480,6 +669,145 @@ "error":{"httpStatusCode":400}, "exception":true }, + "StartSpeechSynthesisTaskInput":{ + "type":"structure", + "required":[ + "OutputFormat", + "OutputS3BucketName", + "Text", + "VoiceId" + ], + "members":{ + "LexiconNames":{ + "shape":"LexiconNameList", + "documentation":"

List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.

" + }, + "OutputFormat":{ + "shape":"OutputFormat", + "documentation":"

The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.

" + }, + "OutputS3BucketName":{ + "shape":"OutputS3BucketName", + "documentation":"

Amazon S3 bucket name to which the output file will be saved.

" + }, + "OutputS3KeyPrefix":{ + "shape":"OutputS3KeyPrefix", + "documentation":"

The Amazon S3 key prefix for the output speech file.

" + }, + "SampleRate":{ + "shape":"SampleRate", + "documentation":"

The audio frequency specified in Hz.

The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\", and \"22050\". The default value is \"22050\".

Valid values for pcm are \"8000\" and \"16000\". The default value is \"16000\".

" + }, + "SnsTopicArn":{ + "shape":"SnsTopicArn", + "documentation":"

ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.

" + }, + "SpeechMarkTypes":{ + "shape":"SpeechMarkTypeList", + "documentation":"

The type of speech marks returned for the input text.

" + }, + "Text":{ + "shape":"Text", + "documentation":"

The input text to synthesize. If you specify ssml as the TextType, follow the SSML format for the input text.

" + }, + "TextType":{ + "shape":"TextType", + "documentation":"

Specifies whether the input text is plain text or SSML. The default value is plain text.

" + }, + "VoiceId":{ + "shape":"VoiceId", + "documentation":"

Voice ID to use for the synthesis.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.
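Putting the new asynchronous operations together, the following is a minimal sketch against a client generated from this model; the bucket name, text, and polling interval are placeholders, not part of the model:

import time
import botocore.session

session = botocore.session.get_session()
polly = session.create_client('polly', region_name='us-east-1')

# Submit an asynchronous task that writes the audio to S3.
task = polly.start_speech_synthesis_task(
    OutputFormat='mp3',
    OutputS3BucketName='example-polly-output-bucket',  # placeholder bucket
    Text='Hello from the asynchronous synthesis API.',
    VoiceId='Aditi',
    LanguageCode='en-IN',  # optional; only meaningful for bilingual voices
)['SynthesisTask']

# Poll until the task leaves the scheduled/inProgress states.
while task['TaskStatus'] in ('scheduled', 'inProgress'):
    time.sleep(5)
    task = polly.get_speech_synthesis_task(TaskId=task['TaskId'])['SynthesisTask']

print(task['TaskStatus'], task.get('OutputUri'), task.get('TaskStatusReason'))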

" + } + } + }, + "StartSpeechSynthesisTaskOutput":{ + "type":"structure", + "members":{ + "SynthesisTask":{ + "shape":"SynthesisTask", + "documentation":"

SynthesisTask object that provides information and attributes about a newly submitted speech synthesis task.

" + } + } + }, + "SynthesisTask":{ + "type":"structure", + "members":{ + "TaskId":{ + "shape":"TaskId", + "documentation":"

The Amazon Polly generated identifier for a speech synthesis task.

" + }, + "TaskStatus":{ + "shape":"TaskStatus", + "documentation":"

Current status of the individual speech synthesis task.

" + }, + "TaskStatusReason":{ + "shape":"TaskStatusReason", + "documentation":"

Reason for the current status of a specific speech synthesis task, including errors if the task has failed.

" + }, + "OutputUri":{ + "shape":"OutputUri", + "documentation":"

Pathway for the output speech file.

" + }, + "CreationTime":{ + "shape":"DateTime", + "documentation":"

Timestamp for the time the synthesis task was started.

" + }, + "RequestCharacters":{ + "shape":"RequestCharacters", + "documentation":"

Number of billable characters synthesized.

" + }, + "SnsTopicArn":{ + "shape":"SnsTopicArn", + "documentation":"

ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.

" + }, + "LexiconNames":{ + "shape":"LexiconNameList", + "documentation":"

List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.

" + }, + "OutputFormat":{ + "shape":"OutputFormat", + "documentation":"

The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.

" + }, + "SampleRate":{ + "shape":"SampleRate", + "documentation":"

The audio frequency specified in Hz.

The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\", and \"22050\". The default value is \"22050\".

Valid values for pcm are \"8000\" and \"16000\". The default value is \"16000\".

" + }, + "SpeechMarkTypes":{ + "shape":"SpeechMarkTypeList", + "documentation":"

The type of speech marks returned for the input text.

" + }, + "TextType":{ + "shape":"TextType", + "documentation":"

Specifies whether the input text is plain text or SSML. The default value is plain text.

" + }, + "VoiceId":{ + "shape":"VoiceId", + "documentation":"

Voice ID to use for the synthesis.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

Optional language code for a synthesis task. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.

" + } + }, + "documentation":"

SynthesisTask object that provides information about a speech synthesis task.

" + }, + "SynthesisTaskNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The speech synthesis task with the requested Task ID cannot be found.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "SynthesisTasks":{ + "type":"list", + "member":{"shape":"SynthesisTask"} + }, "SynthesizeSpeechInput":{ "type":"structure", "required":[ @@ -515,6 +843,10 @@ "VoiceId":{ "shape":"VoiceId", "documentation":"

Voice ID to use for the synthesis. You can get a list of available voice IDs by calling the DescribeVoices operation.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

Optional language code for the Synthesize Speech request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.
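For the synchronous path, a small sketch of overriding the default language of a bilingual voice; the text and output file name are placeholders:

import botocore.session

session = botocore.session.get_session()
polly = session.create_client('polly', region_name='us-east-1')

# Aditi defaults to Indian English (en-IN); request Hindi explicitly.
response = polly.synthesize_speech(
    Text='Namaste',
    OutputFormat='mp3',
    VoiceId='Aditi',
    LanguageCode='hi-IN',
)
with open('namaste.mp3', 'wb') as f:
    f.write(response['AudioStream'].read())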

" } } }, @@ -540,13 +872,28 @@ }, "payload":"AudioStream" }, + "TaskId":{ + "type":"string", + "max":128, + "min":1 + }, + "TaskStatus":{ + "type":"string", + "enum":[ + "scheduled", + "inProgress", + "completed", + "failed" + ] + }, + "TaskStatusReason":{"type":"string"}, "Text":{"type":"string"}, "TextLengthExceededException":{ "type":"structure", "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The value of the \"Text\" parameter is longer than the accepted limits. The limit for input text is a maximum of 6000 characters total, of which no more than 3000 can be billed characters. SSML tags are not counted as billed characters.

", + "documentation":"

The value of the \"Text\" parameter is longer than the accepted limits. For the SynthesizeSpeech API, the limit for input text is a maximum of 6000 characters total, of which no more than 3000 can be billed characters. For the StartSpeechSynthesisTask API, the maximum is 200,000 characters, of which no more than 100,000 can be billed characters. SSML tags are not counted as billed characters.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -597,6 +944,10 @@ "Name":{ "shape":"VoiceName", "documentation":"

Name of the voice (for example, Salli, Kendra, etc.). This provides a human readable voice name that you might display in your application.

" + }, + "AdditionalLanguageCodes":{ + "shape":"LanguageCodeList", + "documentation":"

Additional codes for languages available for the specified voice in addition to its default language.

For example, the default language for Aditi is Indian English (en-IN) because it was first used for that language. Since Aditi is bilingual and fluent in both Indian English and Hindi, this parameter would show the code hi-IN.

" } }, "documentation":"

Description of the voice.

" diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index cbae544b..66e98966 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -301,7 +301,8 @@ {"shape":"StorageTypeNotSupportedFault"}, {"shape":"AuthorizationNotFoundFault"}, {"shape":"KMSKeyNotAccessibleFault"}, - {"shape":"DomainNotFoundFault"} + {"shape":"DomainNotFoundFault"}, + {"shape":"BackupPolicyNotFoundFault"} ], "documentation":"

Creates a new DB instance.

" }, @@ -1116,6 +1117,24 @@ ], "documentation":"

Lists all tags on an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

" }, + "ModifyCurrentDBClusterCapacity":{ + "name":"ModifyCurrentDBClusterCapacity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyCurrentDBClusterCapacityMessage"}, + "output":{ + "shape":"DBClusterCapacityInfo", + "resultWrapper":"ModifyCurrentDBClusterCapacityResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBClusterCapacityFault"} + ], + "documentation":"

Sets the capacity of an Aurora Serverless DB cluster to a specific value.

Aurora Serverless scales seamlessly based on the workload on the DB cluster. In some cases, the capacity might not scale fast enough to meet a sudden change in workload, such as a large number of new transactions. Call ModifyCurrentDBClusterCapacity to set the capacity explicitly.

After this call sets the DB cluster capacity, Aurora Serverless can automatically scale the DB cluster based on the cooldown period for scaling up and the cooldown period for scaling down.

For more information about Aurora Serverless, see Using Amazon Aurora Serverless in the Amazon RDS User Guide.

If you call ModifyCurrentDBClusterCapacity with the default TimeoutAction, connections that prevent Aurora Serverless from finding a scaling point might be dropped. For more information about scaling points, see Autoscaling for Aurora Serverless in the Amazon RDS User Guide.
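A minimal sketch of an explicit scaling call with a client generated from this model; the cluster identifier is a placeholder and the capacity value is just one of the allowed Aurora Serverless capacities:

import botocore.session

session = botocore.session.get_session()
rds = session.create_client('rds', region_name='us-east-1')

# Scale the cluster to 8 capacity units, waiting up to 300 seconds for
# a scaling point before forcing the change.
info = rds.modify_current_db_cluster_capacity(
    DBClusterIdentifier='example-serverless-cluster',  # placeholder
    Capacity=8,
    SecondsBeforeTimeout=300,
    TimeoutAction='ForceApplyCapacityChange',
)
print(info.get('CurrentCapacity'), info.get('PendingCapacity'))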

" + }, "ModifyDBCluster":{ "name":"ModifyDBCluster", "http":{ @@ -1204,7 +1223,8 @@ {"shape":"StorageTypeNotSupportedFault"}, {"shape":"AuthorizationNotFoundFault"}, {"shape":"CertificateNotFoundFault"}, - {"shape":"DomainNotFoundFault"} + {"shape":"DomainNotFoundFault"}, + {"shape":"BackupPolicyNotFoundFault"} ], "documentation":"

Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. To learn what modifications you can make to your DB instance, call DescribeValidDBInstanceModifications before you call ModifyDBInstance.

" }, @@ -1588,7 +1608,8 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"DBSecurityGroupNotFoundFault"}, {"shape":"DomainNotFoundFault"}, - {"shape":"DBParameterGroupNotFoundFault"} + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"BackupPolicyNotFoundFault"} ], "documentation":"

Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the original configuration, but with the default security group and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored AZ deployment and not a single-AZ deployment.

If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS doesn't allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot.

If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot.

" }, @@ -1619,7 +1640,8 @@ {"shape":"OptionGroupNotFoundFault"}, {"shape":"StorageTypeNotSupportedFault"}, {"shape":"AuthorizationNotFoundFault"}, - {"shape":"KMSKeyNotAccessibleFault"} + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"BackupPolicyNotFoundFault"} ], "documentation":"

Amazon Relational Database Service (Amazon RDS) supports importing MySQL databases by using backup files. You can create a backup of your on-premises database, store it on Amazon Simple Storage Service (Amazon S3), and then restore the backup file onto a new Amazon RDS DB instance running MySQL. For more information, see Importing Data into an Amazon RDS MySQL DB Instance.

" }, @@ -1654,7 +1676,8 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"DBSecurityGroupNotFoundFault"}, {"shape":"DomainNotFoundFault"}, - {"shape":"DBParameterGroupNotFoundFault"} + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"BackupPolicyNotFoundFault"} ], "documentation":"

Restores a DB instance to an arbitrary point in time. You can restore to any point in time before the time identified by the LatestRestorableTime property. You can restore to a point up to the number of days specified by the BackupRetentionPeriod property.

The target database is created with most of the original configuration, but in a system-selected Availability Zone, with the default security group, the default subnet group, and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and not a single-AZ deployment.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterToPointInTime.

" }, @@ -2007,6 +2030,17 @@ }, "documentation":"

" }, + "BackupPolicyNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"BackupPolicyNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, "Boolean":{"type":"boolean"}, "BooleanOptional":{"type":"boolean"}, "Certificate":{ @@ -2099,7 +2133,7 @@ "documentation":"

The list of log types to disable.

" } }, - "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

" + "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

The EnableLogTypes and DisableLogTypes arrays determine which logs will be exported (or not exported) to CloudWatch Logs. The values within these arrays depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" }, "CopyDBClusterParameterGroupMessage":{ "type":"structure", @@ -2366,7 +2400,15 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs.

" + "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + }, + "EngineMode":{ + "shape":"String", + "documentation":"

The DB engine mode of the DB cluster, either provisioned or serverless.

" + }, + "ScalingConfiguration":{ + "shape":"ScalingConfiguration", + "documentation":"

For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster.
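As an illustrative sketch of how these two new members might be combined when creating a serverless cluster (the identifier, credentials, engine choice, and capacity bounds are placeholders, not part of the model):

import botocore.session

session = botocore.session.get_session()
rds = session.create_client('rds', region_name='us-east-1')

rds.create_db_cluster(
    DBClusterIdentifier='example-serverless-cluster',  # placeholder
    Engine='aurora',
    EngineMode='serverless',
    MasterUsername='admin',
    MasterUserPassword='replace-with-a-real-password',
    ScalingConfiguration={
        'MinCapacity': 2,
        'MaxCapacity': 16,
        'AutoPause': True,
        'SecondsUntilAutoPause': 300,
    },
)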

" } }, "documentation":"

" @@ -2537,7 +2579,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is private.

" + "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName is not specified, and PubliclyAccessible is not specified, the following applies:

If DBSubnetGroupName is specified, and PubliclyAccessible is not specified, the following applies:

" }, "Tags":{"shape":"TagList"}, "DBClusterIdentifier":{ @@ -2610,7 +2652,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs.

" + "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -2664,7 +2706,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is private.

" + "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.

" }, "Tags":{"shape":"TagList"}, "DBSubnetGroupName":{ @@ -2713,7 +2755,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the new DB instance is to export to CloudWatch Logs.

" + "documentation":"

The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -3080,8 +3122,14 @@ }, "EnabledCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

A list of log types that this DB cluster is configured to export to CloudWatch Logs.

" - } + "documentation":"

A list of log types that this DB cluster is configured to export to CloudWatch Logs.

Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files in the Amazon RDS User Guide.

" + }, + "Capacity":{"shape":"IntegerOptional"}, + "EngineMode":{ + "shape":"String", + "documentation":"

The DB engine mode of the DB cluster, either provisioned or serverless.

" + }, + "ScalingConfigurationInfo":{"shape":"ScalingConfigurationInfo"} }, "documentation":"

Contains the details of an Amazon RDS DB cluster.

This data type is used as a response element in the DescribeDBClusters action.

", "wrapper":true @@ -3161,6 +3209,31 @@ }, "exception":true }, + "DBClusterCapacityInfo":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

A user-supplied DB cluster identifier. This identifier is the unique key that identifies a DB cluster.

" + }, + "PendingCapacity":{ + "shape":"IntegerOptional", + "documentation":"

A value that specifies the capacity that the DB cluster scales to next.

" + }, + "CurrentCapacity":{ + "shape":"IntegerOptional", + "documentation":"

The current capacity of the DB cluster.

" + }, + "SecondsBeforeTimeout":{ + "shape":"IntegerOptional", + "documentation":"

The number of seconds before a call to ModifyCurrentDBClusterCapacity times out.

" + }, + "TimeoutAction":{ + "shape":"String", + "documentation":"

The timeout action of a call to ModifyCurrentDBClusterCapacity, either ForceApplyCapacityChange or RollbackCapacityChange.

" + } + } + }, "DBClusterList":{ "type":"list", "member":{ @@ -3347,7 +3420,8 @@ "Status":{ "shape":"String", "documentation":"

Describes the state of association between the IAM role and the DB cluster. The Status property returns one of the following values:

" - } + }, + "FeatureName":{"shape":"String"} }, "documentation":"

Describes an AWS Identity and Access Management (IAM) role that is associated with a DB cluster.

" }, @@ -3612,6 +3686,10 @@ "SupportsReadReplica":{ "shape":"Boolean", "documentation":"

Indicates whether the database engine version supports read replicas.

" + }, + "SupportedEngineModes":{ + "shape":"EngineModeList", + "documentation":"

A list of the supported DB engine modes.

" } }, "documentation":"

This data type is used as a response element in the action DescribeDBEngineVersions.

" @@ -3762,7 +3840,7 @@ }, "PubliclyAccessible":{ "shape":"Boolean", - "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is private.

" + "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

" }, "StatusInfos":{ "shape":"DBInstanceStatusInfoList", @@ -3850,7 +3928,7 @@ }, "EnabledCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

A list of log types that this DB instance is configured to export to CloudWatch Logs.

" + "documentation":"

A list of log types that this DB instance is configured to export to CloudWatch Logs.

Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files in the Amazon RDS User Guide.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -3918,7 +3996,7 @@ }, "Status":{ "shape":"String", - "documentation":"

Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

" + "documentation":"

Status of the DB instance. For a StatusType of read replica, the values can be replicating, replication stop point set, replication stop point reached, error, stopped, or terminated.

" }, "Message":{ "shape":"String", @@ -5713,6 +5791,10 @@ "documentation":"

Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

", "wrapper":true }, + "EngineModeList":{ + "type":"list", + "member":{"shape":"String"} + }, "Event":{ "type":"structure", "members":{ @@ -6006,6 +6088,18 @@ }, "Integer":{"type":"integer"}, "IntegerOptional":{"type":"integer"}, + "InvalidDBClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Capacity isn't a valid Aurora Serverless DB cluster capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256.

", + "error":{ + "code":"InvalidDBClusterCapacityFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidDBClusterSnapshotStateFault":{ "type":"structure", "members":{ @@ -6223,6 +6317,28 @@ }, "Long":{"type":"long"}, "LongOptional":{"type":"long"}, + "ModifyCurrentDBClusterCapacityMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier for the cluster being modified. This parameter is not case-sensitive.

Constraints:

" + }, + "Capacity":{ + "shape":"IntegerOptional", + "documentation":"

The DB cluster capacity.

Constraints:

" + }, + "SecondsBeforeTimeout":{ + "shape":"IntegerOptional", + "documentation":"

The amount of time, in seconds, that Aurora Serverless tries to find a scaling point to perform seamless scaling before enforcing the timeout action. The default is 300.

" + }, + "TimeoutAction":{ + "shape":"String", + "documentation":"

The action to take when the timeout is reached, either ForceApplyCapacityChange or RollbackCapacityChange.

ForceApplyCapacityChange, the default, sets the capacity to the specified value as soon as possible.

RollbackCapacityChange ignores the capacity change if a scaling point is not found in the timeout period.

" + } + } + }, "ModifyDBClusterMessage":{ "type":"structure", "required":["DBClusterIdentifier"], @@ -6286,6 +6402,10 @@ "EngineVersion":{ "shape":"String", "documentation":"

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true.

For a list of valid engine versions, see CreateDBCluster, or call DescribeDBEngineVersions.

" + }, + "ScalingConfiguration":{ + "shape":"ScalingConfiguration", + "documentation":"

The scaling properties of the DB cluster. You can only modify scaling properties for DB clusters in serverless DB engine mode.

" } }, "documentation":"

" @@ -7190,6 +7310,10 @@ "AvailableProcessorFeatures":{ "shape":"AvailableProcessorFeatureList", "documentation":"

A list of the available processor features for the DB instance class of a DB instance.

" + }, + "SupportedEngineModes":{ + "shape":"EngineModeList", + "documentation":"

A list of the supported DB engine modes.

" } }, "documentation":"

Contains a list of available options for a DB instance.

This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

", @@ -7258,6 +7382,10 @@ "ApplyMethod":{ "shape":"ApplyMethod", "documentation":"

Indicates when to apply parameter updates.

" + }, + "SupportedEngineModes":{ + "shape":"EngineModeList", + "documentation":"

The valid DB engine modes.

" } }, "documentation":"

This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

" @@ -8050,7 +8178,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs.

" + "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" } } }, @@ -8126,7 +8254,15 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs.

" + "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + }, + "EngineMode":{ + "shape":"String", + "documentation":"

The DB engine mode of the DB cluster, either provisioned or serverless.

" + }, + "ScalingConfiguration":{ + "shape":"ScalingConfiguration", + "documentation":"

For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster.

" } }, "documentation":"

" @@ -8195,7 +8331,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs.

" + "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" } }, "documentation":"

" @@ -8243,7 +8379,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is private.

" + "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -8300,7 +8436,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs.

" + "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -8421,7 +8557,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

Specifies whether the DB instance is publicly accessible or not. For more information, see CreateDBInstance.

" + "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.

" }, "Tags":{ "shape":"TagList", @@ -8489,7 +8625,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs.

" + "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -8552,7 +8688,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is private.

" + "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -8609,7 +8745,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs.

" + "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -8697,6 +8833,50 @@ }, "exception":true }, + "ScalingConfiguration":{ + "type":"structure", + "members":{ + "MinCapacity":{ + "shape":"IntegerOptional", + "documentation":"

The minimum capacity for an Aurora DB cluster in serverless DB engine mode.

Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256.

The minimum capacity must be less than or equal to the maximum capacity.

" + }, + "MaxCapacity":{ + "shape":"IntegerOptional", + "documentation":"

The maximum capacity for an Aurora DB cluster in serverless DB engine mode.

Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256.

The maximum capacity must be greater than or equal to the minimum capacity.

" + }, + "AutoPause":{ + "shape":"BooleanOptional", + "documentation":"

A value that specifies whether to allow or disallow automatic pause for an Aurora DB cluster in serverless DB engine mode. A DB cluster can be paused only when it's idle (it has no connections).

If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it.

" + }, + "SecondsUntilAutoPause":{ + "shape":"IntegerOptional", + "documentation":"

The time, in seconds, before an Aurora DB cluster in serverless mode is paused.

" + } + }, + "documentation":"

Contains the scaling configuration of an Aurora Serverless DB cluster.

For more information, see Using Amazon Aurora Serverless in the Amazon RDS User Guide.

" + }, + "ScalingConfigurationInfo":{ + "type":"structure", + "members":{ + "MinCapacity":{ + "shape":"IntegerOptional", + "documentation":"

The minimum capacity for the Aurora DB cluster in serverless DB engine mode.

" + }, + "MaxCapacity":{ + "shape":"IntegerOptional", + "documentation":"

The maximum capacity for an Aurora DB cluster in serverless DB engine mode.

" + }, + "AutoPause":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether automatic pause is allowed for the Aurora DB cluster in serverless DB engine mode.

" + }, + "SecondsUntilAutoPause":{ + "shape":"IntegerOptional", + "documentation":"

The remaining amount of time, in seconds, before the Aurora DB cluster in serverless mode is paused. A DB cluster can be paused only when it's idle (it has no connections).

" + } + }, + "documentation":"

Shows the scaling configuration for an Aurora DB cluster in serverless DB engine mode.

For more information, see Using Amazon Aurora Serverless in the Amazon RDS User Guide.

" + }, "SharedSnapshotQuotaExceededFault":{ "type":"structure", "members":{ diff --git a/botocore/data/redshift/2012-12-01/service-2.json b/botocore/data/redshift/2012-12-01/service-2.json index f184f78a..015ba380 100644 --- a/botocore/data/redshift/2012-12-01/service-2.json +++ b/botocore/data/redshift/2012-12-01/service-2.json @@ -122,7 +122,8 @@ {"shape":"TagLimitExceededFault"}, {"shape":"InvalidTagFault"}, {"shape":"LimitExceededFault"}, - {"shape":"DependentServiceRequestThrottlingFault"} + {"shape":"DependentServiceRequestThrottlingFault"}, + {"shape":"InvalidClusterTrackFault"} ], "documentation":"

Creates a new cluster.

To create a cluster in Virtual Private Cloud (VPC), you must provide a cluster subnet group name. The cluster subnet group identifies the subnets of your VPC that Amazon Redshift uses when creating the cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" }, @@ -549,6 +550,23 @@ ], "documentation":"

Returns one or more cluster subnet group objects, which contain metadata about your cluster subnet groups. By default, this operation returns information about all cluster subnet groups that are defined in your AWS account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all subnet groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subnet groups that have any combination of those values are returned.

If both tag keys and values are omitted from the request, subnet groups are returned regardless of whether they have tag keys or values associated with them.

" }, + "DescribeClusterTracks":{ + "name":"DescribeClusterTracks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterTracksMessage"}, + "output":{ + "shape":"TrackListMessage", + "resultWrapper":"DescribeClusterTracksResult" + }, + "errors":[ + {"shape":"InvalidClusterTrackFault"}, + {"shape":"UnauthorizedOperation"} + ], + "documentation":"

Returns a list of all the available maintenance tracks.
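A small sketch of listing the tracks with a client generated from this model; the region and MaxRecords value are arbitrary for the example:

import botocore.session

session = botocore.session.get_session()
redshift = session.create_client('redshift', region_name='us-east-1')

tracks = redshift.describe_cluster_tracks(MaxRecords=20)
for track in tracks.get('MaintenanceTracks', []):
    targets = [t.get('DatabaseVersion') for t in track.get('UpdateTargets', [])]
    print(track.get('MaintenanceTrackName'), track.get('DatabaseVersion'), targets)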

" + }, "DescribeClusterVersions":{ "name":"DescribeClusterVersions", "http":{ @@ -917,7 +935,7 @@ {"shape":"UnsupportedOperationFault"}, {"shape":"DependentServiceUnavailableFault"} ], - "documentation":"

Returns an array of ReservedNodeOfferings which is filtered by payment type, term, and instance type.

" + "documentation":"

Returns an array of DC2 ReservedNodeOfferings that matches the payment type, term, and usage price of the given DC1 reserved node.

" }, "ModifyCluster":{ "name":"ModifyCluster", @@ -947,7 +965,8 @@ {"shape":"LimitExceededFault"}, {"shape":"DependentServiceRequestThrottlingFault"}, {"shape":"InvalidElasticIpFault"}, - {"shape":"TableLimitExceededFault"} + {"shape":"TableLimitExceededFault"}, + {"shape":"InvalidClusterTrackFault"} ], "documentation":"

Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred maintenance window, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

" }, @@ -1152,7 +1171,8 @@ {"shape":"ClusterParameterGroupNotFoundFault"}, {"shape":"ClusterSecurityGroupNotFoundFault"}, {"shape":"LimitExceededFault"}, - {"shape":"DependentServiceRequestThrottlingFault"} + {"shape":"DependentServiceRequestThrottlingFault"}, + {"shape":"InvalidClusterTrackFault"} ], "documentation":"

Creates a new cluster from a snapshot. By default, Amazon Redshift creates the resulting cluster with the same configuration as the original cluster from which the snapshot was created, except that the new cluster is created with the default cluster security and parameter groups. After Amazon Redshift creates the cluster, you can use the ModifyCluster API to associate a different security group and different parameter group with the restored cluster. If you are using a DS node type, you can also choose to change to another DS node type of the same size during restore.

If you restore a cluster into a VPC, you must provide a cluster subnet group where you want the cluster restored.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

" }, @@ -1243,11 +1263,11 @@ "members":{ "ReservedNodeId":{ "shape":"String", - "documentation":"

A string representing the identifier of the Reserved Node to be exchanged.

" + "documentation":"

A string representing the node identifier of the DC1 Reserved Node to be exchanged.

" }, "TargetReservedNodeOfferingId":{ "shape":"String", - "documentation":"

The unique identifier of the Reserved Node offering to be used for the exchange.

" + "documentation":"

The unique identifier of the DC2 Reserved Node offering to be used for the exchange. You can obtain the value for the parameter by calling GetReservedNodeExchangeOfferings.
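A sketch of the DC1-to-DC2 exchange flow implied by these two operations; the reserved node identifier is a placeholder, and picking the first returned offering is only for illustration:

import botocore.session

session = botocore.session.get_session()
redshift = session.create_client('redshift', region_name='us-east-1')

dc1_node_id = 'replace-with-dc1-reserved-node-id'  # placeholder

# Find the DC2 offerings that match the DC1 reservation, then accept one.
offerings = redshift.get_reserved_node_exchange_offerings(
    ReservedNodeId=dc1_node_id,
).get('ReservedNodeOfferings', [])

if offerings:
    redshift.accept_reserved_node_exchange(
        ReservedNodeId=dc1_node_id,
        TargetReservedNodeOfferingId=offerings[0]['ReservedNodeOfferingId'],
    )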

" } } }, @@ -1554,6 +1574,10 @@ "PendingActions":{ "shape":"PendingActionsList", "documentation":"

Cluster operations that are waiting to be started.

" + }, + "MaintenanceTrackName":{ + "shape":"String", + "documentation":"

The name of the maintenance track for the cluster.

" } }, "documentation":"

Describes a cluster.

", @@ -2340,6 +2364,10 @@ "IamRoles":{ "shape":"IamRoleArnList", "documentation":"

A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.

A cluster can have up to 10 IAM roles associated with it at any time.

" + }, + "MaintenanceTrackName":{ + "shape":"String", + "documentation":"

An optional parameter for the name of the maintenance track for the cluster. If you don't provide a maintenance track name, the cluster is assigned to the current track.

" } }, "documentation":"

" @@ -2991,6 +3019,23 @@ }, "documentation":"

" }, + "DescribeClusterTracksMessage":{ + "type":"structure", + "members":{ + "MaintenanceTrackName":{ + "shape":"String", + "documentation":"

The name of the maintenance track.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

An integer value for the maximum number of maintenance tracks to return.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterTracks request exceed the value specified in MaxRecords, Amazon Redshift returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + } + } + }, "DescribeClusterVersionsMessage":{ "type":"structure", "members":{ @@ -3417,6 +3462,13 @@ }, "documentation":"

Describes the status of the elastic IP (EIP) address.

" }, + "EligibleTracksToUpdateList":{ + "type":"list", + "member":{ + "shape":"UpdateTarget", + "locationName":"UpdateTarget" + } + }, "EnableLoggingMessage":{ "type":"structure", "required":[ @@ -3733,7 +3785,7 @@ "members":{ "ReservedNodeId":{ "shape":"String", - "documentation":"

A string representing the node identifier for the Reserved Node to be exchanged.

" + "documentation":"

A string representing the node identifier for the DC1 Reserved Node to be exchanged.

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4103,6 +4155,18 @@ }, "exception":true }, + "InvalidClusterTrackFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The provided cluster track name is not valid.

", + "error":{ + "code":"InvalidClusterTrack", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidElasticIpFault":{ "type":"structure", "members":{ @@ -4303,6 +4367,24 @@ }, "Long":{"type":"long"}, "LongOptional":{"type":"long"}, + "MaintenanceTrack":{ + "type":"structure", + "members":{ + "MaintenanceTrackName":{ + "shape":"String", + "documentation":"

The name of the maintenance track. Possible values are current and trailing.

" + }, + "DatabaseVersion":{ + "shape":"String", + "documentation":"

The version number for the cluster release.

" + }, + "UpdateTargets":{ + "shape":"EligibleTracksToUpdateList", + "documentation":"

An array of UpdateTarget objects to update with the maintenance track.

" + } + }, + "documentation":"

Defines a maintenance track that determines which Amazon Redshift version to apply during a maintenance window. If the value for MaintenanceTrack is current, the cluster is updated to the most recently certified maintenance release. If the value is trailing, the cluster is updated to the previously certified maintenance release.

" + }, "ModifyClusterDbRevisionMessage":{ "type":"structure", "required":[ @@ -4426,6 +4508,10 @@ "EnhancedVpcRouting":{ "shape":"BooleanOptional", "documentation":"

An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.

If this option is true, enhanced VPC routing is enabled.

Default: false

" + }, + "MaintenanceTrackName":{ + "shape":"String", + "documentation":"

The name for the maintenance track that you want to assign for the cluster. This name change is asynchronous. The new track name stays in the PendingModifiedValues for the cluster until the next maintenance window. When the maintenance track changes, the cluster is switched to the latest cluster release available for the maintenance track. At this point, the maintenance track name is applied.
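For example, switching a cluster to the trailing track could look like the following sketch (the cluster identifier is a placeholder):

import botocore.session

session = botocore.session.get_session()
redshift = session.create_client('redshift', region_name='us-east-1')

# The track change is applied during the next maintenance window.
redshift.modify_cluster(
    ClusterIdentifier='example-cluster',  # placeholder
    MaintenanceTrackName='trailing',
)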

" } }, "documentation":"

" @@ -4720,6 +4806,10 @@ "EnhancedVpcRouting":{ "shape":"BooleanOptional", "documentation":"

An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.

If this option is true, enhanced VPC routing is enabled.

Default: false

" + }, + "MaintenanceTrackName":{ + "shape":"String", + "documentation":"

The name of the maintenance track that the cluster will change to during the next maintenance window.

" } }, "documentation":"

Describes cluster attributes that are in a pending state. A change to one or more the attributes was requested and is in progress or will be applied.

" @@ -5187,6 +5277,10 @@ "IamRoles":{ "shape":"IamRoleArnList", "documentation":"

A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.

A cluster can have up to 10 IAM roles associated at any time.

" + }, + "MaintenanceTrackName":{ + "shape":"String", + "documentation":"

The name of the maintenance track for the restored cluster. When you take a snapshot, the snapshot inherits the MaintenanceTrack value from the cluster. The snapshot might be on a different track than the cluster that was the source for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks.

" } }, "documentation":"

" @@ -5535,6 +5629,10 @@ "EnhancedVpcRouting":{ "shape":"Boolean", "documentation":"

An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.

If this option is true, enhanced VPC routing is enabled.

Default: false

" + }, + "MaintenanceTrackName":{ + "shape":"String", + "documentation":"

The name of the maintenance track for the snapshot.

" } }, "documentation":"

Describes a snapshot.

", @@ -6023,6 +6121,26 @@ }, "documentation":"

" }, + "TrackList":{ + "type":"list", + "member":{ + "shape":"MaintenanceTrack", + "locationName":"MaintenanceTrack" + } + }, + "TrackListMessage":{ + "type":"structure", + "members":{ + "MaintenanceTracks":{ + "shape":"TrackList", + "documentation":"

A list of maintenance tracks output by the DescribeClusterTracks operation.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

The starting point to return a set of response track list records. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + } + } + }, "UnauthorizedOperation":{ "type":"structure", "members":{ @@ -6071,6 +6189,20 @@ }, "exception":true }, + "UpdateTarget":{ + "type":"structure", + "members":{ + "MaintenanceTrackName":{ + "shape":"String", + "documentation":"

The name of the new maintenance track.

" + }, + "DatabaseVersion":{ + "shape":"String", + "documentation":"

The cluster version for the new maintenance track.

" + } + }, + "documentation":"

A maintenance track that you can switch the current track to.

" + }, "VpcSecurityGroupIdList":{ "type":"list", "member":{ diff --git a/botocore/data/resource-groups/2017-11-27/service-2.json b/botocore/data/resource-groups/2017-11-27/service-2.json index 94b33922..7551439c 100644 --- a/botocore/data/resource-groups/2017-11-27/service-2.json +++ b/botocore/data/resource-groups/2017-11-27/service-2.json @@ -104,8 +104,8 @@ "ListGroupResources":{ "name":"ListGroupResources", "http":{ - "method":"GET", - "requestUri":"/groups/{GroupName}/resource-identifiers" + "method":"POST", + "requestUri":"/groups/{GroupName}/resource-identifiers-list" }, "input":{"shape":"ListGroupResourcesInput"}, "output":{"shape":"ListGroupResourcesOutput"}, @@ -123,8 +123,8 @@ "ListGroups":{ "name":"ListGroups", "http":{ - "method":"GET", - "requestUri":"/groups" + "method":"POST", + "requestUri":"/groups-list" }, "input":{"shape":"ListGroupsInput"}, "output":{"shape":"ListGroupsOutput"}, @@ -247,7 +247,7 @@ "members":{ "Name":{ "shape":"GroupName", - "documentation":"

The name of the group, which is the identifier of the group in other operations. A resource group name cannot be updated after it is created. A resource group name can have a maximum of 127 characters, including letters, numbers, hyphens, dots, and underscores. The name cannot start with AWS or aws; these are reserved. A resource group name must be unique within your account.

" + "documentation":"

The name of the group, which is the identifier of the group in other operations. A resource group name cannot be updated after it is created. A resource group name can have a maximum of 128 characters, including letters, numbers, hyphens, dots, and underscores. The name cannot start with AWS or aws; these are reserved. A resource group name must be unique within your account.

" }, "Description":{ "shape":"GroupDescription", @@ -259,7 +259,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

The tags to add to the group. A tag is a string-to-string map of key-value pairs. Tag keys can have a maximum character length of 127 characters, and tag values can have a maximum length of 255 characters.

" + "documentation":"

The tags to add to the group. A tag is a string-to-string map of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } } }, @@ -411,7 +411,7 @@ "GroupDescription":{ "type":"string", "max":512, - "pattern":"[\\sa-zA-Z0-9_\\.-]+" + "pattern":"[\\sa-zA-Z0-9_\\.-]*" }, "GroupList":{ "type":"list", @@ -460,6 +460,10 @@ "location":"uri", "locationName":"GroupName" }, + "Filters":{ + "shape":"ResourceFilterList", + "documentation":"
Filters, formatted as ResourceFilter objects, that you want to apply to a ListGroupResources operation.
" + }, "MaxResults":{ "shape":"MaxResults", "documentation":"
The maximum number of group member ARNs that are returned in a single call by ListGroupResources, in paginated output. By default, this number is 50.
", @@ -551,7 +555,45 @@ }, "ResourceArn":{ "type":"string", - "pattern":"arn:aws:[a-z0-9]*:([a-z]{2}-[a-z]+-\\d{1})?:([0-9]{12})?:.+" + "pattern":"arn:aws:[a-z0-9\\-]*:([a-z]{2}-[a-z]+-\\d{1})?:([0-9]{12})?:.+" + }, + "ResourceFilter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{ + "shape":"ResourceFilterName", + "documentation":"
The name of the filter. Filter names are case-sensitive.
" + }, + "Values":{ + "shape":"ResourceFilterValues", + "documentation":"
One or more filter values. Allowed filter values vary by resource filter name, and are case-sensitive.
" + } + }, + "documentation":"
A filter name and value pair that is used to obtain more specific results from a list of resources.
" + }, + "ResourceFilterList":{ + "type":"list", + "member":{"shape":"ResourceFilter"} + }, + "ResourceFilterName":{ + "type":"string", + "enum":["resource-type"] + }, + "ResourceFilterValue":{ + "type":"string", + "max":128, + "min":1, + "pattern":"AWS::[a-zA-Z0-9]+::[a-zA-Z0-9]+" + }, + "ResourceFilterValues":{ + "type":"list", + "member":{"shape":"ResourceFilterValue"}, + "max":5, + "min":1 }, "ResourceIdentifier":{ "type":"structure", @@ -639,7 +681,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"
The tags to add to the specified resource. A tag is a string-to-string map of key-value pairs. Tag keys can have a maximum character length of 127 characters, and tag values can have a maximum length of 255 characters.
" + "documentation":"
The tags to add to the specified resource. A tag is a string-to-string map of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.
" } } }, diff --git a/botocore/data/s3/2006-03-01/service-2.json b/botocore/data/s3/2006-03-01/service-2.json index 9674a4f6..6ac6bf10 100644 --- a/botocore/data/s3/2006-03-01/service-2.json +++ b/botocore/data/s3/2006-03-01/service-2.json @@ -26,7 +26,7 @@ {"shape":"NoSuchUpload"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html", - "documentation":"
Aborts a multipart upload. To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.
" + "documentation":"
Aborts a multipart upload. To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.
" }, "CompleteMultipartUpload":{ "name":"CompleteMultipartUpload", @@ -37,7 +37,7 @@ "input":{"shape":"CompleteMultipartUploadRequest"}, "output":{"shape":"CompleteMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html", - "documentation":"Completes a multipart upload by assembling previously uploaded parts." + "documentation":"
Completes a multipart upload by assembling previously uploaded parts.
" }, "CopyObject":{ "name":"CopyObject", @@ -51,7 +51,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"Creates a copy of an object that is already stored in Amazon S3.", + "documentation":"
Creates a copy of an object that is already stored in Amazon S3.
", "alias":"PutObjectCopy" }, "CreateBucket":{ @@ -67,7 +67,7 @@ {"shape":"BucketAlreadyOwnedByYou"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html", - "documentation":"Creates a new bucket.", + "documentation":"
Creates a new bucket.
", "alias":"PutBucket" }, "CreateMultipartUpload":{ @@ -79,7 +79,7 @@ "input":{"shape":"CreateMultipartUploadRequest"}, "output":{"shape":"CreateMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", - "documentation":"
Initiates a multipart upload and returns an upload ID. Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
", + "documentation":"
Initiates a multipart upload and returns an upload ID. Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
", "alias":"InitiateMultipartUpload" }, "DeleteBucket":{ @@ -90,7 +90,7 @@ }, "input":{"shape":"DeleteBucketRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETE.html", - "documentation":"Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted." + "documentation":"
Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.
" }, "DeleteBucketAnalyticsConfiguration":{ "name":"DeleteBucketAnalyticsConfiguration", @@ -99,7 +99,7 @@ "requestUri":"/{Bucket}?analytics" }, "input":{"shape":"DeleteBucketAnalyticsConfigurationRequest"}, - "documentation":"Deletes an analytics configuration for the bucket (specified by the analytics configuration ID)." + "documentation":"
Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).
" }, "DeleteBucketCors":{ "name":"DeleteBucketCors", @@ -109,7 +109,7 @@ }, "input":{"shape":"DeleteBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html", - "documentation":"Deletes the cors configuration information set for the bucket." + "documentation":"
Deletes the cors configuration information set for the bucket.
" }, "DeleteBucketEncryption":{ "name":"DeleteBucketEncryption", @@ -118,7 +118,7 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"DeleteBucketEncryptionRequest"}, - "documentation":"Deletes the server-side encryption configuration from the bucket." + "documentation":"
Deletes the server-side encryption configuration from the bucket.
" }, "DeleteBucketInventoryConfiguration":{ "name":"DeleteBucketInventoryConfiguration", @@ -127,7 +127,7 @@ "requestUri":"/{Bucket}?inventory" }, "input":{"shape":"DeleteBucketInventoryConfigurationRequest"}, - "documentation":"Deletes an inventory configuration (identified by the inventory ID) from the bucket." + "documentation":"
Deletes an inventory configuration (identified by the inventory ID) from the bucket.
" }, "DeleteBucketLifecycle":{ "name":"DeleteBucketLifecycle", @@ -137,7 +137,7 @@ }, "input":{"shape":"DeleteBucketLifecycleRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETElifecycle.html", - "documentation":"Deletes the lifecycle configuration from the bucket." + "documentation":"
Deletes the lifecycle configuration from the bucket.
" }, "DeleteBucketMetricsConfiguration":{ "name":"DeleteBucketMetricsConfiguration", @@ -146,7 +146,7 @@ "requestUri":"/{Bucket}?metrics" }, "input":{"shape":"DeleteBucketMetricsConfigurationRequest"}, - "documentation":"Deletes a metrics configuration (specified by the metrics configuration ID) from the bucket." + "documentation":"
Deletes a metrics configuration (specified by the metrics configuration ID) from the bucket.
" }, "DeleteBucketPolicy":{ "name":"DeleteBucketPolicy", @@ -156,7 +156,7 @@ }, "input":{"shape":"DeleteBucketPolicyRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html", - "documentation":"Deletes the policy from the bucket." + "documentation":"
Deletes the policy from the bucket.
" }, "DeleteBucketReplication":{ "name":"DeleteBucketReplication", @@ -165,7 +165,7 @@ "requestUri":"/{Bucket}?replication" }, "input":{"shape":"DeleteBucketReplicationRequest"}, - "documentation":"Deletes the replication configuration from the bucket." + "documentation":"
Deletes the replication configuration from the bucket.
" }, "DeleteBucketTagging":{ "name":"DeleteBucketTagging", @@ -175,7 +175,7 @@ }, "input":{"shape":"DeleteBucketTaggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEtagging.html", - "documentation":"Deletes the tags from the bucket." + "documentation":"
Deletes the tags from the bucket.
" }, "DeleteBucketWebsite":{ "name":"DeleteBucketWebsite", @@ -185,7 +185,7 @@ }, "input":{"shape":"DeleteBucketWebsiteRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html", - "documentation":"This operation removes the website configuration from the bucket." + "documentation":"
This operation removes the website configuration from the bucket.
" }, "DeleteObject":{ "name":"DeleteObject", @@ -196,7 +196,7 @@ "input":{"shape":"DeleteObjectRequest"}, "output":{"shape":"DeleteObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectDELETE.html", - "documentation":"Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects." + "documentation":"
Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.
" }, "DeleteObjectTagging":{ "name":"DeleteObjectTagging", @@ -206,7 +206,7 @@ }, "input":{"shape":"DeleteObjectTaggingRequest"}, "output":{"shape":"DeleteObjectTaggingOutput"}, - "documentation":"Removes the tag-set from an existing object." + "documentation":"
Removes the tag-set from an existing object.
" }, "DeleteObjects":{ "name":"DeleteObjects", @@ -217,7 +217,7 @@ "input":{"shape":"DeleteObjectsRequest"}, "output":{"shape":"DeleteObjectsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", - "documentation":"This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.", + "documentation":"
This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.
", "alias":"DeleteMultipleObjects" }, "GetBucketAccelerateConfiguration":{ @@ -228,7 +228,7 @@ }, "input":{"shape":"GetBucketAccelerateConfigurationRequest"}, "output":{"shape":"GetBucketAccelerateConfigurationOutput"}, - "documentation":"Returns the accelerate configuration of a bucket." + "documentation":"
Returns the accelerate configuration of a bucket.
" }, "GetBucketAcl":{ "name":"GetBucketAcl", @@ -239,7 +239,7 @@ "input":{"shape":"GetBucketAclRequest"}, "output":{"shape":"GetBucketAclOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html", - "documentation":"Gets the access control policy for the bucket." + "documentation":"
Gets the access control policy for the bucket.
" }, "GetBucketAnalyticsConfiguration":{ "name":"GetBucketAnalyticsConfiguration", @@ -249,7 +249,7 @@ }, "input":{"shape":"GetBucketAnalyticsConfigurationRequest"}, "output":{"shape":"GetBucketAnalyticsConfigurationOutput"}, - "documentation":"Gets an analytics configuration for the bucket (specified by the analytics configuration ID)." + "documentation":"
Gets an analytics configuration for the bucket (specified by the analytics configuration ID).
" }, "GetBucketCors":{ "name":"GetBucketCors", @@ -260,7 +260,7 @@ "input":{"shape":"GetBucketCorsRequest"}, "output":{"shape":"GetBucketCorsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETcors.html", - "documentation":"Returns the cors configuration for the bucket." + "documentation":"
Returns the cors configuration for the bucket.
" }, "GetBucketEncryption":{ "name":"GetBucketEncryption", @@ -270,7 +270,7 @@ }, "input":{"shape":"GetBucketEncryptionRequest"}, "output":{"shape":"GetBucketEncryptionOutput"}, - "documentation":"Returns the server-side encryption configuration of a bucket." + "documentation":"
Returns the server-side encryption configuration of a bucket.
" }, "GetBucketInventoryConfiguration":{ "name":"GetBucketInventoryConfiguration", @@ -280,7 +280,7 @@ }, "input":{"shape":"GetBucketInventoryConfigurationRequest"}, "output":{"shape":"GetBucketInventoryConfigurationOutput"}, - "documentation":"Returns an inventory configuration (identified by the inventory ID) from the bucket." + "documentation":"
Returns an inventory configuration (identified by the inventory ID) from the bucket.
" }, "GetBucketLifecycle":{ "name":"GetBucketLifecycle", @@ -291,7 +291,7 @@ "input":{"shape":"GetBucketLifecycleRequest"}, "output":{"shape":"GetBucketLifecycleOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlifecycle.html", - "documentation":"Deprecated, see the GetBucketLifecycleConfiguration operation.", + "documentation":"
Deprecated, see the GetBucketLifecycleConfiguration operation.
", "deprecated":true }, "GetBucketLifecycleConfiguration":{ @@ -302,7 +302,7 @@ }, "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, "output":{"shape":"GetBucketLifecycleConfigurationOutput"}, - "documentation":"Returns the lifecycle configuration information set on the bucket." + "documentation":"
Returns the lifecycle configuration information set on the bucket.
" }, "GetBucketLocation":{ "name":"GetBucketLocation", @@ -313,7 +313,7 @@ "input":{"shape":"GetBucketLocationRequest"}, "output":{"shape":"GetBucketLocationOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlocation.html", - "documentation":"Returns the region the bucket resides in." + "documentation":"
Returns the region the bucket resides in.
" }, "GetBucketLogging":{ "name":"GetBucketLogging", @@ -324,7 +324,7 @@ "input":{"shape":"GetBucketLoggingRequest"}, "output":{"shape":"GetBucketLoggingOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlogging.html", - "documentation":"Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner." + "documentation":"
Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.
" }, "GetBucketMetricsConfiguration":{ "name":"GetBucketMetricsConfiguration", @@ -334,7 +334,7 @@ }, "input":{"shape":"GetBucketMetricsConfigurationRequest"}, "output":{"shape":"GetBucketMetricsConfigurationOutput"}, - "documentation":"Gets a metrics configuration (specified by the metrics configuration ID) from the bucket." + "documentation":"
Gets a metrics configuration (specified by the metrics configuration ID) from the bucket.
" }, "GetBucketNotification":{ "name":"GetBucketNotification", @@ -345,7 +345,7 @@ "input":{"shape":"GetBucketNotificationConfigurationRequest"}, "output":{"shape":"NotificationConfigurationDeprecated"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETnotification.html", - "documentation":"Deprecated, see the GetBucketNotificationConfiguration operation.", + "documentation":"
Deprecated, see the GetBucketNotificationConfiguration operation.
", "deprecated":true }, "GetBucketNotificationConfiguration":{ @@ -356,7 +356,7 @@ }, "input":{"shape":"GetBucketNotificationConfigurationRequest"}, "output":{"shape":"NotificationConfiguration"}, - "documentation":"Returns the notification configuration of a bucket." + "documentation":"
Returns the notification configuration of a bucket.
" }, "GetBucketPolicy":{ "name":"GetBucketPolicy", @@ -367,7 +367,7 @@ "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETpolicy.html", - "documentation":"Returns the policy of a specified bucket." + "documentation":"
Returns the policy of a specified bucket.
" }, "GetBucketReplication":{ "name":"GetBucketReplication", @@ -377,7 +377,7 @@ }, "input":{"shape":"GetBucketReplicationRequest"}, "output":{"shape":"GetBucketReplicationOutput"}, - "documentation":"Returns the replication configuration of a bucket." + "documentation":"
Returns the replication configuration of a bucket.
" }, "GetBucketRequestPayment":{ "name":"GetBucketRequestPayment", @@ -388,7 +388,7 @@ "input":{"shape":"GetBucketRequestPaymentRequest"}, "output":{"shape":"GetBucketRequestPaymentOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentGET.html", - "documentation":"Returns the request payment configuration of a bucket." + "documentation":"
Returns the request payment configuration of a bucket.
" }, "GetBucketTagging":{ "name":"GetBucketTagging", @@ -399,7 +399,7 @@ "input":{"shape":"GetBucketTaggingRequest"}, "output":{"shape":"GetBucketTaggingOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETtagging.html", - "documentation":"Returns the tag set associated with the bucket." + "documentation":"
Returns the tag set associated with the bucket.
" }, "GetBucketVersioning":{ "name":"GetBucketVersioning", @@ -410,7 +410,7 @@ "input":{"shape":"GetBucketVersioningRequest"}, "output":{"shape":"GetBucketVersioningOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETversioningStatus.html", - "documentation":"Returns the versioning state of a bucket." + "documentation":"
Returns the versioning state of a bucket.
" }, "GetBucketWebsite":{ "name":"GetBucketWebsite", @@ -421,7 +421,7 @@ "input":{"shape":"GetBucketWebsiteRequest"}, "output":{"shape":"GetBucketWebsiteOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETwebsite.html", - "documentation":"Returns the website configuration for a bucket." + "documentation":"
Returns the website configuration for a bucket.
" }, "GetObject":{ "name":"GetObject", @@ -435,7 +435,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html", - "documentation":"Retrieves objects from Amazon S3." + "documentation":"
Retrieves objects from Amazon S3.
" }, "GetObjectAcl":{ "name":"GetObjectAcl", @@ -449,7 +449,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETacl.html", - "documentation":"Returns the access control list (ACL) of an object." + "documentation":"
Returns the access control list (ACL) of an object.
" }, "GetObjectTagging":{ "name":"GetObjectTagging", @@ -459,7 +459,7 @@ }, "input":{"shape":"GetObjectTaggingRequest"}, "output":{"shape":"GetObjectTaggingOutput"}, - "documentation":"Returns the tag-set of an object." + "documentation":"
Returns the tag-set of an object.
" }, "GetObjectTorrent":{ "name":"GetObjectTorrent", @@ -470,7 +470,7 @@ "input":{"shape":"GetObjectTorrentRequest"}, "output":{"shape":"GetObjectTorrentOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETtorrent.html", - "documentation":"Return torrent files from a bucket." + "documentation":"
Return torrent files from a bucket.
" }, "HeadBucket":{ "name":"HeadBucket", @@ -483,7 +483,7 @@ {"shape":"NoSuchBucket"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketHEAD.html", - "documentation":"This operation is useful to determine if a bucket exists and you have permission to access it." + "documentation":"
This operation is useful to determine if a bucket exists and you have permission to access it.
" }, "HeadObject":{ "name":"HeadObject", @@ -497,7 +497,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html", - "documentation":"The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object." + "documentation":"
The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.
" }, "ListBucketAnalyticsConfigurations":{ "name":"ListBucketAnalyticsConfigurations", @@ -507,7 +507,7 @@ }, "input":{"shape":"ListBucketAnalyticsConfigurationsRequest"}, "output":{"shape":"ListBucketAnalyticsConfigurationsOutput"}, - "documentation":"Lists the analytics configurations for the bucket." + "documentation":"
Lists the analytics configurations for the bucket.
" }, "ListBucketInventoryConfigurations":{ "name":"ListBucketInventoryConfigurations", @@ -517,7 +517,7 @@ }, "input":{"shape":"ListBucketInventoryConfigurationsRequest"}, "output":{"shape":"ListBucketInventoryConfigurationsOutput"}, - "documentation":"Returns a list of inventory configurations for the bucket." + "documentation":"
Returns a list of inventory configurations for the bucket.
" }, "ListBucketMetricsConfigurations":{ "name":"ListBucketMetricsConfigurations", @@ -527,7 +527,7 @@ }, "input":{"shape":"ListBucketMetricsConfigurationsRequest"}, "output":{"shape":"ListBucketMetricsConfigurationsOutput"}, - "documentation":"Lists the metrics configurations for the bucket." + "documentation":"
Lists the metrics configurations for the bucket.
" }, "ListBuckets":{ "name":"ListBuckets", @@ -537,7 +537,7 @@ }, "output":{"shape":"ListBucketsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTServiceGET.html", - "documentation":"Returns a list of all buckets owned by the authenticated sender of the request.", + "documentation":"
Returns a list of all buckets owned by the authenticated sender of the request.
", "alias":"GetService" }, "ListMultipartUploads":{ @@ -549,7 +549,7 @@ "input":{"shape":"ListMultipartUploadsRequest"}, "output":{"shape":"ListMultipartUploadsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html", - "documentation":"This operation lists in-progress multipart uploads." + "documentation":"
This operation lists in-progress multipart uploads.
" }, "ListObjectVersions":{ "name":"ListObjectVersions", @@ -560,7 +560,7 @@ "input":{"shape":"ListObjectVersionsRequest"}, "output":{"shape":"ListObjectVersionsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETVersion.html", - "documentation":"Returns metadata about all of the versions of objects in a bucket.", + "documentation":"
Returns metadata about all of the versions of objects in a bucket.
", "alias":"GetBucketObjectVersions" }, "ListObjects":{ @@ -575,7 +575,7 @@ {"shape":"NoSuchBucket"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html", - "documentation":"Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.", + "documentation":"
Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
", "alias":"GetBucket" }, "ListObjectsV2":{ @@ -589,7 +589,7 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentation":"Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development." + "documentation":"
Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development.
" }, "ListParts":{ "name":"ListParts", @@ -600,7 +600,7 @@ "input":{"shape":"ListPartsRequest"}, "output":{"shape":"ListPartsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html", - "documentation":"Lists the parts that have been uploaded for a specific multipart upload." + "documentation":"
Lists the parts that have been uploaded for a specific multipart upload.
" }, "PutBucketAccelerateConfiguration":{ "name":"PutBucketAccelerateConfiguration", @@ -609,7 +609,7 @@ "requestUri":"/{Bucket}?accelerate" }, "input":{"shape":"PutBucketAccelerateConfigurationRequest"}, - "documentation":"Sets the accelerate configuration of an existing bucket." + "documentation":"
Sets the accelerate configuration of an existing bucket.
" }, "PutBucketAcl":{ "name":"PutBucketAcl", @@ -619,7 +619,7 @@ }, "input":{"shape":"PutBucketAclRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html", - "documentation":"Sets the permissions on a bucket using access control lists (ACL)." + "documentation":"
Sets the permissions on a bucket using access control lists (ACL).
" }, "PutBucketAnalyticsConfiguration":{ "name":"PutBucketAnalyticsConfiguration", @@ -628,7 +628,7 @@ "requestUri":"/{Bucket}?analytics" }, "input":{"shape":"PutBucketAnalyticsConfigurationRequest"}, - "documentation":"Sets an analytics configuration for the bucket (specified by the analytics configuration ID)." + "documentation":"
Sets an analytics configuration for the bucket (specified by the analytics configuration ID).
" }, "PutBucketCors":{ "name":"PutBucketCors", @@ -638,7 +638,7 @@ }, "input":{"shape":"PutBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html", - "documentation":"Sets the cors configuration for a bucket." + "documentation":"
Sets the cors configuration for a bucket.
" }, "PutBucketEncryption":{ "name":"PutBucketEncryption", @@ -647,7 +647,7 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "documentation":"Creates a new server-side encryption configuration (or replaces an existing one, if present)." + "documentation":"
Creates a new server-side encryption configuration (or replaces an existing one, if present).
" }, "PutBucketInventoryConfiguration":{ "name":"PutBucketInventoryConfiguration", @@ -656,7 +656,7 @@ "requestUri":"/{Bucket}?inventory" }, "input":{"shape":"PutBucketInventoryConfigurationRequest"}, - "documentation":"Adds an inventory configuration (identified by the inventory ID) from the bucket." + "documentation":"
Adds an inventory configuration (identified by the inventory ID) from the bucket.
" }, "PutBucketLifecycle":{ "name":"PutBucketLifecycle", @@ -666,7 +666,7 @@ }, "input":{"shape":"PutBucketLifecycleRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html", - "documentation":"Deprecated, see the PutBucketLifecycleConfiguration operation.", + "documentation":"
Deprecated, see the PutBucketLifecycleConfiguration operation.
", "deprecated":true }, "PutBucketLifecycleConfiguration":{ @@ -676,7 +676,7 @@ "requestUri":"/{Bucket}?lifecycle" }, "input":{"shape":"PutBucketLifecycleConfigurationRequest"}, - "documentation":"Sets lifecycle configuration for your bucket. If a lifecycle configuration exists, it replaces it." + "documentation":"
Sets lifecycle configuration for your bucket. If a lifecycle configuration exists, it replaces it.
" }, "PutBucketLogging":{ "name":"PutBucketLogging", @@ -686,7 +686,7 @@ }, "input":{"shape":"PutBucketLoggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html", - "documentation":"Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner." + "documentation":"
Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner.
" }, "PutBucketMetricsConfiguration":{ "name":"PutBucketMetricsConfiguration", @@ -695,7 +695,7 @@ "requestUri":"/{Bucket}?metrics" }, "input":{"shape":"PutBucketMetricsConfigurationRequest"}, - "documentation":"Sets a metrics configuration (specified by the metrics configuration ID) for the bucket." + "documentation":"
Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.
" }, "PutBucketNotification":{ "name":"PutBucketNotification", @@ -705,7 +705,7 @@ }, "input":{"shape":"PutBucketNotificationRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTnotification.html", - "documentation":"Deprecated, see the PutBucketNotificationConfiguraiton operation.", + "documentation":"
Deprecated, see the PutBucketNotificationConfiguration operation.
", "deprecated":true }, "PutBucketNotificationConfiguration":{ @@ -715,7 +715,7 @@ "requestUri":"/{Bucket}?notification" }, "input":{"shape":"PutBucketNotificationConfigurationRequest"}, - "documentation":"Enables notifications of specified events for a bucket." + "documentation":"
Enables notifications of specified events for a bucket.
" }, "PutBucketPolicy":{ "name":"PutBucketPolicy", @@ -725,7 +725,7 @@ }, "input":{"shape":"PutBucketPolicyRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html", - "documentation":"Replaces a policy on a bucket. If the bucket already has a policy, the one in this request completely replaces it." + "documentation":"
Replaces a policy on a bucket. If the bucket already has a policy, the one in this request completely replaces it.
" }, "PutBucketReplication":{ "name":"PutBucketReplication", @@ -734,7 +734,7 @@ "requestUri":"/{Bucket}?replication" }, "input":{"shape":"PutBucketReplicationRequest"}, - "documentation":"Creates a new replication configuration (or replaces an existing one, if present)." + "documentation":"
Creates a new replication configuration (or replaces an existing one, if present).
" }, "PutBucketRequestPayment":{ "name":"PutBucketRequestPayment", @@ -744,7 +744,7 @@ }, "input":{"shape":"PutBucketRequestPaymentRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html", - "documentation":"Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html" + "documentation":"
Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
" }, "PutBucketTagging":{ "name":"PutBucketTagging", @@ -754,7 +754,7 @@ }, "input":{"shape":"PutBucketTaggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html", - "documentation":"Sets the tags for a bucket." + "documentation":"
Sets the tags for a bucket.
" }, "PutBucketVersioning":{ "name":"PutBucketVersioning", @@ -764,7 +764,7 @@ }, "input":{"shape":"PutBucketVersioningRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html", - "documentation":"Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner." + "documentation":"
Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.
" }, "PutBucketWebsite":{ "name":"PutBucketWebsite", @@ -774,7 +774,7 @@ }, "input":{"shape":"PutBucketWebsiteRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html", - "documentation":"Set the website configuration for a bucket." + "documentation":"
Set the website configuration for a bucket.
" }, "PutObject":{ "name":"PutObject", @@ -785,7 +785,7 @@ "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation":"Adds an object to a bucket." + "documentation":"
Adds an object to a bucket.
" }, "PutObjectAcl":{ "name":"PutObjectAcl", @@ -799,7 +799,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html", - "documentation":"uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket" + "documentation":"
Uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket.
" }, "PutObjectTagging":{ "name":"PutObjectTagging", @@ -809,7 +809,7 @@ }, "input":{"shape":"PutObjectTaggingRequest"}, "output":{"shape":"PutObjectTaggingOutput"}, - "documentation":"Sets the supplied tag-set to an object that already exists in a bucket" + "documentation":"
Sets the supplied tag-set to an object that already exists in a bucket.
" }, "RestoreObject":{ "name":"RestoreObject", @@ -823,7 +823,7 @@ {"shape":"ObjectAlreadyInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", - "documentation":"Restores an archived copy of an object back into Amazon S3", + "documentation":"
Restores an archived copy of an object back into Amazon S3.
", "alias":"PostObjectRestore" }, "SelectObjectContent":{ @@ -838,7 +838,7 @@ "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "output":{"shape":"SelectObjectContentOutput"}, - "documentation":"This operation filters the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response." + "documentation":"
This operation filters the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
" }, "UploadPart":{ "name":"UploadPart", @@ -849,7 +849,7 @@ "input":{"shape":"UploadPartRequest"}, "output":{"shape":"UploadPartOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html", - "documentation":"
Uploads a part in a multipart upload. Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
" + "documentation":"
Uploads a part in a multipart upload. Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
" }, "UploadPartCopy":{ "name":"UploadPartCopy", @@ -860,7 +860,7 @@ "input":{"shape":"UploadPartCopyRequest"}, "output":{"shape":"UploadPartCopyOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html", - "documentation":"Uploads a part by copying data from an existing object as data source." + "documentation":"
Uploads a part by copying data from an existing object as data source.
" } }, "shapes":{ @@ -870,10 +870,10 @@ "members":{ "DaysAfterInitiation":{ "shape":"DaysAfterInitiation", - "documentation":"Indicates the number of days that must pass since initiation for Lifecycle to abort an Incomplete Multipart Upload." + "documentation":"
Indicates the number of days that must pass since initiation for Lifecycle to abort an Incomplete Multipart Upload.
" } }, - "documentation":"Specifies the days since the initiation of an Incomplete Multipart Upload that Lifecycle will wait before permanently removing all parts of the upload." + "documentation":"
Specifies the days since the initiation of an Incomplete Multipart Upload that Lifecycle will wait before permanently removing all parts of the upload.
" }, "AbortMultipartUploadOutput":{ "type":"structure", @@ -921,7 +921,7 @@ "members":{ "Status":{ "shape":"BucketAccelerateStatus", - "documentation":"The accelerate configuration of the bucket." + "documentation":"
The accelerate configuration of the bucket.
" } } }, @@ -931,7 +931,7 @@ "members":{ "Grants":{ "shape":"Grants", - "documentation":"A list of grants.", + "documentation":"
A list of grants.
", "locationName":"AccessControlList" }, "Owner":{"shape":"Owner"} @@ -943,10 +943,10 @@ "members":{ "Owner":{ "shape":"OwnerOverride", - "documentation":"The override value for the owner of the replica object." + "documentation":"
The override value for the owner of the replica object.
" } }, - "documentation":"Container for information regarding the access control for replicas." + "documentation":"
Container for information regarding the access control for replicas.
" }, "AccountId":{"type":"string"}, "AllowQuotedRecordDelimiter":{"type":"boolean"}, @@ -973,11 +973,11 @@ "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"The prefix to use when evaluating an AND predicate." + "documentation":"
The prefix to use when evaluating an AND predicate.
" }, "Tags":{ "shape":"TagSet", - "documentation":"The list of tags to use when evaluating an AND predicate.", + "documentation":"
The list of tags to use when evaluating an AND predicate.
", "flattened":true, "locationName":"Tag" } @@ -992,15 +992,15 @@ "members":{ "Id":{ "shape":"AnalyticsId", - "documentation":"The identifier used to represent an analytics configuration." + "documentation":"
The identifier used to represent an analytics configuration.
" }, "Filter":{ "shape":"AnalyticsFilter", - "documentation":"The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis." + "documentation":"
The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis.
" }, "StorageClassAnalysis":{ "shape":"StorageClassAnalysis", - "documentation":"If present, it indicates that data related to access patterns will be collected and made available to analyze the tradeoffs between different storage classes." + "documentation":"
If present, it indicates that data related to access patterns will be collected and made available to analyze the tradeoffs between different storage classes.
" } } }, @@ -1015,7 +1015,7 @@ "members":{ "S3BucketDestination":{ "shape":"AnalyticsS3BucketDestination", - "documentation":"A destination signifying output to an S3 bucket." + "documentation":"
A destination signifying output to an S3 bucket.
" } } }, @@ -1024,15 +1024,15 @@ "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"The prefix to use when evaluating an analytics filter." + "documentation":"
The prefix to use when evaluating an analytics filter.
" }, "Tag":{ "shape":"Tag", - "documentation":"The tag to use when evaluating an analytics filter." + "documentation":"
The tag to use when evaluating an analytics filter.
" }, "And":{ "shape":"AnalyticsAndOperator", - "documentation":"A conjunction (logical AND) of predicates, which is used in evaluating an analytics filter. The operator must have at least two predicates." + "documentation":"
A conjunction (logical AND) of predicates, which is used in evaluating an analytics filter. The operator must have at least two predicates.
" } } }, @@ -1046,19 +1046,19 @@ "members":{ "Format":{ "shape":"AnalyticsS3ExportFileFormat", - "documentation":"The file format used when exporting data to Amazon S3." + "documentation":"
The file format used when exporting data to Amazon S3.
" }, "BucketAccountId":{ "shape":"AccountId", - "documentation":"The account ID that owns the destination bucket. If no account ID is provided, the owner will not be validated prior to exporting data." + "documentation":"
The account ID that owns the destination bucket. If no account ID is provided, the owner will not be validated prior to exporting data.
" }, "Bucket":{ "shape":"BucketName", - "documentation":"The Amazon resource name (ARN) of the bucket to which data is exported." + "documentation":"
The Amazon resource name (ARN) of the bucket to which data is exported.
" }, "Prefix":{ "shape":"Prefix", - "documentation":"The prefix to use when exporting data. The exported data begins with this prefix." + "documentation":"
The prefix to use when exporting data. The exported data begins with this prefix.
" } } }, @@ -1072,11 +1072,11 @@ "members":{ "Name":{ "shape":"BucketName", - "documentation":"The name of the bucket." + "documentation":"
The name of the bucket.
" }, "CreationDate":{ "shape":"CreationDate", - "documentation":"Date the bucket was created." + "documentation":"
Date the bucket was created.
" } } }, @@ -1091,7 +1091,7 @@ "type":"structure", "members":{ }, - "documentation":"The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.", + "documentation":"
The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.
", "exception":true }, "BucketAlreadyOwnedByYou":{ @@ -1186,27 +1186,27 @@ "members":{ "AllowedHeaders":{ "shape":"AllowedHeaders", - "documentation":"Specifies which headers are allowed in a pre-flight OPTIONS request.", + "documentation":"
Specifies which headers are allowed in a pre-flight OPTIONS request.
", "locationName":"AllowedHeader" }, "AllowedMethods":{ "shape":"AllowedMethods", - "documentation":"Identifies HTTP methods that the domain/origin specified in the rule is allowed to execute.", + "documentation":"
Identifies HTTP methods that the domain/origin specified in the rule is allowed to execute.
", "locationName":"AllowedMethod" }, "AllowedOrigins":{ "shape":"AllowedOrigins", - "documentation":"One or more origins you want customers to be able to access the bucket from.", + "documentation":"
One or more origins you want customers to be able to access the bucket from.
", "locationName":"AllowedOrigin" }, "ExposeHeaders":{ "shape":"ExposeHeaders", - "documentation":"One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object).", + "documentation":"
One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object).
", "locationName":"ExposeHeader" }, "MaxAgeSeconds":{ "shape":"MaxAgeSeconds", - "documentation":"The time in seconds that your browser is to cache the preflight response for the specified resource." + "documentation":"
The time in seconds that your browser is to cache the preflight response for the specified resource.
" } } }, @@ -1220,60 +1220,60 @@ "members":{ "FileHeaderInfo":{ "shape":"FileHeaderInfo", - "documentation":"Describes the first line of input. Valid values: None, Ignore, Use." + "documentation":"
Describes the first line of input. Valid values: None, Ignore, Use.
" }, "Comments":{ "shape":"Comments", - "documentation":"Single character used to indicate a row should be ignored when present at the start of a row." + "documentation":"
Single character used to indicate a row should be ignored when present at the start of a row.
" }, "QuoteEscapeCharacter":{ "shape":"QuoteEscapeCharacter", - "documentation":"Single character used for escaping the quote character inside an already escaped value." + "documentation":"
Single character used for escaping the quote character inside an already escaped value.
" }, "RecordDelimiter":{ "shape":"RecordDelimiter", - "documentation":"Value used to separate individual records." + "documentation":"
Value used to separate individual records.
" }, "FieldDelimiter":{ "shape":"FieldDelimiter", - "documentation":"Value used to separate individual fields in a record." + "documentation":"
Value used to separate individual fields in a record.
" }, "QuoteCharacter":{ "shape":"QuoteCharacter", - "documentation":"Value used for escaping where the field delimiter is part of the value." + "documentation":"
Value used for escaping where the field delimiter is part of the value.
" }, "AllowQuotedRecordDelimiter":{ "shape":"AllowQuotedRecordDelimiter", - "documentation":"Specifies that CSV field values may contain quoted record delimiters and such records should be allowed. Default value is FALSE. Setting this value to TRUE may lower performance." + "documentation":"
Specifies that CSV field values may contain quoted record delimiters and such records should be allowed. Default value is FALSE. Setting this value to TRUE may lower performance.
" } }, - "documentation":"Describes how a CSV-formatted input object is formatted." + "documentation":"
Describes how a CSV-formatted input object is formatted.
" }, "CSVOutput":{ "type":"structure", "members":{ "QuoteFields":{ "shape":"QuoteFields", - "documentation":"Indicates whether or not all output fields should be quoted." + "documentation":"
Indicates whether or not all output fields should be quoted.
" }, "QuoteEscapeCharacter":{ "shape":"QuoteEscapeCharacter", - "documentation":"Single character used for escaping the quote character inside an already escaped value." + "documentation":"
Single character used for escaping the quote character inside an already escaped value.
" }, "RecordDelimiter":{ "shape":"RecordDelimiter", - "documentation":"Value used to separate individual records." + "documentation":"
Value used to separate individual records.
" }, "FieldDelimiter":{ "shape":"FieldDelimiter", - "documentation":"Value used to separate individual fields in a record." + "documentation":"
Value used to separate individual fields in a record.
" }, "QuoteCharacter":{ "shape":"QuoteCharacter", - "documentation":"Value used for escaping where the field delimiter is part of the value." + "documentation":"
Value used for escaping where the field delimiter is part of the value.
" } }, - "documentation":"Describes how CSV-formatted results are formatted." + "documentation":"
Describes how CSV-formatted results are formatted.
" }, "CacheControl":{"type":"string"}, "CloudFunction":{"type":"string"}, @@ -1315,29 +1315,29 @@ "Key":{"shape":"ObjectKey"}, "Expiration":{ "shape":"Expiration", - "documentation":"If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.", + "documentation":"
If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
", "location":"header", "locationName":"x-amz-expiration" }, "ETag":{ "shape":"ETag", - "documentation":"Entity tag of the object." + "documentation":"
Entity tag of the object.
" }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"
The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", "location":"header", "locationName":"x-amz-server-side-encryption" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"Version of the object.", + "documentation":"
Version of the object.
", "location":"header", "locationName":"x-amz-version-id" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "documentation":"
If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.
", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -1398,11 +1398,11 @@ "members":{ "ETag":{ "shape":"ETag", - "documentation":"Entity tag returned when the part was uploaded." + "documentation":"
Entity tag returned when the part was uploaded.
" }, "PartNumber":{ "shape":"PartNumber", - "documentation":"Part number that identifies the part. This is a positive integer between 1 and 10,000." + "documentation":"
Part number that identifies the part. This is a positive integer between 1 and 10,000.
" } } }, @@ -1415,7 +1415,8 @@ "type":"string", "enum":[ "NONE", - "GZIP" + "GZIP", + "BZIP2" ] }, "Condition":{ @@ -1423,11 +1424,11 @@ "members":{ "HttpErrorCodeReturnedEquals":{ "shape":"HttpErrorCodeReturnedEquals", - "documentation":"The HTTP error code when the redirect is applied. In the event of an error, if the error code equals this value, then the specified redirect is applied. Required when parent element Condition is specified and sibling KeyPrefixEquals is not specified. If both are specified, then both must be true for the redirect to be applied." + "documentation":"
The HTTP error code when the redirect is applied. In the event of an error, if the error code equals this value, then the specified redirect is applied. Required when parent element Condition is specified and sibling KeyPrefixEquals is not specified. If both are specified, then both must be true for the redirect to be applied.
" }, "KeyPrefixEquals":{ "shape":"KeyPrefixEquals", - "documentation":"The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect request for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied." + "documentation":"
The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect request for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied.
" } } }, @@ -1451,7 +1452,7 @@ "CopyObjectResult":{"shape":"CopyObjectResult"}, "Expiration":{ "shape":"Expiration", - "documentation":"If the object expiration is configured, the response includes this header.", + "documentation":"
If the object expiration is configured, the response includes this header.
", "location":"header", "locationName":"x-amz-expiration" }, @@ -1462,31 +1463,31 @@ }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"Version ID of the newly created copy.", + "documentation":"
Version ID of the newly created copy.
", "location":"header", "locationName":"x-amz-version-id" }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"
The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).
", "location":"header", "locationName":"x-amz-server-side-encryption" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "documentation":"
If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "documentation":"
If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "documentation":"
If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.
", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -1508,7 +1509,7 @@ "members":{ "ACL":{ "shape":"ObjectCannedACL", - "documentation":"The canned ACL to apply to the object.", + "documentation":"
The canned ACL to apply to the object.
", "location":"header", "locationName":"x-amz-acl" }, @@ -1519,91 +1520,91 @@ }, "CacheControl":{ "shape":"CacheControl", - "documentation":"Specifies caching behavior along the request/reply chain.", + "documentation":"
Specifies caching behavior along the request/reply chain.
", "location":"header", "locationName":"Cache-Control" }, "ContentDisposition":{ "shape":"ContentDisposition", - "documentation":"Specifies presentational information for the object.", + "documentation":"
Specifies presentational information for the object.
", "location":"header", "locationName":"Content-Disposition" }, "ContentEncoding":{ "shape":"ContentEncoding", - "documentation":"Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "documentation":"
Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.
", "location":"header", "locationName":"Content-Encoding" }, "ContentLanguage":{ "shape":"ContentLanguage", - "documentation":"The language the content is in.", + "documentation":"
The language the content is in.
", "location":"header", "locationName":"Content-Language" }, "ContentType":{ "shape":"ContentType", - "documentation":"A standard MIME type describing the format of the object data.", + "documentation":"
<p>A standard MIME type describing the format of the object data.</p>
", "location":"header", "locationName":"Content-Type" }, "CopySource":{ "shape":"CopySource", - "documentation":"The name of the source bucket and key name of the source object, separated by a slash (/). Must be URL-encoded.", + "documentation":"
<p>The name of the source bucket and key name of the source object, separated by a slash (/). Must be URL-encoded.</p>
", "location":"header", "locationName":"x-amz-copy-source" }, "CopySourceIfMatch":{ "shape":"CopySourceIfMatch", - "documentation":"Copies the object if its entity tag (ETag) matches the specified tag.", + "documentation":"
<p>Copies the object if its entity tag (ETag) matches the specified tag.</p>
", "location":"header", "locationName":"x-amz-copy-source-if-match" }, "CopySourceIfModifiedSince":{ "shape":"CopySourceIfModifiedSince", - "documentation":"Copies the object if it has been modified since the specified time.", + "documentation":"
<p>Copies the object if it has been modified since the specified time.</p>
", "location":"header", "locationName":"x-amz-copy-source-if-modified-since" }, "CopySourceIfNoneMatch":{ "shape":"CopySourceIfNoneMatch", - "documentation":"Copies the object if its entity tag (ETag) is different than the specified ETag.", + "documentation":"
<p>Copies the object if its entity tag (ETag) is different than the specified ETag.</p>
", "location":"header", "locationName":"x-amz-copy-source-if-none-match" }, "CopySourceIfUnmodifiedSince":{ "shape":"CopySourceIfUnmodifiedSince", - "documentation":"Copies the object if it hasn't been modified since the specified time.", + "documentation":"
<p>Copies the object if it hasn't been modified since the specified time.</p>
", "location":"header", "locationName":"x-amz-copy-source-if-unmodified-since" }, "Expires":{ "shape":"Expires", - "documentation":"The date and time at which the object is no longer cacheable.", + "documentation":"
<p>The date and time at which the object is no longer cacheable.</p>
", "location":"header", "locationName":"Expires" }, "GrantFullControl":{ "shape":"GrantFullControl", - "documentation":"Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.", + "documentation":"
<p>Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.</p>
", "location":"header", "locationName":"x-amz-grant-full-control" }, "GrantRead":{ "shape":"GrantRead", - "documentation":"Allows grantee to read the object data and its metadata.", + "documentation":"
<p>Allows grantee to read the object data and its metadata.</p>
", "location":"header", "locationName":"x-amz-grant-read" }, "GrantReadACP":{ "shape":"GrantReadACP", - "documentation":"Allows grantee to read the object ACL.", + "documentation":"
<p>Allows grantee to read the object ACL.</p>
", "location":"header", "locationName":"x-amz-grant-read-acp" }, "GrantWriteACP":{ "shape":"GrantWriteACP", - "documentation":"Allows grantee to write the ACL for the applicable object.", + "documentation":"
<p>Allows grantee to write the ACL for the applicable object.</p>
", "location":"header", "locationName":"x-amz-grant-write-acp" }, @@ -1614,79 +1615,79 @@ }, "Metadata":{ "shape":"Metadata", - "documentation":"A map of metadata to store with the object in S3.", + "documentation":"
<p>A map of metadata to store with the object in S3.</p>
", "location":"headers", "locationName":"x-amz-meta-" }, "MetadataDirective":{ "shape":"MetadataDirective", - "documentation":"Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request.", + "documentation":"
<p>Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request.</p>
", "location":"header", "locationName":"x-amz-metadata-directive" }, "TaggingDirective":{ "shape":"TaggingDirective", - "documentation":"Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the request.", + "documentation":"
<p>Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the request.</p>
", "location":"header", "locationName":"x-amz-tagging-directive" }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"
<p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p>
", "location":"header", "locationName":"x-amz-server-side-encryption" }, "StorageClass":{ "shape":"StorageClass", - "documentation":"The type of storage to use for the object. Defaults to 'STANDARD'.", + "documentation":"
<p>The type of storage to use for the object. Defaults to 'STANDARD'.</p>
", "location":"header", "locationName":"x-amz-storage-class" }, "WebsiteRedirectLocation":{ "shape":"WebsiteRedirectLocation", - "documentation":"If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "documentation":"
<p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.</p>
", "location":"header", "locationName":"x-amz-website-redirect-location" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "documentation":"
<p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "documentation":"
<p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "documentation":"
<p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version", + "documentation":"
<p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "CopySourceSSECustomerAlgorithm":{ "shape":"CopySourceSSECustomerAlgorithm", - "documentation":"Specifies the algorithm to use when decrypting the source object (e.g., AES256).", + "documentation":"
<p>Specifies the algorithm to use when decrypting the source object (e.g., AES256).</p>
", "location":"header", "locationName":"x-amz-copy-source-server-side-encryption-customer-algorithm" }, "CopySourceSSECustomerKey":{ "shape":"CopySourceSSECustomerKey", - "documentation":"Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.", + "documentation":"
<p>Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.</p>
", "location":"header", "locationName":"x-amz-copy-source-server-side-encryption-customer-key" }, "CopySourceSSECustomerKeyMD5":{ "shape":"CopySourceSSECustomerKeyMD5", - "documentation":"Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "documentation":"
<p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p>
", "location":"header", "locationName":"x-amz-copy-source-server-side-encryption-customer-key-MD5" }, @@ -1697,7 +1698,7 @@ }, "Tagging":{ "shape":"TaggingHeader", - "documentation":"The tag-set for the object destination object this value must be used in conjunction with the TaggingDirective. The tag-set must be encoded as URL Query parameters", + "documentation":"
<p>The tag-set for the object destination object this value must be used in conjunction with the TaggingDirective. The tag-set must be encoded as URL Query parameters</p>
", "location":"header", "locationName":"x-amz-tagging" } @@ -1715,11 +1716,11 @@ "members":{ "ETag":{ "shape":"ETag", - "documentation":"Entity tag of the object." + "documentation":"
<p>Entity tag of the object.</p>
" }, "LastModified":{ "shape":"LastModified", - "documentation":"Date and time at which the object was uploaded." + "documentation":"
<p>Date and time at which the object was uploaded.</p>
" } } }, @@ -1744,7 +1745,7 @@ "members":{ "LocationConstraint":{ "shape":"BucketLocationConstraint", - "documentation":"Specifies the region where the bucket will be created. If you don't specify a region, the bucket will be created in US Standard." + "documentation":"
<p>Specifies the region where the bucket will be created. If you don't specify a region, the bucket will be created in US Standard.</p>
" } } }, @@ -1764,7 +1765,7 @@ "members":{ "ACL":{ "shape":"BucketCannedACL", - "documentation":"The canned ACL to apply to the bucket.", + "documentation":"
<p>The canned ACL to apply to the bucket.</p>
", "location":"header", "locationName":"x-amz-acl" }, @@ -1780,31 +1781,31 @@ }, "GrantFullControl":{ "shape":"GrantFullControl", - "documentation":"Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.", + "documentation":"
<p>Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.</p>
", "location":"header", "locationName":"x-amz-grant-full-control" }, "GrantRead":{ "shape":"GrantRead", - "documentation":"Allows grantee to list the objects in the bucket.", + "documentation":"
<p>Allows grantee to list the objects in the bucket.</p>
", "location":"header", "locationName":"x-amz-grant-read" }, "GrantReadACP":{ "shape":"GrantReadACP", - "documentation":"Allows grantee to read the bucket ACL.", + "documentation":"
<p>Allows grantee to read the bucket ACL.</p>
", "location":"header", "locationName":"x-amz-grant-read-acp" }, "GrantWrite":{ "shape":"GrantWrite", - "documentation":"Allows grantee to create, overwrite, and delete any object in the bucket.", + "documentation":"
<p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
", "location":"header", "locationName":"x-amz-grant-write" }, "GrantWriteACP":{ "shape":"GrantWriteACP", - "documentation":"Allows grantee to write the ACL for the applicable bucket.", + "documentation":"
<p>Allows grantee to write the ACL for the applicable bucket.</p>
", "location":"header", "locationName":"x-amz-grant-write-acp" } @@ -1816,50 +1817,50 @@ "members":{ "AbortDate":{ "shape":"AbortDate", - "documentation":"Date when multipart upload will become eligible for abort operation by lifecycle.", + "documentation":"
<p>Date when multipart upload will become eligible for abort operation by lifecycle.</p>
", "location":"header", "locationName":"x-amz-abort-date" }, "AbortRuleId":{ "shape":"AbortRuleId", - "documentation":"Id of the lifecycle rule that makes a multipart upload eligible for abort operation.", + "documentation":"
<p>Id of the lifecycle rule that makes a multipart upload eligible for abort operation.</p>
", "location":"header", "locationName":"x-amz-abort-rule-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"Name of the bucket to which the multipart upload was initiated.", + "documentation":"
<p>Name of the bucket to which the multipart upload was initiated.</p>
", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"Object key for which the multipart upload was initiated." + "documentation":"
<p>Object key for which the multipart upload was initiated.</p>
" }, "UploadId":{ "shape":"MultipartUploadId", - "documentation":"ID for the initiated multipart upload." + "documentation":"
<p>ID for the initiated multipart upload.</p>
" }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"
<p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p>
", "location":"header", "locationName":"x-amz-server-side-encryption" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "documentation":"
<p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "documentation":"
<p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "documentation":"
<p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -1879,7 +1880,7 @@ "members":{ "ACL":{ "shape":"ObjectCannedACL", - "documentation":"The canned ACL to apply to the object.", + "documentation":"
<p>The canned ACL to apply to the object.</p>
", "location":"header", "locationName":"x-amz-acl" }, @@ -1890,61 +1891,61 @@ }, "CacheControl":{ "shape":"CacheControl", - "documentation":"Specifies caching behavior along the request/reply chain.", + "documentation":"
<p>Specifies caching behavior along the request/reply chain.</p>
", "location":"header", "locationName":"Cache-Control" }, "ContentDisposition":{ "shape":"ContentDisposition", - "documentation":"Specifies presentational information for the object.", + "documentation":"
<p>Specifies presentational information for the object.</p>
", "location":"header", "locationName":"Content-Disposition" }, "ContentEncoding":{ "shape":"ContentEncoding", - "documentation":"Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "documentation":"
<p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p>
", "location":"header", "locationName":"Content-Encoding" }, "ContentLanguage":{ "shape":"ContentLanguage", - "documentation":"The language the content is in.", + "documentation":"
<p>The language the content is in.</p>
", "location":"header", "locationName":"Content-Language" }, "ContentType":{ "shape":"ContentType", - "documentation":"A standard MIME type describing the format of the object data.", + "documentation":"
<p>A standard MIME type describing the format of the object data.</p>
", "location":"header", "locationName":"Content-Type" }, "Expires":{ "shape":"Expires", - "documentation":"The date and time at which the object is no longer cacheable.", + "documentation":"
<p>The date and time at which the object is no longer cacheable.</p>
", "location":"header", "locationName":"Expires" }, "GrantFullControl":{ "shape":"GrantFullControl", - "documentation":"Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.", + "documentation":"
<p>Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.</p>
", "location":"header", "locationName":"x-amz-grant-full-control" }, "GrantRead":{ "shape":"GrantRead", - "documentation":"Allows grantee to read the object data and its metadata.", + "documentation":"
<p>Allows grantee to read the object data and its metadata.</p>
", "location":"header", "locationName":"x-amz-grant-read" }, "GrantReadACP":{ "shape":"GrantReadACP", - "documentation":"Allows grantee to read the object ACL.", + "documentation":"
<p>Allows grantee to read the object ACL.</p>
", "location":"header", "locationName":"x-amz-grant-read-acp" }, "GrantWriteACP":{ "shape":"GrantWriteACP", - "documentation":"Allows grantee to write the ACL for the applicable object.", + "documentation":"
<p>Allows grantee to write the ACL for the applicable object.</p>
", "location":"header", "locationName":"x-amz-grant-write-acp" }, @@ -1955,49 +1956,49 @@ }, "Metadata":{ "shape":"Metadata", - "documentation":"A map of metadata to store with the object in S3.", + "documentation":"
<p>A map of metadata to store with the object in S3.</p>
", "location":"headers", "locationName":"x-amz-meta-" }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"
<p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p>
", "location":"header", "locationName":"x-amz-server-side-encryption" }, "StorageClass":{ "shape":"StorageClass", - "documentation":"The type of storage to use for the object. Defaults to 'STANDARD'.", + "documentation":"
<p>The type of storage to use for the object. Defaults to 'STANDARD'.</p>
", "location":"header", "locationName":"x-amz-storage-class" }, "WebsiteRedirectLocation":{ "shape":"WebsiteRedirectLocation", - "documentation":"If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "documentation":"
<p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.</p>
", "location":"header", "locationName":"x-amz-website-redirect-location" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "documentation":"
<p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "documentation":"
<p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "documentation":"
<p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version", + "documentation":"
<p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2008,7 +2009,7 @@ }, "Tagging":{ "shape":"TaggingHeader", - "documentation":"The tag-set for the object. The tag-set must be encoded as URL Query parameters", + "documentation":"
<p>The tag-set for the object. The tag-set must be encoded as URL Query parameters</p>
", "location":"header", "locationName":"x-amz-tagging" } @@ -2031,7 +2032,7 @@ }, "Quiet":{ "shape":"Quiet", - "documentation":"Element to enable quiet mode for the request. When you add this element, you must set its value to true." + "documentation":"
<p>Element to enable quiet mode for the request. When you add this element, you must set its value to true.</p>
" } } }, @@ -2044,13 +2045,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket from which an analytics configuration is deleted.", + "documentation":"
<p>The name of the bucket from which an analytics configuration is deleted.</p>
", "location":"uri", "locationName":"Bucket" }, "Id":{ "shape":"AnalyticsId", - "documentation":"The identifier used to represent an analytics configuration.", + "documentation":"
<p>The identifier used to represent an analytics configuration.</p>
", "location":"querystring", "locationName":"id" } @@ -2073,7 +2074,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket containing the server-side encryption configuration to delete.", + "documentation":"
<p>The name of the bucket containing the server-side encryption configuration to delete.</p>
", "location":"uri", "locationName":"Bucket" } @@ -2088,13 +2089,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket containing the inventory configuration to delete.", + "documentation":"
<p>The name of the bucket containing the inventory configuration to delete.</p>
", "location":"uri", "locationName":"Bucket" }, "Id":{ "shape":"InventoryId", - "documentation":"The ID used to identify the inventory configuration.", + "documentation":"
<p>The ID used to identify the inventory configuration.</p>
", "location":"querystring", "locationName":"id" } @@ -2120,13 +2121,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket containing the metrics configuration to delete.", + "documentation":"
<p>The name of the bucket containing the metrics configuration to delete.</p>
", "location":"uri", "locationName":"Bucket" }, "Id":{ "shape":"MetricsId", - "documentation":"The ID used to identify the metrics configuration.", + "documentation":"
<p>The ID used to identify the metrics configuration.</p>
", "location":"querystring", "locationName":"id" } @@ -2194,19 +2195,19 @@ "Owner":{"shape":"Owner"}, "Key":{ "shape":"ObjectKey", - "documentation":"The object key." + "documentation":"
<p>The object key.</p>
" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"Version ID of an object." + "documentation":"
<p>Version ID of an object.</p>
" }, "IsLatest":{ "shape":"IsLatest", - "documentation":"Specifies whether the object is (true) or is not (false) the latest version of an object." + "documentation":"
<p>Specifies whether the object is (true) or is not (false) the latest version of an object.</p>
" }, "LastModified":{ "shape":"LastModified", - "documentation":"Date and time the object was last modified." + "documentation":"
<p>Date and time the object was last modified.</p>
" } } }, @@ -2221,13 +2222,13 @@ "members":{ "DeleteMarker":{ "shape":"DeleteMarker", - "documentation":"Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker.", + "documentation":"
<p>Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker.</p>
", "location":"header", "locationName":"x-amz-delete-marker" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"Returns the version ID of the delete marker created as a result of the DELETE operation.", + "documentation":"
<p>Returns the version ID of the delete marker created as a result of the DELETE operation.</p>
", "location":"header", "locationName":"x-amz-version-id" }, @@ -2257,13 +2258,13 @@ }, "MFA":{ "shape":"MFA", - "documentation":"The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.", + "documentation":"
<p>The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.</p>
", "location":"header", "locationName":"x-amz-mfa" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"VersionId used to reference a specific version of the object.", + "documentation":"
<p>VersionId used to reference a specific version of the object.</p>
", "location":"querystring", "locationName":"versionId" }, @@ -2279,7 +2280,7 @@ "members":{ "VersionId":{ "shape":"ObjectVersionId", - "documentation":"The versionId of the object the tag-set was removed from.", + "documentation":"
<p>The versionId of the object the tag-set was removed from.</p>
", "location":"header", "locationName":"x-amz-version-id" } @@ -2304,7 +2305,7 @@ }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"The versionId of the object that the tag-set will be removed from.", + "documentation":"
<p>The versionId of the object that the tag-set will be removed from.</p>
", "location":"querystring", "locationName":"versionId" } @@ -2344,7 +2345,7 @@ }, "MFA":{ "shape":"MFA", - "documentation":"The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.", + "documentation":"
<p>The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.</p>
", "location":"header", "locationName":"x-amz-mfa" }, @@ -2378,26 +2379,26 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"Amazon resource name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule." + "documentation":"
<p>Amazon resource name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.</p>
" }, "Account":{ "shape":"AccountId", - "documentation":"Account ID of the destination bucket. Currently this is only being verified if Access Control Translation is enabled" + "documentation":"
<p>Account ID of the destination bucket. Currently this is only being verified if Access Control Translation is enabled</p>
" }, "StorageClass":{ "shape":"StorageClass", - "documentation":"The class of storage used to store the object." + "documentation":"
<p>The class of storage used to store the object.</p>
" }, "AccessControlTranslation":{ "shape":"AccessControlTranslation", - "documentation":"Container for information regarding the access control for replicas." + "documentation":"
<p>Container for information regarding the access control for replicas.</p>
" }, "EncryptionConfiguration":{ "shape":"EncryptionConfiguration", - "documentation":"Container for information regarding encryption based configuration for replicas." + "documentation":"
<p>Container for information regarding encryption based configuration for replicas.</p>
" } }, - "documentation":"Container for replication destination information." + "documentation":"
<p>Container for replication destination information.</p>
" }, "DisplayName":{"type":"string"}, "ETag":{"type":"string"}, @@ -2405,7 +2406,7 @@ "EnableRequestProgress":{"type":"boolean"}, "EncodingType":{ "type":"string", - "documentation":"Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.", + "documentation":"
<p>Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.</p>
", "enum":["url"] }, "Encryption":{ @@ -2414,28 +2415,28 @@ "members":{ "EncryptionType":{ "shape":"ServerSideEncryption", - "documentation":"The server-side encryption algorithm used when storing job results in Amazon S3 (e.g., AES256, aws:kms)." + "documentation":"
<p>The server-side encryption algorithm used when storing job results in Amazon S3 (e.g., AES256, aws:kms).</p>
" }, "KMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"If the encryption type is aws:kms, this optional value specifies the AWS KMS key ID to use for encryption of job results." + "documentation":"
<p>If the encryption type is aws:kms, this optional value specifies the AWS KMS key ID to use for encryption of job results.</p>
" }, "KMSContext":{ "shape":"KMSContext", - "documentation":"If the encryption type is aws:kms, this optional value can be used to specify the encryption context for the restore results." + "documentation":"
<p>If the encryption type is aws:kms, this optional value can be used to specify the encryption context for the restore results.</p>
" } }, - "documentation":"Describes the server-side encryption that will be applied to the restore results." + "documentation":"
<p>Describes the server-side encryption that will be applied to the restore results.</p>
" }, "EncryptionConfiguration":{ "type":"structure", "members":{ "ReplicaKmsKeyID":{ "shape":"ReplicaKmsKeyID", - "documentation":"The id of the KMS key used to encrypt the replica object." + "documentation":"
<p>The id of the KMS key used to encrypt the replica object.</p>
" } }, - "documentation":"Container for information regarding encryption based configuration for replicas." + "documentation":"
<p>Container for information regarding encryption based configuration for replicas.</p>
" }, "EndEvent":{ "type":"structure", @@ -2458,7 +2459,7 @@ "members":{ "Key":{ "shape":"ObjectKey", - "documentation":"The object key name to use when a 4XX class error occurs." + "documentation":"
<p>The object key name to use when a 4XX class error occurs.</p>
" } } }, @@ -2469,7 +2470,7 @@ }, "Event":{ "type":"string", - "documentation":"Bucket event for which to send notifications.", + "documentation":"
<p>Bucket event for which to send notifications.</p>
", "enum":[ "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", @@ -2523,16 +2524,16 @@ "members":{ "Name":{ "shape":"FilterRuleName", - "documentation":"Object key name prefix or suffix identifying one or more objects to which the filtering rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes and suffixes are not supported. For more information, go to Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide." + "documentation":"
<p>Object key name prefix or suffix identifying one or more objects to which the filtering rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes and suffixes are not supported. For more information, go to Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.</p>
" }, "Value":{"shape":"FilterRuleValue"} }, - "documentation":"Container for key value pair that defines the criteria for the filter rule." + "documentation":"
<p>Container for key value pair that defines the criteria for the filter rule.</p>
" }, "FilterRuleList":{ "type":"list", "member":{"shape":"FilterRule"}, - "documentation":"A list of containers for key value pair that defines the criteria for the filter rule.", + "documentation":"
<p>A list of containers for key value pair that defines the criteria for the filter rule.</p>
", "flattened":true }, "FilterRuleName":{ @@ -2548,7 +2549,7 @@ "members":{ "Status":{ "shape":"BucketAccelerateStatus", - "documentation":"The accelerate configuration of the bucket." + "documentation":"
<p>The accelerate configuration of the bucket.</p>
" } } }, @@ -2558,7 +2559,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"Name of the bucket for which the accelerate configuration is retrieved.", + "documentation":"
<p>Name of the bucket for which the accelerate configuration is retrieved.</p>
", "location":"uri", "locationName":"Bucket" } @@ -2570,7 +2571,7 @@ "Owner":{"shape":"Owner"}, "Grants":{ "shape":"Grants", - "documentation":"A list of grants.", + "documentation":"
<p>A list of grants.</p>
", "locationName":"AccessControlList" } } @@ -2591,7 +2592,7 @@ "members":{ "AnalyticsConfiguration":{ "shape":"AnalyticsConfiguration", - "documentation":"The configuration and any analyses for the analytics filter." + "documentation":"
<p>The configuration and any analyses for the analytics filter.</p>
" } }, "payload":"AnalyticsConfiguration" @@ -2605,13 +2606,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket from which an analytics configuration is retrieved.", + "documentation":"
<p>The name of the bucket from which an analytics configuration is retrieved.</p>
", "location":"uri", "locationName":"Bucket" }, "Id":{ "shape":"AnalyticsId", - "documentation":"The identifier used to represent an analytics configuration.", + "documentation":"
<p>The identifier used to represent an analytics configuration.</p>
", "location":"querystring", "locationName":"id" } @@ -2650,7 +2651,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket from which the server-side encryption configuration is retrieved.", + "documentation":"
<p>The name of the bucket from which the server-side encryption configuration is retrieved.</p>
", "location":"uri", "locationName":"Bucket" } @@ -2661,7 +2662,7 @@ "members":{ "InventoryConfiguration":{ "shape":"InventoryConfiguration", - "documentation":"Specifies the inventory configuration." + "documentation":"
<p>Specifies the inventory configuration.</p>
" } }, "payload":"InventoryConfiguration" @@ -2675,13 +2676,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket containing the inventory configuration to retrieve.", + "documentation":"
<p>The name of the bucket containing the inventory configuration to retrieve.</p>
", "location":"uri", "locationName":"Bucket" }, "Id":{ "shape":"InventoryId", - "documentation":"The ID used to identify the inventory configuration.", + "documentation":"
<p>The ID used to identify the inventory configuration.</p>
", "location":"querystring", "locationName":"id" } @@ -2766,7 +2767,7 @@ "members":{ "MetricsConfiguration":{ "shape":"MetricsConfiguration", - "documentation":"Specifies the metrics configuration." + "documentation":"
<p>Specifies the metrics configuration.</p>
" } }, "payload":"MetricsConfiguration" @@ -2780,13 +2781,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket containing the metrics configuration to retrieve.", + "documentation":"
<p>The name of the bucket containing the metrics configuration to retrieve.</p>
", "location":"uri", "locationName":"Bucket" }, "Id":{ "shape":"MetricsId", - "documentation":"The ID used to identify the metrics configuration.", + "documentation":"
<p>The ID used to identify the metrics configuration.</p>
", "location":"querystring", "locationName":"id" } @@ -2798,7 +2799,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"Name of the bucket to get the notification configuration for.", + "documentation":"
<p>Name of the bucket to get the notification configuration for.</p>
", "location":"uri", "locationName":"Bucket" } @@ -2809,7 +2810,7 @@ "members":{ "Policy":{ "shape":"Policy", - "documentation":"The bucket policy as a JSON document." + "documentation":"
<p>The bucket policy as a JSON document.</p>
" } }, "payload":"Policy" @@ -2848,7 +2849,7 @@ "members":{ "Payer":{ "shape":"Payer", - "documentation":"Specifies who pays for the download and request fees." + "documentation":"
<p>Specifies who pays for the download and request fees.</p>
" } } }, @@ -2886,11 +2887,11 @@ "members":{ "Status":{ "shape":"BucketVersioningStatus", - "documentation":"The versioning state of the bucket." + "documentation":"
<p>The versioning state of the bucket.</p>
" }, "MFADelete":{ "shape":"MFADeleteStatus", - "documentation":"Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned.", + "documentation":"
<p>Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned.</p>
", "locationName":"MfaDelete" } } @@ -2932,7 +2933,7 @@ "Owner":{"shape":"Owner"}, "Grants":{ "shape":"Grants", - "documentation":"A list of grants.", + "documentation":"
<p>A list of grants.</p>
", "locationName":"AccessControlList" }, "RequestCharged":{ @@ -2961,7 +2962,7 @@ }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"VersionId used to reference a specific version of the object.", + "documentation":"
<p>VersionId used to reference a specific version of the object.</p>
", "location":"querystring", "locationName":"versionId" }, @@ -2977,12 +2978,12 @@ "members":{ "Body":{ "shape":"Body", - "documentation":"Object data.", + "documentation":"
<p>Object data.</p>
", "streaming":true }, "DeleteMarker":{ "shape":"DeleteMarker", - "documentation":"Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.", + "documentation":"
<p>Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.</p>
", "location":"header", "locationName":"x-amz-delete-marker" }, @@ -2993,121 +2994,121 @@ }, "Expiration":{ "shape":"Expiration", - "documentation":"If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.", + "documentation":"
<p>If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.</p>
", "location":"header", "locationName":"x-amz-expiration" }, "Restore":{ "shape":"Restore", - "documentation":"Provides information about object restoration operation and expiration time of the restored object copy.", + "documentation":"
<p>Provides information about object restoration operation and expiration time of the restored object copy.</p>
", "location":"header", "locationName":"x-amz-restore" }, "LastModified":{ "shape":"LastModified", - "documentation":"Last modified date of the object", + "documentation":"
<p>Last modified date of the object</p>
", "location":"header", "locationName":"Last-Modified" }, "ContentLength":{ "shape":"ContentLength", - "documentation":"Size of the body in bytes.", + "documentation":"
<p>Size of the body in bytes.</p>
", "location":"header", "locationName":"Content-Length" }, "ETag":{ "shape":"ETag", - "documentation":"An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL", + "documentation":"
<p>An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL</p>
", "location":"header", "locationName":"ETag" }, "MissingMeta":{ "shape":"MissingMeta", - "documentation":"This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.", + "documentation":"
<p>This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.</p>
", "location":"header", "locationName":"x-amz-missing-meta" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"Version of the object.", + "documentation":"
<p>Version of the object.</p>
", "location":"header", "locationName":"x-amz-version-id" }, "CacheControl":{ "shape":"CacheControl", - "documentation":"Specifies caching behavior along the request/reply chain.", + "documentation":"
<p>Specifies caching behavior along the request/reply chain.</p>
", "location":"header", "locationName":"Cache-Control" }, "ContentDisposition":{ "shape":"ContentDisposition", - "documentation":"Specifies presentational information for the object.", + "documentation":"
<p>Specifies presentational information for the object.</p>
", "location":"header", "locationName":"Content-Disposition" }, "ContentEncoding":{ "shape":"ContentEncoding", - "documentation":"Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "documentation":"
<p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p>
", "location":"header", "locationName":"Content-Encoding" }, "ContentLanguage":{ "shape":"ContentLanguage", - "documentation":"The language the content is in.", + "documentation":"
<p>The language the content is in.</p>
", "location":"header", "locationName":"Content-Language" }, "ContentRange":{ "shape":"ContentRange", - "documentation":"The portion of the object returned in the response.", + "documentation":"
<p>The portion of the object returned in the response.</p>
", "location":"header", "locationName":"Content-Range" }, "ContentType":{ "shape":"ContentType", - "documentation":"A standard MIME type describing the format of the object data.", + "documentation":"
<p>A standard MIME type describing the format of the object data.</p>
", "location":"header", "locationName":"Content-Type" }, "Expires":{ "shape":"Expires", - "documentation":"The date and time at which the object is no longer cacheable.", + "documentation":"
<p>The date and time at which the object is no longer cacheable.</p>
", "location":"header", "locationName":"Expires" }, "WebsiteRedirectLocation":{ "shape":"WebsiteRedirectLocation", - "documentation":"If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "documentation":"
<p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.</p>
", "location":"header", "locationName":"x-amz-website-redirect-location" }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"
<p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p>
", "location":"header", "locationName":"x-amz-server-side-encryption" }, "Metadata":{ "shape":"Metadata", - "documentation":"A map of metadata to store with the object in S3.", + "documentation":"
<p>A map of metadata to store with the object in S3.</p>
", "location":"headers", "locationName":"x-amz-meta-" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "documentation":"
<p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "documentation":"
<p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "documentation":"
<p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -3128,13 +3129,13 @@ }, "PartsCount":{ "shape":"PartsCount", - "documentation":"The count of parts this object has.", + "documentation":"
<p>The count of parts this object has.</p>
", "location":"header", "locationName":"x-amz-mp-parts-count" }, "TagCount":{ "shape":"TagCount", - "documentation":"The number of tags, if any, on the object.", + "documentation":"
<p>The number of tags, if any, on the object.</p>
", "location":"header", "locationName":"x-amz-tagging-count" } @@ -3155,25 +3156,25 @@ }, "IfMatch":{ "shape":"IfMatch", - "documentation":"Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).", + "documentation":"
<p>Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).</p>
", "location":"header", "locationName":"If-Match" }, "IfModifiedSince":{ "shape":"IfModifiedSince", - "documentation":"Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).", + "documentation":"
<p>Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).</p>
", "location":"header", "locationName":"If-Modified-Since" }, "IfNoneMatch":{ "shape":"IfNoneMatch", - "documentation":"Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).", + "documentation":"
<p>Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).</p>
", "location":"header", "locationName":"If-None-Match" }, "IfUnmodifiedSince":{ "shape":"IfUnmodifiedSince", - "documentation":"Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).", + "documentation":"
<p>Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).</p>
", "location":"header", "locationName":"If-Unmodified-Since" }, @@ -3184,67 +3185,67 @@ }, "Range":{ "shape":"Range", - "documentation":"Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.", + "documentation":"
<p>Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.</p>
", "location":"header", "locationName":"Range" }, "ResponseCacheControl":{ "shape":"ResponseCacheControl", - "documentation":"Sets the Cache-Control header of the response.", + "documentation":"
<p>Sets the Cache-Control header of the response.</p>
", "location":"querystring", "locationName":"response-cache-control" }, "ResponseContentDisposition":{ "shape":"ResponseContentDisposition", - "documentation":"Sets the Content-Disposition header of the response", + "documentation":"
<p>Sets the Content-Disposition header of the response</p>
", "location":"querystring", "locationName":"response-content-disposition" }, "ResponseContentEncoding":{ "shape":"ResponseContentEncoding", - "documentation":"Sets the Content-Encoding header of the response.", + "documentation":"
<p>Sets the Content-Encoding header of the response.</p>
", "location":"querystring", "locationName":"response-content-encoding" }, "ResponseContentLanguage":{ "shape":"ResponseContentLanguage", - "documentation":"Sets the Content-Language header of the response.", + "documentation":"
<p>Sets the Content-Language header of the response.</p>
", "location":"querystring", "locationName":"response-content-language" }, "ResponseContentType":{ "shape":"ResponseContentType", - "documentation":"Sets the Content-Type header of the response.", + "documentation":"
<p>Sets the Content-Type header of the response.</p>
", "location":"querystring", "locationName":"response-content-type" }, "ResponseExpires":{ "shape":"ResponseExpires", - "documentation":"Sets the Expires header of the response.", + "documentation":"
<p>Sets the Expires header of the response.</p>
", "location":"querystring", "locationName":"response-expires" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"VersionId used to reference a specific version of the object.", + "documentation":"
<p>VersionId used to reference a specific version of the object.</p>
", "location":"querystring", "locationName":"versionId" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "documentation":"
<p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "documentation":"
<p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "documentation":"
<p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p>
", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, @@ -3255,7 +3256,7 @@ }, "PartNumber":{ "shape":"PartNumber", - "documentation":"Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. Useful for downloading just a part of an object.", + "documentation":"
<p>Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. Useful for downloading just a part of an object.</p>
", "location":"querystring", "locationName":"partNumber" } @@ -3342,7 +3343,7 @@ "members":{ "Tier":{ "shape":"Tier", - "documentation":"Glacier retrieval tier at which the restore will be processed." + "documentation":"
<p>Glacier retrieval tier at which the restore will be processed.</p>
" } } }, @@ -3352,7 +3353,7 @@ "Grantee":{"shape":"Grantee"}, "Permission":{ "shape":"Permission", - "documentation":"Specifies the permission given to the grantee." + "documentation":"
<p>Specifies the permission given to the grantee.</p>
" } } }, @@ -3367,25 +3368,25 @@ "members":{ "DisplayName":{ "shape":"DisplayName", - "documentation":"Screen name of the grantee." + "documentation":"
<p>Screen name of the grantee.</p>
" }, "EmailAddress":{ "shape":"EmailAddress", - "documentation":"Email address of the grantee." + "documentation":"
<p>Email address of the grantee.</p>
" }, "ID":{ "shape":"ID", - "documentation":"The canonical user ID of the grantee." + "documentation":"
<p>The canonical user ID of the grantee.</p>
" }, "Type":{ "shape":"Type", - "documentation":"Type of grantee", + "documentation":"
<p>Type of grantee</p>
", "locationName":"xsi:type", "xmlAttribute":true }, "URI":{ "shape":"URI", - "documentation":"URI of the grantee group." + "documentation":"
<p>URI of the grantee group.</p>
" } }, "xmlNamespace":{ @@ -3416,7 +3417,7 @@ "members":{ "DeleteMarker":{ "shape":"DeleteMarker", - "documentation":"Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.", + "documentation":"
<p>Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.</p>
", "location":"header", "locationName":"x-amz-delete-marker" }, @@ -3427,115 +3428,115 @@ }, "Expiration":{ "shape":"Expiration", - "documentation":"If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.", + "documentation":"

If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.

", "location":"header", "locationName":"x-amz-expiration" }, "Restore":{ "shape":"Restore", - "documentation":"Provides information about object restoration operation and expiration time of the restored object copy.", + "documentation":"

Provides information about object restoration operation and expiration time of the restored object copy.

", "location":"header", "locationName":"x-amz-restore" }, "LastModified":{ "shape":"LastModified", - "documentation":"Last modified date of the object", + "documentation":"

Last modified date of the object

", "location":"header", "locationName":"Last-Modified" }, "ContentLength":{ "shape":"ContentLength", - "documentation":"Size of the body in bytes.", + "documentation":"

Size of the body in bytes.

", "location":"header", "locationName":"Content-Length" }, "ETag":{ "shape":"ETag", - "documentation":"An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL", + "documentation":"

An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL

", "location":"header", "locationName":"ETag" }, "MissingMeta":{ "shape":"MissingMeta", - "documentation":"This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.", + "documentation":"

This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.

", "location":"header", "locationName":"x-amz-missing-meta" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"Version of the object.", + "documentation":"

Version of the object.

", "location":"header", "locationName":"x-amz-version-id" }, "CacheControl":{ "shape":"CacheControl", - "documentation":"Specifies caching behavior along the request/reply chain.", + "documentation":"

Specifies caching behavior along the request/reply chain.

", "location":"header", "locationName":"Cache-Control" }, "ContentDisposition":{ "shape":"ContentDisposition", - "documentation":"Specifies presentational information for the object.", + "documentation":"

Specifies presentational information for the object.

", "location":"header", "locationName":"Content-Disposition" }, "ContentEncoding":{ "shape":"ContentEncoding", - "documentation":"Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "documentation":"

Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.

", "location":"header", "locationName":"Content-Encoding" }, "ContentLanguage":{ "shape":"ContentLanguage", - "documentation":"The language the content is in.", + "documentation":"

The language the content is in.

", "location":"header", "locationName":"Content-Language" }, "ContentType":{ "shape":"ContentType", - "documentation":"A standard MIME type describing the format of the object data.", + "documentation":"

A standard MIME type describing the format of the object data.

", "location":"header", "locationName":"Content-Type" }, "Expires":{ "shape":"Expires", - "documentation":"The date and time at which the object is no longer cacheable.", + "documentation":"

The date and time at which the object is no longer cacheable.

", "location":"header", "locationName":"Expires" }, "WebsiteRedirectLocation":{ "shape":"WebsiteRedirectLocation", - "documentation":"If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "documentation":"

If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.

", "location":"header", "locationName":"x-amz-website-redirect-location" }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

", "location":"header", "locationName":"x-amz-server-side-encryption" }, "Metadata":{ "shape":"Metadata", - "documentation":"A map of metadata to store with the object in S3.", + "documentation":"

A map of metadata to store with the object in S3.

", "location":"headers", "locationName":"x-amz-meta-" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "documentation":"

If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "documentation":"

If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -3556,7 +3557,7 @@ }, "PartsCount":{ "shape":"PartsCount", - "documentation":"The count of parts this object has.", + "documentation":"

The count of parts this object has.

", "location":"header", "locationName":"x-amz-mp-parts-count" } @@ -3576,25 +3577,25 @@ }, "IfMatch":{ "shape":"IfMatch", - "documentation":"Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).", + "documentation":"

Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).

", "location":"header", "locationName":"If-Match" }, "IfModifiedSince":{ "shape":"IfModifiedSince", - "documentation":"Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).", + "documentation":"

Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).

", "location":"header", "locationName":"If-Modified-Since" }, "IfNoneMatch":{ "shape":"IfNoneMatch", - "documentation":"Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).", + "documentation":"

Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).

", "location":"header", "locationName":"If-None-Match" }, "IfUnmodifiedSince":{ "shape":"IfUnmodifiedSince", - "documentation":"Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).", + "documentation":"

Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).
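As a sketch of how these conditional headers behave through the client: when a precondition such as If-None-Match is not satisfied, the non-2xx status is assumed here to surface as a ClientError, so the caller can fall back to a cached copy. The bucket, key, and ETag below are placeholders:

```python
import botocore.session
from botocore.exceptions import ClientError

s3 = botocore.session.get_session().create_client("s3")

try:
    response = s3.get_object(
        Bucket="example-bucket",
        Key="example-object",
        IfNoneMatch='"cached-etag"',   # placeholder ETag from a previous GET
    )
    data = response["Body"].read()     # object changed; refresh the cache
except ClientError as err:
    if err.response["ResponseMetadata"]["HTTPStatusCode"] == 304:
        data = None                    # 304 Not Modified: cached copy is current
    else:
        raise
```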

", "location":"header", "locationName":"If-Unmodified-Since" }, @@ -3605,31 +3606,31 @@ }, "Range":{ "shape":"Range", - "documentation":"Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.", + "documentation":"

Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

", "location":"header", "locationName":"Range" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"VersionId used to reference a specific version of the object.", + "documentation":"

VersionId used to reference a specific version of the object.

", "location":"querystring", "locationName":"versionId" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "documentation":"

Specifies the algorithm to use when encrypting the object (e.g., AES256).

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "documentation":"

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "documentation":"

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, @@ -3640,7 +3641,7 @@ }, "PartNumber":{ "shape":"PartNumber", - "documentation":"Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object.", + "documentation":"

Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful for querying the size of the part and the number of parts in this object.
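A short sketch of using PartNumber with HeadObject and GetObject to walk a multipart-uploaded object part by part; the bucket, key, and output filename are placeholders, and PartsCount is only present for objects that were uploaded in parts:

```python
import botocore.session

s3 = botocore.session.get_session().create_client("s3")

bucket, key = "example-bucket", "example-large-object"   # placeholders

head = s3.head_object(Bucket=bucket, Key=key, PartNumber=1)
parts_count = head.get("PartsCount", 1)   # absent for non-multipart objects

with open("download.bin", "wb") as f:
    for part_number in range(1, parts_count + 1):
        part = s3.get_object(Bucket=bucket, Key=key, PartNumber=part_number)
        f.write(part["Body"].read())
```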

", "location":"querystring", "locationName":"partNumber" } @@ -3660,7 +3661,7 @@ "members":{ "Suffix":{ "shape":"Suffix", - "documentation":"A suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character." + "documentation":"

A suffix that is appended to a request for a directory on the website endpoint (for example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html). The suffix must not be empty and must not include a slash character.

" } } }, @@ -3670,11 +3671,11 @@ "members":{ "ID":{ "shape":"ID", - "documentation":"If the principal is an AWS account, it provides the Canonical User ID. If the principal is an IAM User, it provides a user ARN value." + "documentation":"

If the principal is an AWS account, it provides the Canonical User ID. If the principal is an IAM User, it provides a user ARN value.

" }, "DisplayName":{ "shape":"DisplayName", - "documentation":"Name of the Principal." + "documentation":"

Name of the Principal.

" } } }, @@ -3683,18 +3684,18 @@ "members":{ "CSV":{ "shape":"CSVInput", - "documentation":"Describes the serialization of a CSV-encoded object." + "documentation":"

Describes the serialization of a CSV-encoded object.

" }, "CompressionType":{ "shape":"CompressionType", - "documentation":"Specifies object's compression format. Valid values: NONE, GZIP. Default Value: NONE." + "documentation":"

Specifies the object's compression format. Valid values: NONE, GZIP, BZIP2. Default value: NONE.

" }, "JSON":{ "shape":"JSONInput", - "documentation":"Specifies JSON as object's input serialization format." + "documentation":"

Specifies JSON as the object's input serialization format.

" } }, - "documentation":"Describes the serialization format of the object." + "documentation":"

Describes the serialization format of the object.

" }, "InventoryConfiguration":{ "type":"structure", @@ -3708,31 +3709,31 @@ "members":{ "Destination":{ "shape":"InventoryDestination", - "documentation":"Contains information about where to publish the inventory results." + "documentation":"

Contains information about where to publish the inventory results.

" }, "IsEnabled":{ "shape":"IsEnabled", - "documentation":"Specifies whether the inventory is enabled or disabled." + "documentation":"

Specifies whether the inventory is enabled or disabled.

" }, "Filter":{ "shape":"InventoryFilter", - "documentation":"Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria." + "documentation":"

Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria.

" }, "Id":{ "shape":"InventoryId", - "documentation":"The ID used to identify the inventory configuration." + "documentation":"

The ID used to identify the inventory configuration.

" }, "IncludedObjectVersions":{ "shape":"InventoryIncludedObjectVersions", - "documentation":"Specifies which object version(s) to included in the inventory results." + "documentation":"

Specifies which object version(s) to include in the inventory results.

" }, "OptionalFields":{ "shape":"InventoryOptionalFields", - "documentation":"Contains the optional fields that are included in the inventory results." + "documentation":"

Contains the optional fields that are included in the inventory results.

" }, "Schedule":{ "shape":"InventorySchedule", - "documentation":"Specifies the schedule for generating inventory results." + "documentation":"

Specifies the schedule for generating inventory results.

" } } }, @@ -3747,7 +3748,7 @@ "members":{ "S3BucketDestination":{ "shape":"InventoryS3BucketDestination", - "documentation":"Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published." + "documentation":"

Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published.

" } } }, @@ -3756,16 +3757,16 @@ "members":{ "SSES3":{ "shape":"SSES3", - "documentation":"Specifies the use of SSE-S3 to encrypt delievered Inventory reports.", + "documentation":"

Specifies the use of SSE-S3 to encrypt delivered Inventory reports.

", "locationName":"SSE-S3" }, "SSEKMS":{ "shape":"SSEKMS", - "documentation":"Specifies the use of SSE-KMS to encrypt delievered Inventory reports.", + "documentation":"

Specifies the use of SSE-KMS to encrypt delivered Inventory reports.

", "locationName":"SSE-KMS" } }, - "documentation":"Contains the type of server-side encryption used to encrypt the inventory results." + "documentation":"

Contains the type of server-side encryption used to encrypt the inventory results.

" }, "InventoryFilter":{ "type":"structure", @@ -3773,7 +3774,7 @@ "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"The prefix that an object must have to be included in the inventory results." + "documentation":"

The prefix that an object must have to be included in the inventory results.

" } } }, @@ -3827,23 +3828,23 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"The ID of the account that owns the destination bucket." + "documentation":"

The ID of the account that owns the destination bucket.

" }, "Bucket":{ "shape":"BucketName", - "documentation":"The Amazon resource name (ARN) of the bucket where inventory results will be published." + "documentation":"

The Amazon Resource Name (ARN) of the bucket where inventory results will be published.

" }, "Format":{ "shape":"InventoryFormat", - "documentation":"Specifies the output format of the inventory results." + "documentation":"

Specifies the output format of the inventory results.

" }, "Prefix":{ "shape":"Prefix", - "documentation":"The prefix that is prepended to all inventory results." + "documentation":"

The prefix that is prepended to all inventory results.

" }, "Encryption":{ "shape":"InventoryEncryption", - "documentation":"Contains the type of server-side encryption used to encrypt the inventory results." + "documentation":"

Contains the type of server-side encryption used to encrypt the inventory results.

" } } }, @@ -3853,7 +3854,7 @@ "members":{ "Frequency":{ "shape":"InventoryFrequency", - "documentation":"Specifies how frequently inventory results are produced." + "documentation":"

Specifies how frequently inventory results are produced.
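Putting the inventory shapes above together, here is a minimal sketch of a daily CSV inventory configuration through the generated client; the bucket names, configuration Id, prefixes, and destination ARN are placeholders:

```python
import botocore.session

s3 = botocore.session.get_session().create_client("s3")

s3.put_bucket_inventory_configuration(
    Bucket="example-bucket",
    Id="daily-inventory",
    InventoryConfiguration={
        "Id": "daily-inventory",
        "IsEnabled": True,
        "IncludedObjectVersions": "Current",
        "Filter": {"Prefix": "data/"},
        "Schedule": {"Frequency": "Daily"},
        "OptionalFields": ["Size", "LastModifiedDate", "StorageClass"],
        "Destination": {
            "S3BucketDestination": {
                "Bucket": "arn:aws:s3:::example-inventory-reports",  # placeholder ARN
                "Format": "CSV",
                "Prefix": "inventory/",
            }
        },
    },
)
```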

" } } }, @@ -3865,7 +3866,7 @@ "members":{ "Type":{ "shape":"JSONType", - "documentation":"The type of JSON. Valid values: Document, Lines." + "documentation":"

The type of JSON. Valid values: Document, Lines.

" } } }, @@ -3874,7 +3875,7 @@ "members":{ "RecordDelimiter":{ "shape":"RecordDelimiter", - "documentation":"The value used to separate individual records in the output." + "documentation":"

The value used to separate individual records in the output.

" } } }, @@ -3900,7 +3901,7 @@ "Id":{"shape":"NotificationId"}, "LambdaFunctionArn":{ "shape":"LambdaFunctionArn", - "documentation":"Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type.", + "documentation":"

Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type.

", "locationName":"CloudFunction" }, "Events":{ @@ -3909,7 +3910,7 @@ }, "Filter":{"shape":"NotificationConfigurationFilter"} }, - "documentation":"Container for specifying the AWS Lambda notification configuration." + "documentation":"

Container for specifying the AWS Lambda notification configuration.

" }, "LambdaFunctionConfigurationList":{ "type":"list", @@ -3932,15 +3933,15 @@ "members":{ "Date":{ "shape":"Date", - "documentation":"Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format." + "documentation":"

Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.

" }, "Days":{ "shape":"Days", - "documentation":"Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer." + "documentation":"

Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.

" }, "ExpiredObjectDeleteMarker":{ "shape":"ExpiredObjectDeleteMarker", - "documentation":"Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy." + "documentation":"

Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.

" } } }, @@ -3951,17 +3952,17 @@ "Expiration":{"shape":"LifecycleExpiration"}, "ID":{ "shape":"ID", - "documentation":"Unique identifier for the rule. The value cannot be longer than 255 characters." + "documentation":"

Unique identifier for the rule. The value cannot be longer than 255 characters.

" }, "Prefix":{ "shape":"Prefix", - "documentation":"Prefix identifying one or more objects to which the rule applies. This is deprecated; use Filter instead.", + "documentation":"

Prefix identifying one or more objects to which the rule applies. This is deprecated; use Filter instead.

", "deprecated":true }, "Filter":{"shape":"LifecycleRuleFilter"}, "Status":{ "shape":"ExpirationStatus", - "documentation":"If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied." + "documentation":"

If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied.

" }, "Transitions":{ "shape":"TransitionList", @@ -3981,27 +3982,27 @@ "Prefix":{"shape":"Prefix"}, "Tags":{ "shape":"TagSet", - "documentation":"All of these tags must exist in the object's tag set in order for the rule to apply.", + "documentation":"

All of these tags must exist in the object's tag set in order for the rule to apply.

", "flattened":true, "locationName":"Tag" } }, - "documentation":"This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the And operator." + "documentation":"

This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the And operator.

" }, "LifecycleRuleFilter":{ "type":"structure", "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"Prefix identifying one or more objects to which the rule applies." + "documentation":"

Prefix identifying one or more objects to which the rule applies.

" }, "Tag":{ "shape":"Tag", - "documentation":"This tag must exist in the object's tag set in order for the rule to apply." + "documentation":"

This tag must exist in the object's tag set in order for the rule to apply.

" }, "And":{"shape":"LifecycleRuleAndOperator"} }, - "documentation":"The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified." + "documentation":"

The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified.
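As a sketch of how the Filter and And shapes combine in practice, the rule below expires objects that match both a prefix and a tag after 30 days; the bucket name, prefix, and tag are placeholders:

```python
import botocore.session

s3 = botocore.session.get_session().create_client("s3")

s3.put_bucket_lifecycle_configuration(
    Bucket="example-bucket",
    LifecycleConfiguration={
        "Rules": [
            {
                "ID": "expire-tagged-logs",
                "Status": "Enabled",
                # Exactly one of Prefix, Tag, or And: here a conjunction of both.
                "Filter": {
                    "And": {
                        "Prefix": "logs/",
                        "Tags": [{"Key": "archive", "Value": "true"}],
                    }
                },
                "Expiration": {"Days": 30},
            }
        ]
    },
)
```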

" }, "LifecycleRules":{ "type":"list", @@ -4013,19 +4014,19 @@ "members":{ "IsTruncated":{ "shape":"IsTruncated", - "documentation":"Indicates whether the returned list of analytics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request." + "documentation":"

Indicates whether the returned list of analytics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.

" }, "ContinuationToken":{ "shape":"Token", - "documentation":"The ContinuationToken that represents where this request began." + "documentation":"

The ContinuationToken that represents where this request began.

" }, "NextContinuationToken":{ "shape":"NextToken", - "documentation":"NextContinuationToken is sent when isTruncated is true, which indicates that there are more analytics configurations to list. The next request must include this NextContinuationToken. The token is obfuscated and is not a usable value." + "documentation":"

NextContinuationToken is sent when isTruncated is true, which indicates that there are more analytics configurations to list. The next request must include this NextContinuationToken. The token is obfuscated and is not a usable value.

" }, "AnalyticsConfigurationList":{ "shape":"AnalyticsConfigurationList", - "documentation":"The list of analytics configurations for a bucket.", + "documentation":"

The list of analytics configurations for a bucket.
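A minimal sketch of driving this continuation-token protocol by hand against the generated client; the bucket name is a placeholder:

```python
import botocore.session

s3 = botocore.session.get_session().create_client("s3")

configs = []
kwargs = {"Bucket": "example-bucket"}
while True:
    page = s3.list_bucket_analytics_configurations(**kwargs)
    configs.extend(page.get("AnalyticsConfigurationList", []))
    if not page.get("IsTruncated"):
        break
    # Feed NextContinuationToken back in as ContinuationToken.
    kwargs["ContinuationToken"] = page["NextContinuationToken"]
```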

", "locationName":"AnalyticsConfiguration" } } @@ -4036,13 +4037,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket from which analytics configurations are retrieved.", + "documentation":"

The name of the bucket from which analytics configurations are retrieved.

", "location":"uri", "locationName":"Bucket" }, "ContinuationToken":{ "shape":"Token", - "documentation":"The ContinuationToken that represents a placeholder from where this request should begin.", + "documentation":"

The ContinuationToken that represents a placeholder from where this request should begin.

", "location":"querystring", "locationName":"continuation-token" } @@ -4053,20 +4054,20 @@ "members":{ "ContinuationToken":{ "shape":"Token", - "documentation":"If sent in the request, the marker that is used as a starting point for this inventory configuration list response." + "documentation":"

If sent in the request, the marker that is used as a starting point for this inventory configuration list response.

" }, "InventoryConfigurationList":{ "shape":"InventoryConfigurationList", - "documentation":"The list of inventory configurations for a bucket.", + "documentation":"

The list of inventory configurations for a bucket.

", "locationName":"InventoryConfiguration" }, "IsTruncated":{ "shape":"IsTruncated", - "documentation":"Indicates whether the returned list of inventory configurations is truncated in this response. A value of true indicates that the list is truncated." + "documentation":"

Indicates whether the returned list of inventory configurations is truncated in this response. A value of true indicates that the list is truncated.

" }, "NextContinuationToken":{ "shape":"NextToken", - "documentation":"The marker used to continue this inventory configuration listing. Use the NextContinuationToken from this response to continue the listing in a subsequent request. The continuation token is an opaque value that Amazon S3 understands." + "documentation":"

The marker used to continue this inventory configuration listing. Use the NextContinuationToken from this response to continue the listing in a subsequent request. The continuation token is an opaque value that Amazon S3 understands.

" } } }, @@ -4076,13 +4077,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket containing the inventory configurations to retrieve.", + "documentation":"

The name of the bucket containing the inventory configurations to retrieve.

", "location":"uri", "locationName":"Bucket" }, "ContinuationToken":{ "shape":"Token", - "documentation":"The marker used to continue an inventory configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.", + "documentation":"

The marker used to continue an inventory configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

", "location":"querystring", "locationName":"continuation-token" } @@ -4093,19 +4094,19 @@ "members":{ "IsTruncated":{ "shape":"IsTruncated", - "documentation":"Indicates whether the returned list of metrics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request." + "documentation":"

Indicates whether the returned list of metrics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.

" }, "ContinuationToken":{ "shape":"Token", - "documentation":"The marker that is used as a starting point for this metrics configuration list response. This value is present if it was sent in the request." + "documentation":"

The marker that is used as a starting point for this metrics configuration list response. This value is present if it was sent in the request.

" }, "NextContinuationToken":{ "shape":"NextToken", - "documentation":"The marker used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands." + "documentation":"

The marker used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

" }, "MetricsConfigurationList":{ "shape":"MetricsConfigurationList", - "documentation":"The list of metrics configurations for a bucket.", + "documentation":"

The list of metrics configurations for a bucket.

", "locationName":"MetricsConfiguration" } } @@ -4116,13 +4117,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket containing the metrics configurations to retrieve.", + "documentation":"

The name of the bucket containing the metrics configurations to retrieve.

", "location":"uri", "locationName":"Bucket" }, "ContinuationToken":{ "shape":"Token", - "documentation":"The marker that is used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.", + "documentation":"

The marker that is used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

", "location":"querystring", "locationName":"continuation-token" } @@ -4140,36 +4141,36 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"Name of the bucket to which the multipart upload was initiated." + "documentation":"

Name of the bucket to which the multipart upload was initiated.

" }, "KeyMarker":{ "shape":"KeyMarker", - "documentation":"The key at or after which the listing began." + "documentation":"

The key at or after which the listing began.

" }, "UploadIdMarker":{ "shape":"UploadIdMarker", - "documentation":"Upload ID after which listing began." + "documentation":"

Upload ID after which listing began.

" }, "NextKeyMarker":{ "shape":"NextKeyMarker", - "documentation":"When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request." + "documentation":"

When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request.

" }, "Prefix":{ "shape":"Prefix", - "documentation":"When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix." + "documentation":"

When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix.

" }, "Delimiter":{"shape":"Delimiter"}, "NextUploadIdMarker":{ "shape":"NextUploadIdMarker", - "documentation":"When a list is truncated, this element specifies the value that should be used for the upload-id-marker request parameter in a subsequent request." + "documentation":"

When a list is truncated, this element specifies the value that should be used for the upload-id-marker request parameter in a subsequent request.

" }, "MaxUploads":{ "shape":"MaxUploads", - "documentation":"Maximum number of multipart uploads that could have been included in the response." + "documentation":"

Maximum number of multipart uploads that could have been included in the response.

" }, "IsTruncated":{ "shape":"IsTruncated", - "documentation":"Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads." + "documentation":"

Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads.

" }, "Uploads":{ "shape":"MultipartUploadList", @@ -4178,7 +4179,7 @@ "CommonPrefixes":{"shape":"CommonPrefixList"}, "EncodingType":{ "shape":"EncodingType", - "documentation":"Encoding type used by Amazon S3 to encode object keys in the response." + "documentation":"

Encoding type used by Amazon S3 to encode object keys in the response.

" } } }, @@ -4193,7 +4194,7 @@ }, "Delimiter":{ "shape":"Delimiter", - "documentation":"Character you use to group keys.", + "documentation":"

Character you use to group keys.

", "location":"querystring", "locationName":"delimiter" }, @@ -4204,25 +4205,25 @@ }, "KeyMarker":{ "shape":"KeyMarker", - "documentation":"Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.", + "documentation":"

Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.

", "location":"querystring", "locationName":"key-marker" }, "MaxUploads":{ "shape":"MaxUploads", - "documentation":"Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum number of uploads that can be returned in a response.", + "documentation":"

Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum number of uploads that can be returned in a response.

", "location":"querystring", "locationName":"max-uploads" }, "Prefix":{ "shape":"Prefix", - "documentation":"Lists in-progress uploads only for those keys that begin with the specified prefix.", + "documentation":"

Lists in-progress uploads only for those keys that begin with the specified prefix.

", "location":"querystring", "locationName":"prefix" }, "UploadIdMarker":{ "shape":"UploadIdMarker", - "documentation":"Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored.", + "documentation":"

Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored.

", "location":"querystring", "locationName":"upload-id-marker" } @@ -4233,20 +4234,20 @@ "members":{ "IsTruncated":{ "shape":"IsTruncated", - "documentation":"A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria. If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results." + "documentation":"

A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria. If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results.

" }, "KeyMarker":{ "shape":"KeyMarker", - "documentation":"Marks the last Key returned in a truncated response." + "documentation":"

Marks the last Key returned in a truncated response.

" }, "VersionIdMarker":{"shape":"VersionIdMarker"}, "NextKeyMarker":{ "shape":"NextKeyMarker", - "documentation":"Use this value for the key marker request parameter in a subsequent request." + "documentation":"

Use this value for the key marker request parameter in a subsequent request.

" }, "NextVersionIdMarker":{ "shape":"NextVersionIdMarker", - "documentation":"Use this value for the next version id marker parameter in a subsequent request." + "documentation":"

Use this value for the next version id marker parameter in a subsequent request.

" }, "Versions":{ "shape":"ObjectVersionList", @@ -4263,7 +4264,7 @@ "CommonPrefixes":{"shape":"CommonPrefixList"}, "EncodingType":{ "shape":"EncodingType", - "documentation":"Encoding type used by Amazon S3 to encode object keys in the response." + "documentation":"

Encoding type used by Amazon S3 to encode object keys in the response.

" } } }, @@ -4278,7 +4279,7 @@ }, "Delimiter":{ "shape":"Delimiter", - "documentation":"A delimiter is a character you use to group keys.", + "documentation":"

A delimiter is a character you use to group keys.

", "location":"querystring", "locationName":"delimiter" }, @@ -4289,25 +4290,25 @@ }, "KeyMarker":{ "shape":"KeyMarker", - "documentation":"Specifies the key to start with when listing objects in a bucket.", + "documentation":"

Specifies the key to start with when listing objects in a bucket.

", "location":"querystring", "locationName":"key-marker" }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.", + "documentation":"

Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

", "location":"querystring", "locationName":"max-keys" }, "Prefix":{ "shape":"Prefix", - "documentation":"Limits the response to keys that begin with the specified prefix.", + "documentation":"

Limits the response to keys that begin with the specified prefix.

", "location":"querystring", "locationName":"prefix" }, "VersionIdMarker":{ "shape":"VersionIdMarker", - "documentation":"Specifies the object version you want to start listing from.", + "documentation":"

Specifies the object version you want to start listing from.

", "location":"querystring", "locationName":"version-id-marker" } @@ -4318,12 +4319,12 @@ "members":{ "IsTruncated":{ "shape":"IsTruncated", - "documentation":"A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria." + "documentation":"

A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.

" }, "Marker":{"shape":"Marker"}, "NextMarker":{ "shape":"NextMarker", - "documentation":"When response is truncated (the IsTruncated element value in the response is true), you can use the key name in this field as marker in the subsequent request to get next set of objects. Amazon S3 lists objects in alphabetical order Note: This element is returned only if you have delimiter request parameter specified. If response does not include the NextMaker and it is truncated, you can use the value of the last Key in the response as the marker in the subsequent request to get the next set of object keys." + "documentation":"

When the response is truncated (the IsTruncated element value in the response is true), you can use the key name in this field as the marker in a subsequent request to get the next set of objects. Amazon S3 lists objects in alphabetical order. Note: this element is returned only if you have the delimiter request parameter specified. If the response does not include the NextMarker and it is truncated, you can use the value of the last Key in the response as the marker in a subsequent request to get the next set of object keys.

" }, "Contents":{"shape":"ObjectList"}, "Name":{"shape":"BucketName"}, @@ -4333,7 +4334,7 @@ "CommonPrefixes":{"shape":"CommonPrefixList"}, "EncodingType":{ "shape":"EncodingType", - "documentation":"Encoding type used by Amazon S3 to encode object keys in the response." + "documentation":"

Encoding type used by Amazon S3 to encode object keys in the response.

" } } }, @@ -4348,7 +4349,7 @@ }, "Delimiter":{ "shape":"Delimiter", - "documentation":"A delimiter is a character you use to group keys.", + "documentation":"

A delimiter is a character you use to group keys.

", "location":"querystring", "locationName":"delimiter" }, @@ -4359,25 +4360,25 @@ }, "Marker":{ "shape":"Marker", - "documentation":"Specifies the key to start with when listing objects in a bucket.", + "documentation":"

Specifies the key to start with when listing objects in a bucket.

", "location":"querystring", "locationName":"marker" }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.", + "documentation":"

Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

", "location":"querystring", "locationName":"max-keys" }, "Prefix":{ "shape":"Prefix", - "documentation":"Limits the response to keys that begin with the specified prefix.", + "documentation":"

Limits the response to keys that begin with the specified prefix.

", "location":"querystring", "locationName":"prefix" }, "RequestPayer":{ "shape":"RequestPayer", - "documentation":"Confirms that the requester knows that she or he will be charged for the list objects request. Bucket owners need not specify this parameter in their requests.", + "documentation":"

Confirms that the requester knows that she or he will be charged for the list objects request. Bucket owners need not specify this parameter in their requests.

", "location":"header", "locationName":"x-amz-request-payer" } @@ -4388,51 +4389,51 @@ "members":{ "IsTruncated":{ "shape":"IsTruncated", - "documentation":"A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria." + "documentation":"

A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.

" }, "Contents":{ "shape":"ObjectList", - "documentation":"Metadata about each object returned." + "documentation":"

Metadata about each object returned.

" }, "Name":{ "shape":"BucketName", - "documentation":"Name of the bucket to list." + "documentation":"

Name of the bucket to list.

" }, "Prefix":{ "shape":"Prefix", - "documentation":"Limits the response to keys that begin with the specified prefix." + "documentation":"

Limits the response to keys that begin with the specified prefix.

" }, "Delimiter":{ "shape":"Delimiter", - "documentation":"A delimiter is a character you use to group keys." + "documentation":"

A delimiter is a character you use to group keys.

" }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more." + "documentation":"

Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

" }, "CommonPrefixes":{ "shape":"CommonPrefixList", - "documentation":"CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by delimiter" + "documentation":"

CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by the delimiter.

" }, "EncodingType":{ "shape":"EncodingType", - "documentation":"Encoding type used by Amazon S3 to encode object keys in the response." + "documentation":"

Encoding type used by Amazon S3 to encode object keys in the response.

" }, "KeyCount":{ "shape":"KeyCount", - "documentation":"KeyCount is the number of keys returned with this request. KeyCount will always be less than equals to MaxKeys field. Say you ask for 50 keys, your result will include less than equals 50 keys" + "documentation":"

KeyCount is the number of keys returned with this request. KeyCount will always be less than or equal to the MaxKeys field. For example, if you ask for 50 keys, your result will include 50 keys or fewer.

" }, "ContinuationToken":{ "shape":"Token", - "documentation":"ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key" + "documentation":"

ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key.

" }, "NextContinuationToken":{ "shape":"NextToken", - "documentation":"NextContinuationToken is sent when isTruncated is true which means there are more keys in the bucket that can be listed. The next list requests to Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken is obfuscated and is not a real key" + "documentation":"

NextContinuationToken is sent when IsTruncated is true, which means there are more keys in the bucket that can be listed. The next list request to Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken is obfuscated and is not a real key.

" }, "StartAfter":{ "shape":"StartAfter", - "documentation":"StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket" + "documentation":"

StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket
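Rather than threading ContinuationToken and NextContinuationToken manually, the list_objects_v2 paginator can drive this loop. A sketch, with the bucket, prefix, and delimiter as placeholders:

```python
import botocore.session

s3 = botocore.session.get_session().create_client("s3")

paginator = s3.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket="example-bucket", Prefix="photos/", Delimiter="/"):
    for obj in page.get("Contents", []):
        print(obj["Key"], obj["Size"])
    for common in page.get("CommonPrefixes", []):
        print("prefix:", common["Prefix"])
```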

" } } }, @@ -4442,55 +4443,55 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"Name of the bucket to list.", + "documentation":"

Name of the bucket to list.

", "location":"uri", "locationName":"Bucket" }, "Delimiter":{ "shape":"Delimiter", - "documentation":"A delimiter is a character you use to group keys.", + "documentation":"

A delimiter is a character you use to group keys.

", "location":"querystring", "locationName":"delimiter" }, "EncodingType":{ "shape":"EncodingType", - "documentation":"Encoding type used by Amazon S3 to encode object keys in the response.", + "documentation":"

Encoding type used by Amazon S3 to encode object keys in the response.

", "location":"querystring", "locationName":"encoding-type" }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.", + "documentation":"

Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

", "location":"querystring", "locationName":"max-keys" }, "Prefix":{ "shape":"Prefix", - "documentation":"Limits the response to keys that begin with the specified prefix.", + "documentation":"

Limits the response to keys that begin with the specified prefix.

", "location":"querystring", "locationName":"prefix" }, "ContinuationToken":{ "shape":"Token", - "documentation":"ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key", + "documentation":"

ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key.

", "location":"querystring", "locationName":"continuation-token" }, "FetchOwner":{ "shape":"FetchOwner", - "documentation":"The owner field is not present in listV2 by default, if you want to return owner field with each key in the result then set the fetch owner field to true", + "documentation":"

The owner field is not present in ListObjectsV2 results by default. To return the owner field with each key in the result, set FetchOwner to true.

", "location":"querystring", "locationName":"fetch-owner" }, "StartAfter":{ "shape":"StartAfter", - "documentation":"StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket", + "documentation":"

StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket

", "location":"querystring", "locationName":"start-after" }, "RequestPayer":{ "shape":"RequestPayer", - "documentation":"Confirms that the requester knows that she or he will be charged for the list objects request in V2 style. Bucket owners need not specify this parameter in their requests.", + "documentation":"

Confirms that the requester knows that she or he will be charged for the list objects request in V2 style. Bucket owners need not specify this parameter in their requests.

", "location":"header", "locationName":"x-amz-request-payer" } @@ -4501,43 +4502,43 @@ "members":{ "AbortDate":{ "shape":"AbortDate", - "documentation":"Date when multipart upload will become eligible for abort operation by lifecycle.", + "documentation":"

Date when multipart upload will become eligible for abort operation by lifecycle.

", "location":"header", "locationName":"x-amz-abort-date" }, "AbortRuleId":{ "shape":"AbortRuleId", - "documentation":"Id of the lifecycle rule that makes a multipart upload eligible for abort operation.", + "documentation":"

Id of the lifecycle rule that makes a multipart upload eligible for abort operation.

", "location":"header", "locationName":"x-amz-abort-rule-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"Name of the bucket to which the multipart upload was initiated." + "documentation":"

Name of the bucket to which the multipart upload was initiated.

" }, "Key":{ "shape":"ObjectKey", - "documentation":"Object key for which the multipart upload was initiated." + "documentation":"

Object key for which the multipart upload was initiated.

" }, "UploadId":{ "shape":"MultipartUploadId", - "documentation":"Upload ID identifying the multipart upload whose parts are being listed." + "documentation":"

Upload ID identifying the multipart upload whose parts are being listed.

" }, "PartNumberMarker":{ "shape":"PartNumberMarker", - "documentation":"Part number after which listing begins." + "documentation":"

Part number after which listing begins.

" }, "NextPartNumberMarker":{ "shape":"NextPartNumberMarker", - "documentation":"When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request." + "documentation":"

When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request.

" }, "MaxParts":{ "shape":"MaxParts", - "documentation":"Maximum number of parts that were allowed in the response." + "documentation":"

Maximum number of parts that were allowed in the response.

" }, "IsTruncated":{ "shape":"IsTruncated", - "documentation":"Indicates whether the returned list of parts is truncated." + "documentation":"

Indicates whether the returned list of parts is truncated.

" }, "Parts":{ "shape":"Parts", @@ -4545,12 +4546,12 @@ }, "Initiator":{ "shape":"Initiator", - "documentation":"Identifies who initiated the multipart upload." + "documentation":"

Identifies who initiated the multipart upload.

" }, "Owner":{"shape":"Owner"}, "StorageClass":{ "shape":"StorageClass", - "documentation":"The class of storage used to store the object." + "documentation":"

The class of storage used to store the object.

" }, "RequestCharged":{ "shape":"RequestCharged", @@ -4579,19 +4580,19 @@ }, "MaxParts":{ "shape":"MaxParts", - "documentation":"Sets the maximum number of parts to return.", + "documentation":"

Sets the maximum number of parts to return.

", "location":"querystring", "locationName":"max-parts" }, "PartNumberMarker":{ "shape":"PartNumberMarker", - "documentation":"Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.", + "documentation":"

Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.

", "location":"querystring", "locationName":"part-number-marker" }, "UploadId":{ "shape":"MultipartUploadId", - "documentation":"Upload ID identifying the multipart upload whose parts are being listed.", + "documentation":"

Upload ID identifying the multipart upload whose parts are being listed.
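A sketch of paging through the parts of an in-progress multipart upload with PartNumberMarker and NextPartNumberMarker; the bucket, key, and upload ID are placeholders:

```python
import botocore.session

s3 = botocore.session.get_session().create_client("s3")

parts = []
kwargs = {
    "Bucket": "example-bucket",
    "Key": "example-large-object",
    "UploadId": "EXAMPLE-UPLOAD-ID",   # placeholder upload ID
    "MaxParts": 1000,
}
while True:
    page = s3.list_parts(**kwargs)
    parts.extend(page.get("Parts", []))
    if not page.get("IsTruncated"):
        break
    kwargs["PartNumberMarker"] = page["NextPartNumberMarker"]
```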

", "location":"querystring", "locationName":"uploadId" }, @@ -4613,15 +4614,15 @@ "members":{ "TargetBucket":{ "shape":"TargetBucket", - "documentation":"Specifies the bucket where you want Amazon S3 to store server access logs. You can have your logs delivered to any bucket that you own, including the same bucket that is being logged. You can also configure multiple buckets to deliver their logs to the same target bucket. In this case you should choose a different TargetPrefix for each source bucket so that the delivered log files can be distinguished by key." + "documentation":"

Specifies the bucket where you want Amazon S3 to store server access logs. You can have your logs delivered to any bucket that you own, including the same bucket that is being logged. You can also configure multiple buckets to deliver their logs to the same target bucket. In this case you should choose a different TargetPrefix for each source bucket so that the delivered log files can be distinguished by key.

" }, "TargetGrants":{"shape":"TargetGrants"}, "TargetPrefix":{ "shape":"TargetPrefix", - "documentation":"This element lets you specify a prefix for the keys that the log files will be stored under." + "documentation":"

This element lets you specify a prefix for the keys that the log files will be stored under.

" } }, - "documentation":"Container for logging information. Presence of this element indicates that logging is enabled. Parameters TargetBucket and TargetPrefix are required in this case." + "documentation":"

Container for logging information. Presence of this element indicates that logging is enabled. Parameters TargetBucket and TargetPrefix are required in this case.
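A minimal sketch of enabling server access logging with these two required elements; both bucket names and the prefix are placeholders:

```python
import botocore.session

s3 = botocore.session.get_session().create_client("s3")

s3.put_bucket_logging(
    Bucket="example-bucket",
    BucketLoggingStatus={
        "LoggingEnabled": {
            "TargetBucket": "example-log-bucket",
            "TargetPrefix": "logs/example-bucket/",
        }
    },
)
```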

" }, "MFA":{"type":"string"}, "MFADelete":{ @@ -4662,7 +4663,7 @@ "Name":{"shape":"MetadataKey"}, "Value":{"shape":"MetadataValue"} }, - "documentation":"A metadata key-value pair to store with an object." + "documentation":"

A metadata key-value pair to store with an object.

" }, "MetadataKey":{"type":"string"}, "MetadataValue":{"type":"string"}, @@ -4671,11 +4672,11 @@ "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"The prefix used when evaluating an AND predicate." + "documentation":"

The prefix used when evaluating an AND predicate.

" }, "Tags":{ "shape":"TagSet", - "documentation":"The list of tags used when evaluating an AND predicate.", + "documentation":"

The list of tags used when evaluating an AND predicate.

", "flattened":true, "locationName":"Tag" } @@ -4687,11 +4688,11 @@ "members":{ "Id":{ "shape":"MetricsId", - "documentation":"The ID used to identify the metrics configuration." + "documentation":"

The ID used to identify the metrics configuration.

" }, "Filter":{ "shape":"MetricsFilter", - "documentation":"Specifies a metrics configuration filter. The metrics configuration will only include objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction (MetricsAndOperator)." + "documentation":"

Specifies a metrics configuration filter. The metrics configuration will only include objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction (MetricsAndOperator).

" } } }, @@ -4705,15 +4706,15 @@ "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"The prefix used when evaluating a metrics filter." + "documentation":"

The prefix used when evaluating a metrics filter.

" }, "Tag":{ "shape":"Tag", - "documentation":"The tag used when evaluating a metrics filter." + "documentation":"

The tag used when evaluating a metrics filter.

" }, "And":{ "shape":"MetricsAndOperator", - "documentation":"A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply." + "documentation":"

A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.

" } } }, @@ -4724,24 +4725,24 @@ "members":{ "UploadId":{ "shape":"MultipartUploadId", - "documentation":"Upload ID that identifies the multipart upload." + "documentation":"

Upload ID that identifies the multipart upload.

" }, "Key":{ "shape":"ObjectKey", - "documentation":"Key of the object for which the multipart upload was initiated." + "documentation":"

Key of the object for which the multipart upload was initiated.

" }, "Initiated":{ "shape":"Initiated", - "documentation":"Date and time at which the multipart upload was initiated." + "documentation":"

Date and time at which the multipart upload was initiated.

" }, "StorageClass":{ "shape":"StorageClass", - "documentation":"The class of storage used to store the object." + "documentation":"

The class of storage used to store the object.

" }, "Owner":{"shape":"Owner"}, "Initiator":{ "shape":"Initiator", - "documentation":"Identifies who initiated the multipart upload." + "documentation":"

Identifies who initiated the multipart upload.

" } } }, @@ -4761,21 +4762,21 @@ "type":"structure", "members":{ }, - "documentation":"The specified bucket does not exist.", + "documentation":"

The specified bucket does not exist.

", "exception":true }, "NoSuchKey":{ "type":"structure", "members":{ }, - "documentation":"The specified key does not exist.", + "documentation":"

The specified key does not exist.

", "exception":true }, "NoSuchUpload":{ "type":"structure", "members":{ }, - "documentation":"The specified multipart upload does not exist.", + "documentation":"

The specified multipart upload does not exist.

", "exception":true }, "NoncurrentVersionExpiration":{ @@ -4783,24 +4784,24 @@ "members":{ "NoncurrentDays":{ "shape":"Days", - "documentation":"Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide." + "documentation":"

Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.

" } }, - "documentation":"Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime." + "documentation":"

Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.

" }, "NoncurrentVersionTransition":{ "type":"structure", "members":{ "NoncurrentDays":{ "shape":"Days", - "documentation":"Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide." + "documentation":"

<p>Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.</p>"
        },
        "StorageClass":{
          "shape":"TransitionStorageClass",
-          "documentation":"The class of storage used to store the object."
+          "documentation":"<p>The class of storage used to store the object.</p>"
        }
      },
-      "documentation":"Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period in the object's lifetime."
+      "documentation":"<p>Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period in the object's lifetime.</p>

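A minimal sketch (bucket name, prefix, and day counts are placeholders) of configuring such a noncurrent-version rule with a botocore client, using the newer PutBucketLifecycleConfiguration operation rather than the deprecated PutBucketLifecycle:

    import botocore.session

    s3 = botocore.session.get_session().create_client('s3')

    # On a versioning-enabled bucket, move noncurrent versions to GLACIER
    # after 30 days and permanently delete them after 365 days.
    s3.put_bucket_lifecycle_configuration(
        Bucket='my-versioned-bucket',
        LifecycleConfiguration={
            'Rules': [{
                'ID': 'archive-noncurrent-versions',
                'Filter': {'Prefix': ''},
                'Status': 'Enabled',
                'NoncurrentVersionTransitions': [
                    {'NoncurrentDays': 30, 'StorageClass': 'GLACIER'}
                ],
                'NoncurrentVersionExpiration': {'NoncurrentDays': 365}
            }]
        }
    )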
" }, "NoncurrentVersionTransitionList":{ "type":"list", @@ -4823,7 +4824,7 @@ "locationName":"CloudFunctionConfiguration" } }, - "documentation":"Container for specifying the notification configuration of the bucket. If this element is empty, notifications are turned off on the bucket." + "documentation":"

Container for specifying the notification configuration of the bucket. If this element is empty, notifications are turned off on the bucket.

" }, "NotificationConfigurationDeprecated":{ "type":"structure", @@ -4841,11 +4842,11 @@ "locationName":"S3Key" } }, - "documentation":"Container for object key name filtering rules. For information about key name filtering, go to Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide." + "documentation":"

Container for object key name filtering rules. For information about key name filtering, go to Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.

" }, "NotificationId":{ "type":"string", - "documentation":"Optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID." + "documentation":"

Optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID.

" }, "Object":{ "type":"structure", @@ -4856,7 +4857,7 @@ "Size":{"shape":"Size"}, "StorageClass":{ "shape":"ObjectStorageClass", - "documentation":"The class of storage used to store the object." + "documentation":"

The class of storage used to store the object.

" }, "Owner":{"shape":"Owner"} } @@ -4865,7 +4866,7 @@ "type":"structure", "members":{ }, - "documentation":"This operation is not allowed against this storage tier", + "documentation":"

This operation is not allowed against this storage tier

", "exception":true }, "ObjectCannedACL":{ @@ -4886,11 +4887,11 @@ "members":{ "Key":{ "shape":"ObjectKey", - "documentation":"Key name of the object to delete." + "documentation":"

Key name of the object to delete.

" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"VersionId for the specific version of the object to delete." + "documentation":"

VersionId for the specific version of the object to delete.

" } } }, @@ -4912,7 +4913,7 @@ "type":"structure", "members":{ }, - "documentation":"The source object of the COPY operation is not in the active tier and is only stored in Amazon Glacier.", + "documentation":"

The source object of the COPY operation is not in the active tier and is only stored in Amazon Glacier.

", "exception":true }, "ObjectStorageClass":{ @@ -4931,27 +4932,27 @@ "ETag":{"shape":"ETag"}, "Size":{ "shape":"Size", - "documentation":"Size in bytes of the object." + "documentation":"

Size in bytes of the object.

" }, "StorageClass":{ "shape":"ObjectVersionStorageClass", - "documentation":"The class of storage used to store the object." + "documentation":"

The class of storage used to store the object.

" }, "Key":{ "shape":"ObjectKey", - "documentation":"The object key." + "documentation":"

The object key.

" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"Version ID of an object." + "documentation":"

Version ID of an object.

" }, "IsLatest":{ "shape":"IsLatest", - "documentation":"Specifies whether the object is (true) or is not (false) the latest version of an object." + "documentation":"

Specifies whether the object is (true) or is not (false) the latest version of an object.

" }, "LastModified":{ "shape":"LastModified", - "documentation":"Date and time the object was last modified." + "documentation":"

Date and time the object was last modified.

" }, "Owner":{"shape":"Owner"} } @@ -4971,24 +4972,24 @@ "members":{ "S3":{ "shape":"S3Location", - "documentation":"Describes an S3 location that will receive the results of the restore request." + "documentation":"

Describes an S3 location that will receive the results of the restore request.

" } }, - "documentation":"Describes the location where the restore job's output is stored." + "documentation":"

Describes the location where the restore job's output is stored.

" }, "OutputSerialization":{ "type":"structure", "members":{ "CSV":{ "shape":"CSVOutput", - "documentation":"Describes the serialization of CSV-encoded Select results." + "documentation":"

Describes the serialization of CSV-encoded Select results.

" }, "JSON":{ "shape":"JSONOutput", - "documentation":"Specifies JSON as request's output serialization format." + "documentation":"

Specifies JSON as request's output serialization format.

" } }, - "documentation":"Describes how results of the Select job are serialized." + "documentation":"

Describes how results of the Select job are serialized.

" }, "Owner":{ "type":"structure", @@ -5006,19 +5007,19 @@ "members":{ "PartNumber":{ "shape":"PartNumber", - "documentation":"Part number identifying the part. This is a positive integer between 1 and 10,000." + "documentation":"

Part number identifying the part. This is a positive integer between 1 and 10,000.

" }, "LastModified":{ "shape":"LastModified", - "documentation":"Date and time at which the part was uploaded." + "documentation":"

Date and time at which the part was uploaded.

" }, "ETag":{ "shape":"ETag", - "documentation":"Entity tag returned when the part was uploaded." + "documentation":"

Entity tag returned when the part was uploaded.

" }, "Size":{ "shape":"Size", - "documentation":"Size of the uploaded part data." + "documentation":"

Size of the uploaded part data.

" } } }, @@ -5054,15 +5055,15 @@ "members":{ "BytesScanned":{ "shape":"BytesScanned", - "documentation":"Current number of object bytes scanned." + "documentation":"

Current number of object bytes scanned.

" }, "BytesProcessed":{ "shape":"BytesProcessed", - "documentation":"Current number of uncompressed object bytes processed." + "documentation":"

Current number of uncompressed object bytes processed.

" }, "BytesReturned":{ "shape":"BytesReturned", - "documentation":"Current number of bytes of records payload data returned." + "documentation":"

Current number of bytes of records payload data returned.

" } } }, @@ -5071,7 +5072,7 @@ "members":{ "Details":{ "shape":"Progress", - "documentation":"The Progress event details.", + "documentation":"

The Progress event details.

", "eventpayload":true } }, @@ -5093,13 +5094,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"Name of the bucket for which the accelerate configuration is set.", + "documentation":"

Name of the bucket for which the accelerate configuration is set.

", "location":"uri", "locationName":"Bucket" }, "AccelerateConfiguration":{ "shape":"AccelerateConfiguration", - "documentation":"Specifies the Accelerate Configuration you want to set for the bucket.", + "documentation":"

Specifies the Accelerate Configuration you want to set for the bucket.

", "locationName":"AccelerateConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} } @@ -5112,7 +5113,7 @@ "members":{ "ACL":{ "shape":"BucketCannedACL", - "documentation":"The canned ACL to apply to the bucket.", + "documentation":"

The canned ACL to apply to the bucket.

", "location":"header", "locationName":"x-amz-acl" }, @@ -5133,31 +5134,31 @@ }, "GrantFullControl":{ "shape":"GrantFullControl", - "documentation":"Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.", + "documentation":"

Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.

", "location":"header", "locationName":"x-amz-grant-full-control" }, "GrantRead":{ "shape":"GrantRead", - "documentation":"Allows grantee to list the objects in the bucket.", + "documentation":"

Allows grantee to list the objects in the bucket.

", "location":"header", "locationName":"x-amz-grant-read" }, "GrantReadACP":{ "shape":"GrantReadACP", - "documentation":"Allows grantee to read the bucket ACL.", + "documentation":"

Allows grantee to read the bucket ACL.

", "location":"header", "locationName":"x-amz-grant-read-acp" }, "GrantWrite":{ "shape":"GrantWrite", - "documentation":"Allows grantee to create, overwrite, and delete any object in the bucket.", + "documentation":"

Allows grantee to create, overwrite, and delete any object in the bucket.

", "location":"header", "locationName":"x-amz-grant-write" }, "GrantWriteACP":{ "shape":"GrantWriteACP", - "documentation":"Allows grantee to write the ACL for the applicable bucket.", + "documentation":"

Allows grantee to write the ACL for the applicable bucket.

", "location":"header", "locationName":"x-amz-grant-write-acp" } @@ -5174,19 +5175,19 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket to which an analytics configuration is stored.", + "documentation":"

The name of the bucket to which an analytics configuration is stored.

", "location":"uri", "locationName":"Bucket" }, "Id":{ "shape":"AnalyticsId", - "documentation":"The identifier used to represent an analytics configuration.", + "documentation":"

The identifier used to represent an analytics configuration.

", "location":"querystring", "locationName":"id" }, "AnalyticsConfiguration":{ "shape":"AnalyticsConfiguration", - "documentation":"The configuration and any analyses for the analytics filter.", + "documentation":"

The configuration and any analyses for the analytics filter.

", "locationName":"AnalyticsConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} } @@ -5227,13 +5228,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket for which the server-side encryption configuration is set.", + "documentation":"

The name of the bucket for which the server-side encryption configuration is set.

", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"The base64-encoded 128-bit MD5 digest of the server-side encryption configuration.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the server-side encryption configuration.

", "location":"header", "locationName":"Content-MD5" }, @@ -5255,19 +5256,19 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket where the inventory configuration will be stored.", + "documentation":"

The name of the bucket where the inventory configuration will be stored.

", "location":"uri", "locationName":"Bucket" }, "Id":{ "shape":"InventoryId", - "documentation":"The ID used to identify the inventory configuration.", + "documentation":"

The ID used to identify the inventory configuration.

", "location":"querystring", "locationName":"id" }, "InventoryConfiguration":{ "shape":"InventoryConfiguration", - "documentation":"Specifies the inventory configuration.", + "documentation":"

Specifies the inventory configuration.

", "locationName":"InventoryConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} } @@ -5348,19 +5349,19 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The name of the bucket for which the metrics configuration is set.", + "documentation":"

The name of the bucket for which the metrics configuration is set.

", "location":"uri", "locationName":"Bucket" }, "Id":{ "shape":"MetricsId", - "documentation":"The ID used to identify the metrics configuration.", + "documentation":"

The ID used to identify the metrics configuration.

", "location":"querystring", "locationName":"id" }, "MetricsConfiguration":{ "shape":"MetricsConfiguration", - "documentation":"Specifies the metrics configuration.", + "documentation":"

Specifies the metrics configuration.

", "locationName":"MetricsConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} } @@ -5431,13 +5432,13 @@ }, "ConfirmRemoveSelfBucketAccess":{ "shape":"ConfirmRemoveSelfBucketAccess", - "documentation":"Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future.", + "documentation":"

Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future.

", "location":"header", "locationName":"x-amz-confirm-remove-self-bucket-access" }, "Policy":{ "shape":"Policy", - "documentation":"The bucket policy as a JSON document." + "documentation":"

The bucket policy as a JSON document.

" } }, "payload":"Policy" @@ -5536,7 +5537,7 @@ }, "MFA":{ "shape":"MFA", - "documentation":"The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.", + "documentation":"

The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.

", "location":"header", "locationName":"x-amz-mfa" }, @@ -5592,7 +5593,7 @@ "members":{ "ACL":{ "shape":"ObjectCannedACL", - "documentation":"The canned ACL to apply to the object.", + "documentation":"

The canned ACL to apply to the object.

", "location":"header", "locationName":"x-amz-acl" }, @@ -5613,31 +5614,31 @@ }, "GrantFullControl":{ "shape":"GrantFullControl", - "documentation":"Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.", + "documentation":"

Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.

", "location":"header", "locationName":"x-amz-grant-full-control" }, "GrantRead":{ "shape":"GrantRead", - "documentation":"Allows grantee to list the objects in the bucket.", + "documentation":"

Allows grantee to list the objects in the bucket.

", "location":"header", "locationName":"x-amz-grant-read" }, "GrantReadACP":{ "shape":"GrantReadACP", - "documentation":"Allows grantee to read the bucket ACL.", + "documentation":"

Allows grantee to read the bucket ACL.

", "location":"header", "locationName":"x-amz-grant-read-acp" }, "GrantWrite":{ "shape":"GrantWrite", - "documentation":"Allows grantee to create, overwrite, and delete any object in the bucket.", + "documentation":"

Allows grantee to create, overwrite, and delete any object in the bucket.

", "location":"header", "locationName":"x-amz-grant-write" }, "GrantWriteACP":{ "shape":"GrantWriteACP", - "documentation":"Allows grantee to write the ACL for the applicable bucket.", + "documentation":"

Allows grantee to write the ACL for the applicable bucket.

", "location":"header", "locationName":"x-amz-grant-write-acp" }, @@ -5653,7 +5654,7 @@ }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"VersionId used to reference a specific version of the object.", + "documentation":"

VersionId used to reference a specific version of the object.

", "location":"querystring", "locationName":"versionId" } @@ -5665,43 +5666,43 @@ "members":{ "Expiration":{ "shape":"Expiration", - "documentation":"If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.", + "documentation":"

If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.

", "location":"header", "locationName":"x-amz-expiration" }, "ETag":{ "shape":"ETag", - "documentation":"Entity tag for the uploaded object.", + "documentation":"

Entity tag for the uploaded object.

", "location":"header", "locationName":"ETag" }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

", "location":"header", "locationName":"x-amz-server-side-encryption" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"Version of the object.", + "documentation":"

Version of the object.

", "location":"header", "locationName":"x-amz-version-id" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "documentation":"

If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "documentation":"

If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -5721,144 +5722,144 @@ "members":{ "ACL":{ "shape":"ObjectCannedACL", - "documentation":"The canned ACL to apply to the object.", + "documentation":"

The canned ACL to apply to the object.

", "location":"header", "locationName":"x-amz-acl" }, "Body":{ "shape":"Body", - "documentation":"Object data.", + "documentation":"

Object data.

", "streaming":true }, "Bucket":{ "shape":"BucketName", - "documentation":"Name of the bucket to which the PUT operation was initiated.", + "documentation":"

Name of the bucket to which the PUT operation was initiated.

", "location":"uri", "locationName":"Bucket" }, "CacheControl":{ "shape":"CacheControl", - "documentation":"Specifies caching behavior along the request/reply chain.", + "documentation":"

Specifies caching behavior along the request/reply chain.

", "location":"header", "locationName":"Cache-Control" }, "ContentDisposition":{ "shape":"ContentDisposition", - "documentation":"Specifies presentational information for the object.", + "documentation":"

Specifies presentational information for the object.

", "location":"header", "locationName":"Content-Disposition" }, "ContentEncoding":{ "shape":"ContentEncoding", - "documentation":"Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "documentation":"

Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.

", "location":"header", "locationName":"Content-Encoding" }, "ContentLanguage":{ "shape":"ContentLanguage", - "documentation":"The language the content is in.", + "documentation":"

The language the content is in.

", "location":"header", "locationName":"Content-Language" }, "ContentLength":{ "shape":"ContentLength", - "documentation":"Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.", + "documentation":"

Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.

", "location":"header", "locationName":"Content-Length" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"The base64-encoded 128-bit MD5 digest of the part data.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the part data.

", "location":"header", "locationName":"Content-MD5" }, "ContentType":{ "shape":"ContentType", - "documentation":"A standard MIME type describing the format of the object data.", + "documentation":"

A standard MIME type describing the format of the object data.

", "location":"header", "locationName":"Content-Type" }, "Expires":{ "shape":"Expires", - "documentation":"The date and time at which the object is no longer cacheable.", + "documentation":"

The date and time at which the object is no longer cacheable.

", "location":"header", "locationName":"Expires" }, "GrantFullControl":{ "shape":"GrantFullControl", - "documentation":"Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.", + "documentation":"

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

", "location":"header", "locationName":"x-amz-grant-full-control" }, "GrantRead":{ "shape":"GrantRead", - "documentation":"Allows grantee to read the object data and its metadata.", + "documentation":"

Allows grantee to read the object data and its metadata.

", "location":"header", "locationName":"x-amz-grant-read" }, "GrantReadACP":{ "shape":"GrantReadACP", - "documentation":"Allows grantee to read the object ACL.", + "documentation":"

Allows grantee to read the object ACL.

", "location":"header", "locationName":"x-amz-grant-read-acp" }, "GrantWriteACP":{ "shape":"GrantWriteACP", - "documentation":"Allows grantee to write the ACL for the applicable object.", + "documentation":"

Allows grantee to write the ACL for the applicable object.

", "location":"header", "locationName":"x-amz-grant-write-acp" }, "Key":{ "shape":"ObjectKey", - "documentation":"Object key for which the PUT operation was initiated.", + "documentation":"

Object key for which the PUT operation was initiated.

", "location":"uri", "locationName":"Key" }, "Metadata":{ "shape":"Metadata", - "documentation":"A map of metadata to store with the object in S3.", + "documentation":"

A map of metadata to store with the object in S3.

", "location":"headers", "locationName":"x-amz-meta-" }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

", "location":"header", "locationName":"x-amz-server-side-encryption" }, "StorageClass":{ "shape":"StorageClass", - "documentation":"The type of storage to use for the object. Defaults to 'STANDARD'.", + "documentation":"

The type of storage to use for the object. Defaults to 'STANDARD'.

", "location":"header", "locationName":"x-amz-storage-class" }, "WebsiteRedirectLocation":{ "shape":"WebsiteRedirectLocation", - "documentation":"If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "documentation":"

If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.

", "location":"header", "locationName":"x-amz-website-redirect-location" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "documentation":"

Specifies the algorithm to use to when encrypting the object (e.g., AES256).

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "documentation":"

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "documentation":"

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version", + "documentation":"

<p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version</p>",
          "location":"header",
          "locationName":"x-amz-server-side-encryption-aws-kms-key-id"
        },
@@ -5869,7 +5870,7 @@
        },
        "Tagging":{
          "shape":"TaggingHeader",
-          "documentation":"The tag-set for the object. The tag-set must be encoded as URL Query parameters",
+          "documentation":"<p>The tag-set for the object. The tag-set must be encoded as URL Query parameters</p>

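As a hedged illustration of these PutObjectRequest members (the bucket, key, KMS key ARN, and tags below are placeholders), a call that combines SSE-KMS with the x-amz-tagging header looks roughly like this with a botocore client:

    import botocore.session

    s3 = botocore.session.get_session().create_client('s3')

    # Upload an object encrypted with a customer-managed KMS key and tag it.
    # The Tagging value is passed as URL query parameters, as documented above.
    s3.put_object(
        Bucket='my-bucket',
        Key='reports/2018/summary.csv',
        Body=b'col1,col2\n1,2\n',
        ServerSideEncryption='aws:kms',
        SSEKMSKeyId='arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID',
        Tagging='project=demo&classification=internal'
    )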
", "location":"header", "locationName":"x-amz-tagging" } @@ -5933,7 +5934,7 @@ "Id":{"shape":"NotificationId"}, "QueueArn":{ "shape":"QueueArn", - "documentation":"Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects events of specified type.", + "documentation":"

Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects events of specified type.

", "locationName":"Queue" }, "Events":{ @@ -5942,7 +5943,7 @@ }, "Filter":{"shape":"NotificationConfigurationFilter"} }, - "documentation":"Container for specifying an configuration when you want Amazon S3 to publish events to an Amazon Simple Queue Service (Amazon SQS) queue." + "documentation":"

<p>Container for specifying a configuration when you want Amazon S3 to publish events to an Amazon Simple Queue Service (Amazon SQS) queue.</p>

" }, "QueueConfigurationDeprecated":{ "type":"structure", @@ -5981,7 +5982,7 @@ "members":{ "Payload":{ "shape":"Body", - "documentation":"The byte array of partial, one or more result records.", + "documentation":"

The byte array of partial, one or more result records.

", "eventpayload":true } }, @@ -5992,23 +5993,23 @@ "members":{ "HostName":{ "shape":"HostName", - "documentation":"The host name to use in the redirect request." + "documentation":"

The host name to use in the redirect request.

" }, "HttpRedirectCode":{ "shape":"HttpRedirectCode", - "documentation":"The HTTP redirect code to use on the response. Not required if one of the siblings is present." + "documentation":"

The HTTP redirect code to use on the response. Not required if one of the siblings is present.

" }, "Protocol":{ "shape":"Protocol", - "documentation":"Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request." + "documentation":"

Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request.

" }, "ReplaceKeyPrefixWith":{ "shape":"ReplaceKeyPrefixWith", - "documentation":"The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided." + "documentation":"

The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided.

" }, "ReplaceKeyWith":{ "shape":"ReplaceKeyWith", - "documentation":"The specific object key to use in the redirect request. For example, redirect request to error.html. Not required if one of the sibling is present. Can be present only if ReplaceKeyPrefixWith is not provided." + "documentation":"

The specific object key to use in the redirect request. For example, redirect request to error.html. Not required if one of the sibling is present. Can be present only if ReplaceKeyPrefixWith is not provided.

" } } }, @@ -6018,11 +6019,11 @@ "members":{ "HostName":{ "shape":"HostName", - "documentation":"Name of the host where requests will be redirected." + "documentation":"

Name of the host where requests will be redirected.

" }, "Protocol":{ "shape":"Protocol", - "documentation":"Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request." + "documentation":"

Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request.

" } } }, @@ -6038,15 +6039,15 @@ "members":{ "Role":{ "shape":"Role", - "documentation":"Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating the objects." + "documentation":"

Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating the objects.

" }, "Rules":{ "shape":"ReplicationRules", - "documentation":"Container for information about a particular replication rule. Replication configuration must have at least one rule and can contain up to 1,000 rules.", + "documentation":"

<p>Container for information about a particular replication rule. Replication configuration must have at least one rule and can contain up to 1,000 rules.</p>",
          "locationName":"Rule"
        }
      },
-      "documentation":"Container for replication rules. You can add as many as 1,000 rules. Total replication configuration size can be up to 2 MB."
+      "documentation":"<p>Container for replication rules. You can add as many as 1,000 rules. Total replication configuration size can be up to 2 MB.</p>

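A minimal sketch of putting such a replication configuration with a botocore client; the bucket names, account ID, and role are placeholders, and it assumes versioning is already enabled on both buckets:

    import botocore.session

    s3 = botocore.session.get_session().create_client('s3')

    # Replicate every object under 'logs/' in the source bucket to the
    # destination bucket, using an existing replication role.
    s3.put_bucket_replication(
        Bucket='my-source-bucket',
        ReplicationConfiguration={
            'Role': 'arn:aws:iam::111122223333:role/s3-replication-role',
            'Rules': [{
                'ID': 'replicate-logs',
                'Prefix': 'logs/',
                'Status': 'Enabled',
                'Destination': {'Bucket': 'arn:aws:s3:::my-destination-bucket'}
            }]
        }
    )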
" }, "ReplicationRule":{ "type":"structure", @@ -6058,26 +6059,26 @@ "members":{ "ID":{ "shape":"ID", - "documentation":"Unique identifier for the rule. The value cannot be longer than 255 characters." + "documentation":"

Unique identifier for the rule. The value cannot be longer than 255 characters.

" }, "Prefix":{ "shape":"Prefix", - "documentation":"Object keyname prefix identifying one or more objects to which the rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes are not supported." + "documentation":"

Object keyname prefix identifying one or more objects to which the rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes are not supported.

" }, "Status":{ "shape":"ReplicationRuleStatus", - "documentation":"The rule is ignored if status is not Enabled." + "documentation":"

The rule is ignored if status is not Enabled.

" }, "SourceSelectionCriteria":{ "shape":"SourceSelectionCriteria", - "documentation":"Container for filters that define which source objects should be replicated." + "documentation":"

Container for filters that define which source objects should be replicated.

" }, "Destination":{ "shape":"Destination", - "documentation":"Container for replication destination information." + "documentation":"

Container for replication destination information.

" } }, - "documentation":"Container for information about a particular replication rule." + "documentation":"

Container for information about a particular replication rule.

" }, "ReplicationRuleStatus":{ "type":"string", @@ -6102,12 +6103,12 @@ }, "RequestCharged":{ "type":"string", - "documentation":"If present, indicates that the requester was successfully charged for the request.", + "documentation":"

If present, indicates that the requester was successfully charged for the request.

", "enum":["requester"] }, "RequestPayer":{ "type":"string", - "documentation":"Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html", + "documentation":"

Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html

", "enum":["requester"] }, "RequestPaymentConfiguration":{ @@ -6116,7 +6117,7 @@ "members":{ "Payer":{ "shape":"Payer", - "documentation":"Specifies who pays for the download and request fees." + "documentation":"

Specifies who pays for the download and request fees.

" } } }, @@ -6125,7 +6126,7 @@ "members":{ "Enabled":{ "shape":"EnableRequestProgress", - "documentation":"Specifies whether periodic QueryProgress frames should be sent. Valid values: TRUE, FALSE. Default value: FALSE." + "documentation":"

Specifies whether periodic QueryProgress frames should be sent. Valid values: TRUE, FALSE. Default value: FALSE.

" } } }, @@ -6146,7 +6147,7 @@ }, "RestoreOutputPath":{ "shape":"RestoreOutputPath", - "documentation":"Indicates the path in the provided S3 output location where Select results will be restored to.", + "documentation":"

Indicates the path in the provided S3 output location where Select results will be restored to.

", "location":"header", "locationName":"x-amz-restore-output-path" } @@ -6193,34 +6194,34 @@ "members":{ "Days":{ "shape":"Days", - "documentation":"Lifetime of the active copy in days. Do not use with restores that specify OutputLocation." + "documentation":"

Lifetime of the active copy in days. Do not use with restores that specify OutputLocation.

" }, "GlacierJobParameters":{ "shape":"GlacierJobParameters", - "documentation":"Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation." + "documentation":"

Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.

" }, "Type":{ "shape":"RestoreRequestType", - "documentation":"Type of restore request." + "documentation":"

Type of restore request.

" }, "Tier":{ "shape":"Tier", - "documentation":"Glacier retrieval tier at which the restore will be processed." + "documentation":"

Glacier retrieval tier at which the restore will be processed.

" }, "Description":{ "shape":"Description", - "documentation":"The optional description for the job." + "documentation":"

The optional description for the job.

" }, "SelectParameters":{ "shape":"SelectParameters", - "documentation":"Describes the parameters for Select job types." + "documentation":"

Describes the parameters for Select job types.

" }, "OutputLocation":{ "shape":"OutputLocation", - "documentation":"Describes the location where the restore job's output is stored." + "documentation":"

<p>Describes the location where the restore job's output is stored.</p>"
        }
      },
-      "documentation":"Container for restore job parameters."
+      "documentation":"<p>Container for restore job parameters.</p>

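A hedged sketch of passing these restore parameters through a botocore client (bucket, key, lifetime, and tier are placeholders):

    import botocore.session

    s3 = botocore.session.get_session().create_client('s3')

    # Ask S3 to restore a Glacier-archived object for 7 days using the
    # Standard retrieval tier.
    s3.restore_object(
        Bucket='my-archive-bucket',
        Key='backups/2017/db-dump.tar.gz',
        RestoreRequest={
            'Days': 7,
            'GlacierJobParameters': {'Tier': 'Standard'}
        }
    )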
" }, "RestoreRequestType":{ "type":"string", @@ -6233,11 +6234,11 @@ "members":{ "Condition":{ "shape":"Condition", - "documentation":"A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error." + "documentation":"

A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error.

" }, "Redirect":{ "shape":"Redirect", - "documentation":"Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can can specify a different error code to return." + "documentation":"

<p>Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can specify a different error code to return.</p>

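As an illustrative, non-authoritative sketch (bucket name and prefixes are placeholders), a routing rule like the ones described here is attached through the bucket website configuration:

    import botocore.session

    s3 = botocore.session.get_session().create_client('s3')

    # Serve a static site and redirect requests under 'docs/' to 'documents/'.
    s3.put_bucket_website(
        Bucket='my-site-bucket',
        WebsiteConfiguration={
            'IndexDocument': {'Suffix': 'index.html'},
            'ErrorDocument': {'Key': 'error.html'},
            'RoutingRules': [{
                'Condition': {'KeyPrefixEquals': 'docs/'},
                'Redirect': {'ReplaceKeyPrefixWith': 'documents/'}
            }]
        }
    )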
" } } }, @@ -6258,15 +6259,15 @@ "Expiration":{"shape":"LifecycleExpiration"}, "ID":{ "shape":"ID", - "documentation":"Unique identifier for the rule. The value cannot be longer than 255 characters." + "documentation":"

Unique identifier for the rule. The value cannot be longer than 255 characters.

" }, "Prefix":{ "shape":"Prefix", - "documentation":"Prefix identifying one or more objects to which the rule applies." + "documentation":"

Prefix identifying one or more objects to which the rule applies.

" }, "Status":{ "shape":"ExpirationStatus", - "documentation":"If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied." + "documentation":"

If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied.

" }, "Transition":{"shape":"Transition"}, "NoncurrentVersionTransition":{"shape":"NoncurrentVersionTransition"}, @@ -6287,7 +6288,7 @@ "locationName":"FilterRule" } }, - "documentation":"Container for object key name prefix and suffix filtering rules." + "documentation":"

Container for object key name prefix and suffix filtering rules.

" }, "S3Location":{ "type":"structure", @@ -6298,35 +6299,35 @@ "members":{ "BucketName":{ "shape":"BucketName", - "documentation":"The name of the bucket where the restore results will be placed." + "documentation":"

The name of the bucket where the restore results will be placed.

" }, "Prefix":{ "shape":"LocationPrefix", - "documentation":"The prefix that is prepended to the restore results for this request." + "documentation":"

The prefix that is prepended to the restore results for this request.

" }, "Encryption":{"shape":"Encryption"}, "CannedACL":{ "shape":"ObjectCannedACL", - "documentation":"The canned ACL to apply to the restore results." + "documentation":"

The canned ACL to apply to the restore results.

" }, "AccessControlList":{ "shape":"Grants", - "documentation":"A list of grants that control access to the staged results." + "documentation":"

A list of grants that control access to the staged results.

" }, "Tagging":{ "shape":"Tagging", - "documentation":"The tag-set that is applied to the restore results." + "documentation":"

The tag-set that is applied to the restore results.

" }, "UserMetadata":{ "shape":"UserMetadata", - "documentation":"A list of metadata to store with the restore results in S3." + "documentation":"

A list of metadata to store with the restore results in S3.

" }, "StorageClass":{ "shape":"StorageClass", - "documentation":"The class of storage used to store the restore results." + "documentation":"

The class of storage used to store the restore results.

" } }, - "documentation":"Describes an S3 location that will receive the results of the restore request." + "documentation":"

Describes an S3 location that will receive the results of the restore request.

" }, "SSECustomerAlgorithm":{"type":"string"}, "SSECustomerKey":{ @@ -6340,10 +6341,10 @@ "members":{ "KeyId":{ "shape":"SSEKMSKeyId", - "documentation":"Specifies the ID of the AWS Key Management Service (KMS) master encryption key to use for encrypting Inventory reports." + "documentation":"

Specifies the ID of the AWS Key Management Service (KMS) master encryption key to use for encrypting Inventory reports.

" } }, - "documentation":"Specifies the use of SSE-KMS to encrypt delievered Inventory reports.", + "documentation":"

<p>Specifies the use of SSE-KMS to encrypt delivered Inventory reports.</p>

", "locationName":"SSE-KMS" }, "SSEKMSKeyId":{ @@ -6354,7 +6355,7 @@ "type":"structure", "members":{ }, - "documentation":"Specifies the use of SSE-S3 to encrypt delievered Inventory reports.", + "documentation":"

<p>Specifies the use of SSE-S3 to encrypt delivered Inventory reports.</p>

", "locationName":"SSE-S3" }, "SelectObjectContentEventStream":{ @@ -6362,23 +6363,23 @@ "members":{ "Records":{ "shape":"RecordsEvent", - "documentation":"The Records Event." + "documentation":"

The Records Event.

" }, "Stats":{ "shape":"StatsEvent", - "documentation":"The Stats Event." + "documentation":"

The Stats Event.

" }, "Progress":{ "shape":"ProgressEvent", - "documentation":"The Progress Event." + "documentation":"

The Progress Event.

" }, "Cont":{ "shape":"ContinuationEvent", - "documentation":"The Continuation Event." + "documentation":"

The Continuation Event.

" }, "End":{ "shape":"EndEvent", - "documentation":"The End Event." + "documentation":"

The End Event.

" } }, "eventstream":true @@ -6403,56 +6404,56 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"The S3 Bucket.", + "documentation":"

The S3 Bucket.

", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"The Object Key.", + "documentation":"

The Object Key.

", "location":"uri", "locationName":"Key" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"The SSE Algorithm used to encrypt the object. For more information, go to Server-Side Encryption (Using Customer-Provided Encryption Keys.", + "documentation":"

The SSE Algorithm used to encrypt the object. For more information, go to Server-Side Encryption (Using Customer-Provided Encryption Keys.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"The SSE Customer Key. For more information, go to Server-Side Encryption (Using Customer-Provided Encryption Keys.", + "documentation":"

The SSE Customer Key. For more information, go to Server-Side Encryption (Using Customer-Provided Encryption Keys.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"The SSE Customer Key MD5. For more information, go to Server-Side Encryption (Using Customer-Provided Encryption Keys.", + "documentation":"

The SSE Customer Key MD5. For more information, go to Server-Side Encryption (Using Customer-Provided Encryption Keys.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "Expression":{ "shape":"Expression", - "documentation":"The expression that is used to query the object." + "documentation":"

The expression that is used to query the object.

" }, "ExpressionType":{ "shape":"ExpressionType", - "documentation":"The type of the provided expression (e.g., SQL)." + "documentation":"

The type of the provided expression (e.g., SQL).

" }, "RequestProgress":{ "shape":"RequestProgress", - "documentation":"Specifies if periodic request progress information should be enabled." + "documentation":"

Specifies if periodic request progress information should be enabled.

" }, "InputSerialization":{ "shape":"InputSerialization", - "documentation":"Describes the format of the data in the object that is being queried." + "documentation":"

Describes the format of the data in the object that is being queried.

" }, "OutputSerialization":{ "shape":"OutputSerialization", - "documentation":"Describes the format of the data that you want Amazon S3 to return in response." + "documentation":"

<p>Describes the format of the data that you want Amazon S3 to return in response.</p>"
        }
      },
-      "documentation":"Request to filter the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. For more information, go to S3Select API Documentation."
+      "documentation":"<p>Request to filter the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. For more information, go to S3Select API Documentation.</p>

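A hedged end-to-end sketch of such a request with a botocore client (bucket, key, and query are placeholders); the response Payload is the event stream whose Records, Stats, and End members are defined nearby in this model:

    import botocore.session

    s3 = botocore.session.get_session().create_client('s3')

    # Run an S3 Select query over a CSV object and stream the matching records.
    response = s3.select_object_content(
        Bucket='my-bucket',
        Key='reports/2018/summary.csv',
        Expression="SELECT s.col1 FROM S3Object s WHERE s.col2 = '2'",
        ExpressionType='SQL',
        InputSerialization={'CSV': {'FileHeaderInfo': 'USE'}},
        OutputSerialization={'CSV': {}}
    )
    for event in response['Payload']:
        if 'Records' in event:
            print(event['Records']['Payload'].decode('utf-8'))
        elif 'Stats' in event:
            print('Bytes scanned:', event['Stats']['Details']['BytesScanned'])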
" }, "SelectParameters":{ "type":"structure", @@ -6465,22 +6466,22 @@ "members":{ "InputSerialization":{ "shape":"InputSerialization", - "documentation":"Describes the serialization format of the object." + "documentation":"

Describes the serialization format of the object.

" }, "ExpressionType":{ "shape":"ExpressionType", - "documentation":"The type of the provided expression (e.g., SQL)." + "documentation":"

The type of the provided expression (e.g., SQL).

" }, "Expression":{ "shape":"Expression", - "documentation":"The expression that is used to query the object." + "documentation":"

The expression that is used to query the object.

" }, "OutputSerialization":{ "shape":"OutputSerialization", - "documentation":"Describes how the results of the Select job are serialized." + "documentation":"

Describes how the results of the Select job are serialized.

" } }, - "documentation":"Describes the parameters for Select job types." + "documentation":"

Describes the parameters for Select job types.

" }, "ServerSideEncryption":{ "type":"string", @@ -6495,14 +6496,14 @@ "members":{ "SSEAlgorithm":{ "shape":"ServerSideEncryption", - "documentation":"Server-side encryption algorithm to use for the default encryption." + "documentation":"

Server-side encryption algorithm to use for the default encryption.

" }, "KMSMasterKeyID":{ "shape":"SSEKMSKeyId", - "documentation":"KMS master key ID to use for the default encryption. This parameter is allowed if SSEAlgorithm is aws:kms." + "documentation":"

<p>KMS master key ID to use for the default encryption. This parameter is allowed if SSEAlgorithm is aws:kms.</p>"
        }
      },
-      "documentation":"Describes the default server-side encryption to apply to new objects in the bucket. If Put Object request does not specify any server-side encryption, this default encryption will be applied."
+      "documentation":"<p>Describes the default server-side encryption to apply to new objects in the bucket. If Put Object request does not specify any server-side encryption, this default encryption will be applied.</p>

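A minimal sketch of setting this default encryption on a bucket with a botocore client (bucket name and KMS key ARN are placeholders):

    import botocore.session

    s3 = botocore.session.get_session().create_client('s3')

    # Apply default SSE-KMS encryption to all new objects in the bucket.
    s3.put_bucket_encryption(
        Bucket='my-bucket',
        ServerSideEncryptionConfiguration={
            'Rules': [{
                'ApplyServerSideEncryptionByDefault': {
                    'SSEAlgorithm': 'aws:kms',
                    'KMSMasterKeyID': 'arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID'
                }
            }]
        }
    )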
" }, "ServerSideEncryptionConfiguration":{ "type":"structure", @@ -6510,21 +6511,21 @@ "members":{ "Rules":{ "shape":"ServerSideEncryptionRules", - "documentation":"Container for information about a particular server-side encryption configuration rule.", + "documentation":"

Container for information about a particular server-side encryption configuration rule.

", "locationName":"Rule" } }, - "documentation":"Container for server-side encryption configuration rules. Currently S3 supports one rule only." + "documentation":"

Container for server-side encryption configuration rules. Currently S3 supports one rule only.

" }, "ServerSideEncryptionRule":{ "type":"structure", "members":{ "ApplyServerSideEncryptionByDefault":{ "shape":"ServerSideEncryptionByDefault", - "documentation":"Describes the default server-side encryption to apply to new objects in the bucket. If Put Object request does not specify any server-side encryption, this default encryption will be applied." + "documentation":"

Describes the default server-side encryption to apply to new objects in the bucket. If Put Object request does not specify any server-side encryption, this default encryption will be applied.

" } }, - "documentation":"Container for information about a particular server-side encryption configuration rule." + "documentation":"

Container for information about a particular server-side encryption configuration rule.

" }, "ServerSideEncryptionRules":{ "type":"list", @@ -6537,10 +6538,10 @@ "members":{ "SseKmsEncryptedObjects":{ "shape":"SseKmsEncryptedObjects", - "documentation":"Container for filter information of selection of KMS Encrypted S3 objects." + "documentation":"

Container for filter information of selection of KMS Encrypted S3 objects.

" } }, - "documentation":"Container for filters that define which source objects should be replicated." + "documentation":"

Container for filters that define which source objects should be replicated.

" }, "SseKmsEncryptedObjects":{ "type":"structure", @@ -6548,10 +6549,10 @@ "members":{ "Status":{ "shape":"SseKmsEncryptedObjectsStatus", - "documentation":"The replication for KMS encrypted S3 objects is disabled if status is not Enabled." + "documentation":"

The replication for KMS encrypted S3 objects is disabled if status is not Enabled.

" } }, - "documentation":"Container for filter information of selection of KMS Encrypted S3 objects." + "documentation":"

Container for filter information of selection of KMS Encrypted S3 objects.

" }, "SseKmsEncryptedObjectsStatus":{ "type":"string", @@ -6566,15 +6567,15 @@ "members":{ "BytesScanned":{ "shape":"BytesScanned", - "documentation":"Total number of object bytes scanned." + "documentation":"

Total number of object bytes scanned.

" }, "BytesProcessed":{ "shape":"BytesProcessed", - "documentation":"Total number of uncompressed object bytes processed." + "documentation":"

Total number of uncompressed object bytes processed.

" }, "BytesReturned":{ "shape":"BytesReturned", - "documentation":"Total number of bytes of records payload data returned." + "documentation":"

Total number of bytes of records payload data returned.

" } } }, @@ -6583,7 +6584,7 @@ "members":{ "Details":{ "shape":"Stats", - "documentation":"The Stats event details.", + "documentation":"

The Stats event details.

", "eventpayload":true } }, @@ -6603,7 +6604,7 @@ "members":{ "DataExport":{ "shape":"StorageClassAnalysisDataExport", - "documentation":"A container used to describe how data related to the storage class analysis should be exported." + "documentation":"

A container used to describe how data related to the storage class analysis should be exported.

" } } }, @@ -6616,11 +6617,11 @@ "members":{ "OutputSchemaVersion":{ "shape":"StorageClassAnalysisSchemaVersion", - "documentation":"The version of the output schema to use when exporting data. Must be V_1." + "documentation":"

The version of the output schema to use when exporting data. Must be V_1.

" }, "Destination":{ "shape":"AnalyticsExportDestination", - "documentation":"The place to store the data for an analysis." + "documentation":"

The place to store the data for an analysis.

" } } }, @@ -6638,11 +6639,11 @@ "members":{ "Key":{ "shape":"ObjectKey", - "documentation":"Name of the tag." + "documentation":"

Name of the tag.

" }, "Value":{ "shape":"Value", - "documentation":"Value of the tag." + "documentation":"

Value of the tag.

" } } }, @@ -6676,7 +6677,7 @@ "Grantee":{"shape":"Grantee"}, "Permission":{ "shape":"BucketLogsPermission", - "documentation":"Logging permissions assigned to the Grantee for the bucket." + "documentation":"

Logging permissions assigned to the Grantee for the bucket.

" } } }, @@ -6708,7 +6709,7 @@ "Id":{"shape":"NotificationId"}, "TopicArn":{ "shape":"TopicArn", - "documentation":"Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects events of specified type.", + "documentation":"

Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects events of specified type.

", "locationName":"Topic" }, "Events":{ @@ -6717,7 +6718,7 @@ }, "Filter":{"shape":"NotificationConfigurationFilter"} }, - "documentation":"Container for specifying the configuration when you want Amazon S3 to publish events to an Amazon Simple Notification Service (Amazon SNS) topic." + "documentation":"

Container for specifying the configuration when you want Amazon S3 to publish events to an Amazon Simple Notification Service (Amazon SNS) topic.

" }, "TopicConfigurationDeprecated":{ "type":"structure", @@ -6729,12 +6730,12 @@ }, "Event":{ "shape":"Event", - "documentation":"Bucket event for which to send notifications.", + "documentation":"

Bucket event for which to send notifications.

", "deprecated":true }, "Topic":{ "shape":"TopicArn", - "documentation":"Amazon SNS topic to which Amazon S3 will publish a message to report the specified events for the bucket." + "documentation":"

Amazon SNS topic to which Amazon S3 will publish a message to report the specified events for the bucket.

" } } }, @@ -6748,15 +6749,15 @@ "members":{ "Date":{ "shape":"Date", - "documentation":"Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format." + "documentation":"

Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.

" }, "Days":{ "shape":"Days", - "documentation":"Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer." + "documentation":"

Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.

" }, "StorageClass":{ "shape":"TransitionStorageClass", - "documentation":"The class of storage used to store the object." + "documentation":"

The class of storage used to store the object.

" } } }, @@ -6788,32 +6789,32 @@ "members":{ "CopySourceVersionId":{ "shape":"CopySourceVersionId", - "documentation":"The version of the source object that was copied, if you have enabled versioning on the source bucket.", + "documentation":"

The version of the source object that was copied, if you have enabled versioning on the source bucket.

", "location":"header", "locationName":"x-amz-copy-source-version-id" }, "CopyPartResult":{"shape":"CopyPartResult"}, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

", "location":"header", "locationName":"x-amz-server-side-encryption" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "documentation":"

If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "documentation":"

If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -6842,37 +6843,37 @@ }, "CopySource":{ "shape":"CopySource", - "documentation":"The name of the source bucket and key name of the source object, separated by a slash (/). Must be URL-encoded.", + "documentation":"

The name of the source bucket and key name of the source object, separated by a slash (/). Must be URL-encoded.

", "location":"header", "locationName":"x-amz-copy-source" }, "CopySourceIfMatch":{ "shape":"CopySourceIfMatch", - "documentation":"Copies the object if its entity tag (ETag) matches the specified tag.", + "documentation":"

Copies the object if its entity tag (ETag) matches the specified tag.

", "location":"header", "locationName":"x-amz-copy-source-if-match" }, "CopySourceIfModifiedSince":{ "shape":"CopySourceIfModifiedSince", - "documentation":"Copies the object if it has been modified since the specified time.", + "documentation":"

Copies the object if it has been modified since the specified time.

", "location":"header", "locationName":"x-amz-copy-source-if-modified-since" }, "CopySourceIfNoneMatch":{ "shape":"CopySourceIfNoneMatch", - "documentation":"Copies the object if its entity tag (ETag) is different than the specified ETag.", + "documentation":"

Copies the object if its entity tag (ETag) is different than the specified ETag.

", "location":"header", "locationName":"x-amz-copy-source-if-none-match" }, "CopySourceIfUnmodifiedSince":{ "shape":"CopySourceIfUnmodifiedSince", - "documentation":"Copies the object if it hasn't been modified since the specified time.", + "documentation":"

Copies the object if it hasn't been modified since the specified time.

", "location":"header", "locationName":"x-amz-copy-source-if-unmodified-since" }, "CopySourceRange":{ "shape":"CopySourceRange", - "documentation":"The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source. You can copy a range only if the source object is greater than 5 GB.", + "documentation":"

The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source. You can copy a range only if the source object is greater than 5 GB.
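As a hedged illustration of the bytes=first-last form described above, the sketch below copies the first 5 MiB of a large source object as part 1 of an existing multipart upload through a botocore client; the bucket names, keys, upload ID, and region are placeholders, not values from this patch.

import botocore.session

session = botocore.session.get_session()
s3 = session.create_client("s3", region_name="us-east-1")  # placeholder region

# Copy the first 5 MiB (zero-based, inclusive byte offsets) of the source
# object as part number 1 of a previously started multipart upload.
response = s3.upload_part_copy(
    Bucket="dest-bucket",
    Key="dest-key",
    UploadId="EXAMPLE-UPLOAD-ID",  # hypothetical, from create_multipart_upload()
    PartNumber=1,
    CopySource="source-bucket/large-source-object",
    CopySourceRange="bytes=0-5242879",
)
print(response["CopyPartResult"]["ETag"])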

", "location":"header", "locationName":"x-amz-copy-source-range" }, @@ -6883,49 +6884,49 @@ }, "PartNumber":{ "shape":"PartNumber", - "documentation":"Part number of part being copied. This is a positive integer between 1 and 10,000.", + "documentation":"

Part number of part being copied. This is a positive integer between 1 and 10,000.

", "location":"querystring", "locationName":"partNumber" }, "UploadId":{ "shape":"MultipartUploadId", - "documentation":"Upload ID identifying the multipart upload whose part is being copied.", + "documentation":"

Upload ID identifying the multipart upload whose part is being copied.

", "location":"querystring", "locationName":"uploadId" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "documentation":"

Specifies the algorithm to use when encrypting the object (e.g., AES256).

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.", + "documentation":"

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "documentation":"

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.
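As an aside on how this header relates to the key itself, both header values can be derived from the raw key bytes as sketched below; the 32-byte key is a placeholder, and botocore will normally compute the MD5 header for you when only the key is supplied.

import base64
import hashlib

raw_key = b"0" * 32  # placeholder 256-bit customer-provided key

# Values for the *-server-side-encryption-customer-key and
# *-customer-key-MD5 headers: base64 of the key and of its MD5 digest.
sse_customer_key = base64.b64encode(raw_key).decode("ascii")
sse_customer_key_md5 = base64.b64encode(hashlib.md5(raw_key).digest()).decode("ascii")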

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "CopySourceSSECustomerAlgorithm":{ "shape":"CopySourceSSECustomerAlgorithm", - "documentation":"Specifies the algorithm to use when decrypting the source object (e.g., AES256).", + "documentation":"

Specifies the algorithm to use when decrypting the source object (e.g., AES256).

", "location":"header", "locationName":"x-amz-copy-source-server-side-encryption-customer-algorithm" }, "CopySourceSSECustomerKey":{ "shape":"CopySourceSSECustomerKey", - "documentation":"Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.", + "documentation":"

Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.

", "location":"header", "locationName":"x-amz-copy-source-server-side-encryption-customer-key" }, "CopySourceSSECustomerKeyMD5":{ "shape":"CopySourceSSECustomerKeyMD5", - "documentation":"Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "documentation":"

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

", "location":"header", "locationName":"x-amz-copy-source-server-side-encryption-customer-key-MD5" }, @@ -6941,31 +6942,31 @@ "members":{ "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "documentation":"

The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

", "location":"header", "locationName":"x-amz-server-side-encryption" }, "ETag":{ "shape":"ETag", - "documentation":"Entity tag for the uploaded object.", + "documentation":"

Entity tag for the uploaded object.

", "location":"header", "locationName":"ETag" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "documentation":"

If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "documentation":"

If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -6987,60 +6988,60 @@ "members":{ "Body":{ "shape":"Body", - "documentation":"Object data.", + "documentation":"

Object data.

", "streaming":true }, "Bucket":{ "shape":"BucketName", - "documentation":"Name of the bucket to which the multipart upload was initiated.", + "documentation":"

Name of the bucket to which the multipart upload was initiated.

", "location":"uri", "locationName":"Bucket" }, "ContentLength":{ "shape":"ContentLength", - "documentation":"Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.", + "documentation":"

Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.

", "location":"header", "locationName":"Content-Length" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"The base64-encoded 128-bit MD5 digest of the part data.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the part data.

", "location":"header", "locationName":"Content-MD5" }, "Key":{ "shape":"ObjectKey", - "documentation":"Object key for which the multipart upload was initiated.", + "documentation":"

Object key for which the multipart upload was initiated.

", "location":"uri", "locationName":"Key" }, "PartNumber":{ "shape":"PartNumber", - "documentation":"Part number of part being uploaded. This is a positive integer between 1 and 10,000.", + "documentation":"

Part number of part being uploaded. This is a positive integer between 1 and 10,000.

", "location":"querystring", "locationName":"partNumber" }, "UploadId":{ "shape":"MultipartUploadId", - "documentation":"Upload ID identifying the multipart upload whose part is being uploaded.", + "documentation":"

Upload ID identifying the multipart upload whose part is being uploaded.

", "location":"querystring", "locationName":"uploadId" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "documentation":"

Specifies the algorithm to use when encrypting the object (e.g., AES256).

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.", + "documentation":"

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "documentation":"

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, @@ -7066,12 +7067,12 @@ "members":{ "MFADelete":{ "shape":"MFADelete", - "documentation":"Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned.", + "documentation":"

Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned.

", "locationName":"MfaDelete" }, "Status":{ "shape":"BucketVersioningStatus", - "documentation":"The versioning state of the bucket." + "documentation":"

The versioning state of the bucket.
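For context, a minimal botocore sketch that writes and then reads back this configuration might look like the following; the bucket name and region are placeholders, and changing MFADelete additionally requires the MFA request parameter.

import botocore.session

session = botocore.session.get_session()
s3 = session.create_client("s3", region_name="us-east-1")  # placeholder region

# Enable versioning on a hypothetical bucket; MFADelete is left unchanged.
s3.put_bucket_versioning(
    Bucket="example-bucket",
    VersioningConfiguration={"Status": "Enabled"},
)

config = s3.get_bucket_versioning(Bucket="example-bucket")
print(config.get("Status"), config.get("MFADelete"))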

" } } }, diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 43fdf06f..c64dc034 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -2,7 +2,7 @@ "version":"2.0", "metadata":{ "apiVersion":"2017-07-24", - "endpointPrefix":"sagemaker", + "endpointPrefix":"api.sagemaker", "jsonVersion":"1.1", "protocol":"json", "serviceAbbreviation":"SageMaker", @@ -127,6 +127,20 @@ ], "documentation":"

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a deep learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

For more information about Amazon SageMaker, see How It Works.

" }, + "CreateTransformJob":{ + "name":"CreateTransformJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTransformJobRequest"}, + "output":{"shape":"CreateTransformJobResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

To perform batch transformations, you create a transform job and use the data that you have readily available.

In the request body, you provide the following:

For more information about how batch transformation works in Amazon SageMaker, see How It Works.

" + }, "DeleteEndpoint":{ "name":"DeleteEndpoint", "http":{ @@ -134,7 +148,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteEndpointInput"}, - "documentation":"

Deletes an endpoint. Amazon SageMaker frees up all of the resources that were deployed when the endpoint was created.

" + "documentation":"

Deletes an endpoint. Amazon SageMaker frees up all of the resources that were deployed when the endpoint was created.

Amazon SageMaker retires any custom KMS key grants associated with the endpoint, meaning you don't need to use the RevokeGrant API call.

" }, "DeleteEndpointConfig":{ "name":"DeleteEndpointConfig", @@ -258,6 +272,19 @@ ], "documentation":"

Returns information about a training job.

" }, + "DescribeTransformJob":{ + "name":"DescribeTransformJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTransformJobRequest"}, + "output":{"shape":"DescribeTransformJobResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Returns information about a transform job.

" + }, "ListEndpointConfigs":{ "name":"ListEndpointConfigs", "http":{ @@ -351,6 +378,16 @@ ], "documentation":"

Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter tuning job launched.

" }, + "ListTransformJobs":{ + "name":"ListTransformJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTransformJobsRequest"}, + "output":{"shape":"ListTransformJobsResponse"}, + "documentation":"

Lists transform jobs.

" + }, "StartNotebookInstance":{ "name":"StartNotebookInstance", "http":{ @@ -373,7 +410,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched.

All model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All data that the training jobs write toAmazon CloudWatch Logs are still available in CloudWatch. After the tuning job moves to the Stopped state, it releases all reserved resources for the tuning job.

" + "documentation":"

Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched.

All model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All data that the training jobs write to Amazon CloudWatch Logs are still available in CloudWatch. After the tuning job moves to the Stopped state, it releases all reserved resources for the tuning job.

" }, "StopNotebookInstance":{ "name":"StopNotebookInstance", @@ -396,6 +433,18 @@ ], "documentation":"

Stops a training job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts, so the results of the training are not lost.

Training algorithms provided by Amazon SageMaker save the intermediate results of a model training job. This intermediate data is a valid model artifact. You can use the model artifacts that are saved when Amazon SageMaker stops a training job to create a model.

When it receives a StopTrainingJob request, Amazon SageMaker changes the status of the job to Stopping. After Amazon SageMaker stops the job, it sets the status to Stopped.

" }, + "StopTransformJob":{ + "name":"StopTransformJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopTransformJobRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Stops a transform job.

When Amazon SageMaker receives a StopTransformJob request, the status of the job changes to Stopping. After Amazon SageMaker stops the job, the status is set to Stopped. When you stop a transform job before it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.

" + }, "UpdateEndpoint":{ "name":"UpdateEndpoint", "http":{ @@ -450,6 +499,10 @@ } }, "shapes":{ + "Accept":{ + "type":"string", + "max":256 + }, "AddTagsInput":{ "type":"structure", "required":[ @@ -498,6 +551,20 @@ }, "documentation":"

Specifies the training algorithm to use in a CreateTrainingJob request.

For more information about algorithms provided by Amazon SageMaker, see Algorithms. For information about using your own algorithms, see your-algorithms.

" }, + "AssemblyType":{ + "type":"string", + "enum":[ + "None", + "Line" + ] + }, + "BatchStrategy":{ + "type":"string", + "enum":[ + "MultiRecord", + "SingleRecord" + ] + }, "CategoricalParameterRange":{ "type":"structure", "required":[ @@ -887,7 +954,7 @@ "members":{ "TrainingJobName":{ "shape":"TrainingJobName", - "documentation":"

The name of the training job. The name must be unique within an AWS Region in an AWS account. It appears in the Amazon SageMaker console.

" + "documentation":"

The name of the training job. The name must be unique within an AWS Region in an AWS account.

" }, "HyperParameters":{ "shape":"HyperParameters", @@ -937,6 +1004,68 @@ } } }, + "CreateTransformJobRequest":{ + "type":"structure", + "required":[ + "TransformJobName", + "ModelName", + "TransformInput", + "TransformOutput", + "TransformResources" + ], + "members":{ + "TransformJobName":{ + "shape":"TransformJobName", + "documentation":"

The name of the transform job. The name must be unique within an AWS Region in an AWS account.

" + }, + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the model that you want to use for the transform job. ModelName must be the name of an existing Amazon SageMaker model within an AWS Region in an AWS account.

" + }, + "MaxConcurrentTransforms":{ + "shape":"MaxConcurrentTransforms", + "documentation":"

The maximum number of parallel requests that can be sent to each instance in a transform job. This is good for algorithms that implement multiple workers on larger instances. The default value is 1. To allow Amazon SageMaker to determine the appropriate number for MaxConcurrentTransforms, set the value to 0.

" + }, + "MaxPayloadInMB":{ + "shape":"MaxPayloadInMB", + "documentation":"

The maximum payload size allowed, in MB. A payload is the data portion of a record (without metadata). The value in MaxPayloadInMB must be greater than or equal to the size of a single record. You can approximate the size of a record by dividing the size of your dataset by the number of records. Then multiply this value by the number of records you want in a mini-batch. It is recommended to enter a value slightly larger than this to ensure the records fit within the maximum payload size. The default value is 6 MB. For an unlimited payload size, set the value to 0.

" + }, + "BatchStrategy":{ + "shape":"BatchStrategy", + "documentation":"

Determines the number of records included in a single mini-batch. SingleRecord means only one record is used per mini-batch. MultiRecord means a mini-batch is set to contain as many records as can fit within the MaxPayloadInMB limit.

" + }, + "Environment":{ + "shape":"TransformEnvironmentMap", + "documentation":"

The environment variables to set in the Docker container. We support up to 16 key-value entries in the map.

" + }, + "TransformInput":{ + "shape":"TransformInput", + "documentation":"

Describes the input source and the way the transform job consumes it.

" + }, + "TransformOutput":{ + "shape":"TransformOutput", + "documentation":"

Describes the results of the transform job.

" + }, + "TransformResources":{ + "shape":"TransformResources", + "documentation":"

Describes the resources, including ML instance types and ML instance count, to use for the transform job.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of key-value pairs. Adding tags is optional. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + } + } + }, + "CreateTransformJobResponse":{ + "type":"structure", + "required":["TransformJobArn"], + "members":{ + "TransformJobArn":{ + "shape":"TransformJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the transform job.
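A minimal sketch of calling the new CreateTransformJob operation through a botocore client; the job name, model name, S3 locations, and region are placeholders rather than values from this patch, and an existing SageMaker model is assumed.

import botocore.session

session = botocore.session.get_session()
sm = session.create_client("sagemaker", region_name="us-east-1")  # placeholder region

response = sm.create_transform_job(
    TransformJobName="example-transform-job",
    ModelName="example-model",  # hypothetical existing model
    MaxPayloadInMB=6,
    BatchStrategy="MultiRecord",
    TransformInput={
        "DataSource": {
            "S3DataSource": {
                "S3DataType": "S3Prefix",
                "S3Uri": "s3://example-bucket/input-prefix/",
            }
        },
        "ContentType": "text/csv",
        "SplitType": "Line",
    },
    TransformOutput={
        "S3OutputPath": "s3://example-bucket/output-prefix/",
        "AssembleWith": "Line",
    },
    TransformResources={"InstanceType": "ml.m4.xlarge", "InstanceCount": 1},
)
print(response["TransformJobArn"])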

" + } + } + }, "CreationTime":{"type":"timestamp"}, "DataSource":{ "type":"structure", @@ -1419,7 +1548,7 @@ }, "SecondaryStatus":{ "shape":"SecondaryStatus", - "documentation":"

Provides granular information about the system state. For more information, see TrainingJobStatus.

" + "documentation":"

Provides granular information about the system state. For more information, see TrainingJobStatus.

The valid values for SecondaryStatus are subject to change. They primarily provide information about the progress of the training job.

" }, "FailureReason":{ "shape":"FailureReason", @@ -1472,6 +1601,94 @@ "LastModifiedTime":{ "shape":"Timestamp", "documentation":"

A timestamp that indicates when the status of the training job was last modified.

" + }, + "SecondaryStatusTransitions":{ + "shape":"SecondaryStatusTransitions", + "documentation":"

A log of time-ordered secondary statuses that a training job has transitioned.

" + } + } + }, + "DescribeTransformJobRequest":{ + "type":"structure", + "required":["TransformJobName"], + "members":{ + "TransformJobName":{ + "shape":"TransformJobName", + "documentation":"

The name of the transform job that you want to view details of.

" + } + } + }, + "DescribeTransformJobResponse":{ + "type":"structure", + "required":[ + "TransformJobName", + "TransformJobArn", + "TransformJobStatus", + "ModelName", + "TransformInput", + "TransformResources", + "CreationTime" + ], + "members":{ + "TransformJobName":{ + "shape":"TransformJobName", + "documentation":"

The name of the transform job.

" + }, + "TransformJobArn":{ + "shape":"TransformJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the transform job.

" + }, + "TransformJobStatus":{ + "shape":"TransformJobStatus", + "documentation":"

The status of the transform job. If the transform job failed, the reason is returned in the FailureReason field.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the transform job failed, the reason that it failed.

" + }, + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the model used in the transform job.

" + }, + "MaxConcurrentTransforms":{ + "shape":"MaxConcurrentTransforms", + "documentation":"

The maximum number of parallel requests that can be launched on each instance node in a transform job. The default value is 1.

" + }, + "MaxPayloadInMB":{ + "shape":"MaxPayloadInMB", + "documentation":"

The maximum payload size, in MB, used in the transform job.

" + }, + "BatchStrategy":{ + "shape":"BatchStrategy", + "documentation":"

SingleRecord means only one record was used per batch. MultiRecord means batches contained as many records as could fit within the MaxPayloadInMB limit.

" + }, + "Environment":{ + "shape":"TransformEnvironmentMap", + "documentation":"

" + }, + "TransformInput":{ + "shape":"TransformInput", + "documentation":"

Describes the dataset to be transformed and the Amazon S3 location where it is stored.

" + }, + "TransformOutput":{ + "shape":"TransformOutput", + "documentation":"

Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

" + }, + "TransformResources":{ + "shape":"TransformResources", + "documentation":"

Describes the resources, including ML instance types and ML instance count, to use for the transform job.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp that shows when the transform job was created.

" + }, + "TransformStartTime":{ + "shape":"Timestamp", + "documentation":"

Indicates when the transform job starts on ML instances. You are billed for the time interval between this time and the value of TransformEndTime.

" + }, + "TransformEndTime":{ + "shape":"Timestamp", + "documentation":"

Indicates when the transform job is Completed, Stopped, or Failed. You are billed for the time interval between this time and the value of TransformStartTime.
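A hedged polling sketch against DescribeTransformJob that waits for one of the terminal statuses defined by TransformJobStatus; the job name, region, and poll interval are placeholders.

import time

import botocore.session

session = botocore.session.get_session()
sm = session.create_client("sagemaker", region_name="us-east-1")  # placeholder region

while True:
    job = sm.describe_transform_job(TransformJobName="example-transform-job")
    status = job["TransformJobStatus"]
    if status in ("Completed", "Failed", "Stopped"):
        break
    time.sleep(30)  # arbitrary poll interval

if status == "Failed":
    print("Transform job failed:", job.get("FailureReason"))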

" } } }, @@ -2431,7 +2648,7 @@ }, "CreationTimeAfter":{ "shape":"Timestamp", - "documentation":"

A filter that only training jobs created after the specified time (timestamp).

" + "documentation":"

A filter that returns only training jobs created after the specified time (timestamp).

" }, "CreationTimeBefore":{ "shape":"Timestamp", @@ -2477,6 +2694,70 @@ } } }, + "ListTransformJobsRequest":{ + "type":"structure", + "members":{ + "CreationTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only transform jobs created after the specified time.

" + }, + "CreationTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only transform jobs created before the specified time.

" + }, + "LastModifiedTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only transform jobs modified after the specified time.

" + }, + "LastModifiedTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only transform jobs modified before the specified time.

" + }, + "NameContains":{ + "shape":"NameContains", + "documentation":"

A string in the transform job name. This filter returns only transform jobs whose name contains the specified string.

" + }, + "StatusEquals":{ + "shape":"TransformJobStatus", + "documentation":"

A filter that retrieves only transform jobs with a specific status.

" + }, + "SortBy":{ + "shape":"SortBy", + "documentation":"

The field to sort results by. The default is CreationTime.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for results. The default is Descending.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListTransformJobs request was truncated, the response includes a NextToken. To retrieve the next set of transform jobs, use the token in the next request.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of transform jobs to return in the response. The default value is 10.

", + "box":true + } + } + }, + "ListTransformJobsResponse":{ + "type":"structure", + "required":["TransformJobSummaries"], + "members":{ + "TransformJobSummaries":{ + "shape":"TransformJobSummaries", + "documentation":"

An array of TransformJobSummary objects.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of transform jobs, use it in the next request.
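A small sketch of manual NextToken pagination for ListTransformJobs (a paginator configuration may also be available separately); the status filter and region are placeholders.

import botocore.session

session = botocore.session.get_session()
sm = session.create_client("sagemaker", region_name="us-east-1")  # placeholder region

kwargs = {"StatusEquals": "Completed", "MaxResults": 100}
while True:
    page = sm.list_transform_jobs(**kwargs)
    for summary in page["TransformJobSummaries"]:
        print(summary["TransformJobName"], summary["TransformJobStatus"])
    token = page.get("NextToken")
    if not token:
        break
    kwargs["NextToken"] = token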

" + } + } + }, + "MaxConcurrentTransforms":{ + "type":"integer", + "min":0 + }, "MaxNumberOfTrainingJobs":{ "type":"integer", "min":1 @@ -2485,6 +2766,10 @@ "type":"integer", "min":1 }, + "MaxPayloadInMB":{ + "type":"integer", + "min":0 + }, "MaxResults":{ "type":"integer", "max":100, @@ -3092,7 +3377,10 @@ "type":"string", "enum":[ "Starting", + "LaunchingMLInstances", + "PreparingTrainingStack", "Downloading", + "DownloadingTrainingImage", "Training", "Uploading", "Stopping", @@ -3102,6 +3390,36 @@ "Failed" ] }, + "SecondaryStatusTransition":{ + "type":"structure", + "required":[ + "Status", + "StartTime" + ], + "members":{ + "Status":{ + "shape":"SecondaryStatus", + "documentation":"

Provides granular information about the system state. For more information, see SecondaryStatus under the DescribeTrainingJob response elements.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp that shows when the training job has entered this secondary status.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp that shows when the secondary status has ended and the job has transitioned into another secondary status.

" + }, + "StatusMessage":{ + "shape":"StatusMessage", + "documentation":"

Shows a brief description and other information about the secondary status. For example, the LaunchingMLInstances secondary status could show a status message of \"Insufficient capacity error while launching instances\".

" + } + }, + "documentation":"

Specifies a secondary status the job has transitioned into. It includes a start timestamp and later an end timestamp. The end timestamp is added either after the job transitions to a different secondary status or after the job has ended.
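For illustration, the new SecondaryStatusTransitions list can be read back from DescribeTrainingJob roughly as follows; the training job name and region are placeholders.

import botocore.session

session = botocore.session.get_session()
sm = session.create_client("sagemaker", region_name="us-east-1")  # placeholder region

job = sm.describe_training_job(TrainingJobName="example-training-job")
for transition in job.get("SecondaryStatusTransitions", []):
    # Each entry carries Status and StartTime, plus EndTime/StatusMessage when set.
    print(
        transition["Status"],
        transition["StartTime"],
        transition.get("EndTime"),
        transition.get("StatusMessage"),
    )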

" + }, + "SecondaryStatusTransitions":{ + "type":"list", + "member":{"shape":"SecondaryStatusTransition"} + }, "SecurityGroupId":{ "type":"string", "max":32 @@ -3131,6 +3449,14 @@ "Descending" ] }, + "SplitType":{ + "type":"string", + "enum":[ + "None", + "Line", + "RecordIO" + ] + }, "StartNotebookInstanceInput":{ "type":"structure", "required":["NotebookInstanceName"], @@ -3141,6 +3467,7 @@ } } }, + "StatusMessage":{"type":"string"}, "StopHyperParameterTuningJobRequest":{ "type":"structure", "required":["HyperParameterTuningJobName"], @@ -3171,6 +3498,16 @@ } } }, + "StopTransformJobRequest":{ + "type":"structure", + "required":["TransformJobName"], + "members":{ + "TransformJobName":{ + "shape":"TransformJobName", + "documentation":"

The name of the transform job to stop.

" + } + } + }, "StoppingCondition":{ "type":"structure", "members":{ @@ -3380,6 +3717,214 @@ }, "documentation":"

Provides summary information about a training job.

" }, + "TransformDataSource":{ + "type":"structure", + "required":["S3DataSource"], + "members":{ + "S3DataSource":{ + "shape":"TransformS3DataSource", + "documentation":"

The S3 location of the data source that is associated with a channel.

" + } + }, + "documentation":"

Describes the location of the channel data.

" + }, + "TransformEnvironmentKey":{ + "type":"string", + "max":1024, + "pattern":"[a-zA-Z_][a-zA-Z0-9_]*" + }, + "TransformEnvironmentMap":{ + "type":"map", + "key":{"shape":"TransformEnvironmentKey"}, + "value":{"shape":"TransformEnvironmentValue"}, + "max":16 + }, + "TransformEnvironmentValue":{ + "type":"string", + "max":10240 + }, + "TransformInput":{ + "type":"structure", + "required":["DataSource"], + "members":{ + "DataSource":{ + "shape":"TransformDataSource", + "documentation":"

Describes the location of the channel data, meaning the S3 location of the input data that the model can consume.

" + }, + "ContentType":{ + "shape":"ContentType", + "documentation":"

The Multipurpose Internet Mail Extensions (MIME) type of the data. Amazon SageMaker uses the MIME type with each HTTP call to transfer data to the transform job.

" + }, + "CompressionType":{ + "shape":"CompressionType", + "documentation":"

Compressing data helps save on storage space. If your transform data is compressed, specify the compression type, and Amazon SageMaker will automatically decompress the data for the transform job. The default value is None.

" + }, + "SplitType":{ + "shape":"SplitType", + "documentation":"

The method to use to split the transform job's data into smaller batches. The default value is None. If you don't want to split the data, specify None. If you want to split records on a newline character boundary, specify Line. To split records according to the RecordIO format, specify RecordIO.

Amazon SageMaker sends the maximum number of records per batch in each request, up to the MaxPayloadInMB limit. For more information, see RecordIO data format.

For information about the RecordIO format, see Data Format.

" + } + }, + "documentation":"

Describes the input source of a transform job and the way the transform job consumes it.

" + }, + "TransformInstanceCount":{ + "type":"integer", + "min":1 + }, + "TransformInstanceType":{ + "type":"string", + "enum":[ + "ml.m4.xlarge", + "ml.m4.2xlarge", + "ml.m4.4xlarge", + "ml.m4.10xlarge", + "ml.m4.16xlarge", + "ml.c4.xlarge", + "ml.c4.2xlarge", + "ml.c4.4xlarge", + "ml.c4.8xlarge", + "ml.p2.xlarge", + "ml.p2.8xlarge", + "ml.p2.16xlarge", + "ml.p3.2xlarge", + "ml.p3.8xlarge", + "ml.p3.16xlarge", + "ml.c5.xlarge", + "ml.c5.2xlarge", + "ml.c5.4xlarge", + "ml.c5.9xlarge", + "ml.c5.18xlarge", + "ml.m5.large", + "ml.m5.xlarge", + "ml.m5.2xlarge", + "ml.m5.4xlarge", + "ml.m5.12xlarge", + "ml.m5.24xlarge" + ] + }, + "TransformJobArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:transform-job/.*" + }, + "TransformJobName":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, + "TransformJobStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Completed", + "Failed", + "Stopping", + "Stopped" + ] + }, + "TransformJobSummaries":{ + "type":"list", + "member":{"shape":"TransformJobSummary"} + }, + "TransformJobSummary":{ + "type":"structure", + "required":[ + "TransformJobName", + "TransformJobArn", + "CreationTime", + "TransformJobStatus" + ], + "members":{ + "TransformJobName":{ + "shape":"TransformJobName", + "documentation":"

The name of the transform job.

" + }, + "TransformJobArn":{ + "shape":"TransformJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the transform job.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp that shows when the transform job was created.

" + }, + "TransformEndTime":{ + "shape":"Timestamp", + "documentation":"

Indicates when the transform job ends on compute instances. For successful jobs and stopped jobs, this is the exact time recorded after the results are uploaded. For failed jobs, this is when Amazon SageMaker detected that the job failed.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

Indicates when the transform job was last modified.

" + }, + "TransformJobStatus":{ + "shape":"TransformJobStatus", + "documentation":"

The status of the transform job.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the transform job failed, the reason it failed.

" + } + }, + "documentation":"

Provides summary information about a transform job. Multiple TransformJobSummary objects are returned as a list after calling ListTransformJobs.

" + }, + "TransformOutput":{ + "type":"structure", + "required":["S3OutputPath"], + "members":{ + "S3OutputPath":{ + "shape":"S3Uri", + "documentation":"

The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix.

For every S3 object used as input for the transform job, the transformed data is stored in a corresponding subfolder in the location under the output prefix. For example, the input data s3://bucket-name/input-name-prefix/dataset01/data.csv will have the transformed data stored at s3://bucket-name/key-name-prefix/dataset01/, based on the original name, as a series of .part files (.part0001, .part0002, etc.).

" + }, + "Accept":{ + "shape":"Accept", + "documentation":"

The MIME type used to specify the output data. Amazon SageMaker uses the MIME type with each HTTP call to transfer data from the transform job.

" + }, + "AssembleWith":{ + "shape":"AssemblyType", + "documentation":"

Defines how to assemble the results of the transform job as a single S3 object. You should select a format that is most convenient to you. To concatenate the results in binary format, specify None. To add a newline character at the end of every transformed record, specify Line. To assemble the output in RecordIO format, specify RecordIO. The default value is None.

For information about the RecordIO format, see Data Format.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The AWS Key Management Service (AWS KMS) key for Amazon S3 server-side encryption that Amazon SageMaker uses to encrypt the transformed data.

If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTransformJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

" + } + }, + "documentation":"

Describes the results of a transform job output.

" + }, + "TransformResources":{ + "type":"structure", + "required":[ + "InstanceType", + "InstanceCount" + ], + "members":{ + "InstanceType":{ + "shape":"TransformInstanceType", + "documentation":"

The ML compute instance type for the transform job. If you are using built-in algorithms to transform moderately sized datasets, ml.m4.xlarge or ml.m5.large should suffice. There is no default value for InstanceType.

" + }, + "InstanceCount":{ + "shape":"TransformInstanceCount", + "documentation":"

The number of ML compute instances to use in the transform job. For distributed transform, provide a value greater than 1. The default value is 1.

" + } + }, + "documentation":"

Describes the resources, including ML instance types and ML instance count, to use for the transform job.

" + }, + "TransformS3DataSource":{ + "type":"structure", + "required":[ + "S3DataType", + "S3Uri" + ], + "members":{ + "S3DataType":{ + "shape":"S3DataType", + "documentation":"

If you choose S3Prefix, S3Uri identifies a key name prefix. Amazon SageMaker uses all objects with the specified key name prefix for batch transform.

If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for batch transform.

" + }, + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

" + } + }, + "documentation":"

Describes the S3 data source.

" + }, "UpdateEndpointInput":{ "type":"structure", "required":[ diff --git a/botocore/data/secretsmanager/2017-10-17/service-2.json b/botocore/data/secretsmanager/2017-10-17/service-2.json index a093e72f..0caeac74 100644 --- a/botocore/data/secretsmanager/2017-10-17/service-2.json +++ b/botocore/data/secretsmanager/2017-10-17/service-2.json @@ -289,7 +289,7 @@ {"shape":"InternalServiceError"}, {"shape":"PreconditionNotMetException"} ], - "documentation":"

Modifies many of the details of a secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.

To modify the rotation configuration of a secret, use RotateSecret instead.

The Secrets Manager console uses only the SecretString parameter and therefore limits you to encrypting and storing only a text string. To encrypt and store binary data as part of the version of a secret, you must use either the AWS CLI or one of the AWS SDKs.

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" + "documentation":"

Modifies many of the details of the specified secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.

To modify the rotation configuration of a secret, use RotateSecret instead.

The Secrets Manager console uses only the SecretString parameter and therefore limits you to encrypting and storing only a text string. To encrypt and store binary data as part of the version of a secret, you must use either the AWS CLI or one of the AWS SDKs.

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" }, "UpdateSecretVersionStage":{ "name":"UpdateSecretVersionStage", @@ -444,6 +444,11 @@ "shape":"RecoveryWindowInDaysType", "documentation":"

(Optional) Specifies the number of days that Secrets Manager waits before it can delete the secret.

This value can range from 7 to 30 days. The default value is 30.

", "box":true + }, + "ForceDeleteWithoutRecovery":{ + "shape":"BooleanType", + "documentation":"

(Optional) Specifies that the secret is to be deleted immediately without any recovery window. You cannot use both this parameter and the RecoveryWindowInDays parameter in the same API call.

An asynchronous background process performs the actual deletion, so there can be a short delay before the operation completes. If you write code to delete and then immediately recreate a secret with the same name, ensure that your code includes appropriate backoff and retry logic.

Use this parameter with caution. This parameter causes the operation to skip the normal waiting period before the permanent deletion that AWS would normally impose with the RecoveryWindowInDays parameter. If you delete a secret with the ForceDeleteWithoutRecovery parameter, then you have no opportunity to recover the secret. It is permanently lost.
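A hedged sketch contrasting the two mutually exclusive deletion modes through a botocore client; the secret name and region are placeholders.

import botocore.session

session = botocore.session.get_session()
secrets = session.create_client("secretsmanager", region_name="us-east-1")  # placeholder region

# Normal deletion with a 7-day recovery window.
secrets.delete_secret(SecretId="example/secret", RecoveryWindowInDays=7)

# Immediate, unrecoverable deletion; cannot be combined with RecoveryWindowInDays.
# secrets.delete_secret(SecretId="example/secret", ForceDeleteWithoutRecovery=True)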

", + "box":true } } }, @@ -1252,7 +1257,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to update or to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

Specifies the secret that you want to modify or to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", @@ -1261,19 +1266,19 @@ }, "Description":{ "shape":"DescriptionType", - "documentation":"

(Optional) Specifies a user-provided description of the secret.

" + "documentation":"

(Optional) Specifies an updated user-provided description of the secret.

" }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

(Optional) Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be used to encrypt the protected text in the versions of this secret.

If you don't specify this value, then Secrets Manager defaults to using the default CMK in the account (the one named aws/secretsmanager). If a AWS KMS CMK with that name doesn't exist, then Secrets Manager creates it for you automatically the first time it needs to encrypt a version's Plaintext or PlaintextString fields.

You can only use the account's default CMK to encrypt and decrypt if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and provide the ARN in this field.

" + "documentation":"

(Optional) Specifies an updated ARN or alias of the AWS KMS customer master key (CMK) to be used to encrypt the protected text in new versions of this secret.

You can only use the account's default CMK to encrypt and decrypt if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and provide the ARN of that CMK in this field. The user making the call must have permissions to both the secret and the CMK in their respective accounts.

" }, "SecretBinary":{ "shape":"SecretBinaryType", - "documentation":"

(Optional) Specifies binary data that you want to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then use the appropriate technique for your tool to pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.

This parameter is not accessible using the Secrets Manager console.

" + "documentation":"

(Optional) Specifies updated binary data that you want to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then use the appropriate technique for your tool to pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.

This parameter is not accessible using the Secrets Manager console.

" }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" + "documentation":"

(Optional) Specifies updated text data that you want to encrypt and store in this new version of the secret. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text. You can also 'escape' the double quote character in the embedded JSON text by prefacing each with a backslash. For example, the following string is surrounded by double-quotes. All of the embedded double quotes are escaped:

\"[{\\\"username\\\":\\\"bob\\\"},{\\\"password\\\":\\\"abc123xyz456\\\"}]\"

" } } }, @@ -1282,15 +1287,15 @@ "members":{ "ARN":{ "shape":"SecretARNType", - "documentation":"

The ARN of this secret.

Secrets Manager automatically adds several random characters to the name at the end of the ARN when you initially create a secret. This affects only the ARN and not the actual friendly name. This ensures that if you create a new secret with the same name as an old secret that you previously deleted, then users with access to the old secret don't automatically get access to the new secret because the ARNs are different.

" + "documentation":"

The ARN of the secret that was updated.

Secrets Manager automatically adds several random characters to the name at the end of the ARN when you initially create a secret. This affects only the ARN and not the actual friendly name. This ensures that if you create a new secret with the same name as an old secret that you previously deleted, then users with access to the old secret don't automatically get access to the new secret because the ARNs are different.

" }, "Name":{ "shape":"SecretNameType", - "documentation":"

The friendly name of this secret.

" + "documentation":"

The friendly name of the secret that was updated.

" }, "VersionId":{ "shape":"SecretVersionIdType", - "documentation":"

If a version of the secret was created or updated by this operation, then its unique identifier is returned.

" + "documentation":"

If a new version of the secret was created by this operation, then VersionId contains the unique identifier of the new version.

" } } }, @@ -1316,7 +1321,7 @@ }, "MoveToVersionId":{ "shape":"SecretVersionIdType", - "documentation":"

(Optional) The secret version ID that you want to add the staging labels to.

If any of the staging labels are already attached to a different version of the secret, then they are removed from that version before adding them to this version.

", + "documentation":"

(Optional) The secret version ID that you want to add the staging labels to.

If any of the staging labels are already attached to a different version of the secret, then they are automatically removed from that version before adding them to this version.

", "box":true } } diff --git a/botocore/data/snowball/2016-06-30/service-2.json b/botocore/data/snowball/2016-06-30/service-2.json index 5257b757..910baa80 100644 --- a/botocore/data/snowball/2016-06-30/service-2.json +++ b/botocore/data/snowball/2016-06-30/service-2.json @@ -41,7 +41,7 @@ {"shape":"InvalidJobStateException"}, {"shape":"KMSRequestFailedException"} ], - "documentation":"

Cancels the specified job. You can only cancel a job before its JobState value changes to PreparingAppliance. Requesting the ListJobs or DescribeJob action will return a job's JobState as part of the response element data returned.

" + "documentation":"

Cancels the specified job. You can only cancel a job before its JobState value changes to PreparingAppliance. Requesting the ListJobs or DescribeJob action returns a job's JobState as part of the response element data returned.

" }, "CreateAddress":{ "name":"CreateAddress", @@ -68,7 +68,8 @@ "errors":[ {"shape":"InvalidResourceException"}, {"shape":"KMSRequestFailedException"}, - {"shape":"InvalidInputCombinationException"} + {"shape":"InvalidInputCombinationException"}, + {"shape":"Ec2RequestFailedException"} ], "documentation":"

Creates an empty cluster. Each cluster supports five nodes. You use the CreateJob action separately to create the jobs for each of these nodes. The cluster does not ship until these five node jobs have been created.

" }, @@ -84,7 +85,8 @@ {"shape":"InvalidResourceException"}, {"shape":"KMSRequestFailedException"}, {"shape":"InvalidInputCombinationException"}, - {"shape":"ClusterLimitExceededException"} + {"shape":"ClusterLimitExceededException"}, + {"shape":"Ec2RequestFailedException"} ], "documentation":"

Creates a job to import or export data between Amazon S3 and your on-premises data center. Your AWS account must have the right trust policies and permissions in place to create a job for Snowball. If you're creating a job for a node in a cluster, you only need to provide the clusterId value; the other job attributes are inherited from the cluster.

" }, @@ -206,6 +208,20 @@ ], "documentation":"

Returns an array of ClusterListEntry objects of the specified length. Each ClusterListEntry object contains a cluster's state, a cluster's ID, and other important status information.

" }, + "ListCompatibleImages":{ + "name":"ListCompatibleImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCompatibleImagesRequest"}, + "output":{"shape":"ListCompatibleImagesResult"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"Ec2RequestFailedException"} + ], + "documentation":"

This action returns a list of the different Amazon EC2 Amazon Machine Images (AMIs) that are owned by your AWS account and that would be supported for use on a Snowball Edge device. Currently, supported AMIs are based on the CentOS 7 (x86_64) - with Updates HVM, Ubuntu Server 14.04 LTS (HVM), and Ubuntu 16.04 LTS - Xenial (HVM) images, available on the AWS Marketplace.
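A small usage sketch, assuming a boto3 Snowball client in a Region where the operation is available; it pages through results with NextToken:

# Sketch: listing EC2 AMIs that are compatible with a Snowball Edge device,
# following NextToken until the list is exhausted.
import boto3

snowball = boto3.client("snowball")

kwargs = {"MaxResults": 10}
while True:
    page = snowball.list_compatible_images(**kwargs)
    for image in page.get("CompatibleImages", []):
        print(image["AmiId"], image.get("Name"))
    if not page.get("NextToken"):
        break
    kwargs["NextToken"] = page["NextToken"]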

" + }, "ListJobs":{ "name":"ListJobs", "http":{ @@ -231,7 +247,8 @@ {"shape":"InvalidResourceException"}, {"shape":"InvalidJobStateException"}, {"shape":"KMSRequestFailedException"}, - {"shape":"InvalidInputCombinationException"} + {"shape":"InvalidInputCombinationException"}, + {"shape":"Ec2RequestFailedException"} ], "documentation":"

While a cluster's ClusterState value is in the AwaitingQuorum state, you can update some of the information associated with a cluster. Once the cluster changes to a different job state, usually 60 minutes after the cluster is created, this action is no longer available.

" }, @@ -248,7 +265,8 @@ {"shape":"InvalidJobStateException"}, {"shape":"KMSRequestFailedException"}, {"shape":"InvalidInputCombinationException"}, - {"shape":"ClusterLimitExceededException"} + {"shape":"ClusterLimitExceededException"}, + {"shape":"Ec2RequestFailedException"} ], "documentation":"

While a job's JobState value is New, you can update some of the information associated with a job. Once the job changes to a different job state, usually within 60 minutes of the job being created, this action is no longer available.

" } @@ -326,6 +344,12 @@ "type":"list", "member":{"shape":"Address"} }, + "AmiId":{ + "type":"string", + "max":21, + "min":12, + "pattern":"(ami-[0-9a-f]{8})|(ami-[0-9a-f]{17})" + }, "Boolean":{"type":"boolean"}, "CancelClusterRequest":{ "type":"structure", @@ -426,7 +450,7 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of AWS Snowball appliance to use for this cluster. Currently, the only supported appliance type for cluster jobs is EDGE.

" + "documentation":"

The type of AWS Snowball device to use for this cluster. Currently, the only supported device type for cluster jobs is EDGE.

" }, "CreationDate":{ "shape":"Timestamp", @@ -442,7 +466,7 @@ }, "ShippingOption":{ "shape":"ShippingOption", - "documentation":"

The shipping speed for each node in this cluster. This speed doesn't dictate how soon you'll get each Snowball Edge appliance, rather it represents how quickly each appliance moves to its destination while in transit. Regional shipping speeds are as follows:

" + "documentation":"

The shipping speed for each node in this cluster. This speed doesn't dictate how soon you'll get each Snowball Edge device, rather it represents how quickly each device moves to its destination while in transit. Regional shipping speeds are as follows:

" }, "Notification":{ "shape":"Notification", @@ -465,6 +489,24 @@ "Cancelled" ] }, + "CompatibleImage":{ + "type":"structure", + "members":{ + "AmiId":{ + "shape":"String", + "documentation":"

The unique identifier for an individual Snowball Edge AMI.

" + }, + "Name":{ + "shape":"String", + "documentation":"

The optional name of a compatible image.

" + } + }, + "documentation":"

A JSON-formatted object that describes a compatible Amazon Machine Image (AMI), including the ID and name for a Snowball Edge AMI. This AMI is compatible with the device's physical hardware requirements, and it should be able to run in an SBE1 instance on the device.

" + }, + "CompatibleImageList":{ + "type":"list", + "member":{"shape":"CompatibleImage"} + }, "CreateAddressRequest":{ "type":"structure", "required":["Address"], @@ -520,11 +562,11 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of AWS Snowball appliance to use for this cluster. Currently, the only supported appliance type for cluster jobs is EDGE.

" + "documentation":"

The type of AWS Snowball device to use for this cluster. Currently, the only supported device type for cluster jobs is EDGE.

" }, "ShippingOption":{ "shape":"ShippingOption", - "documentation":"

The shipping speed for each node in this cluster. This speed doesn't dictate how soon you'll get each Snowball Edge appliance, rather it represents how quickly each appliance moves to its destination while in transit. Regional shipping speeds are as follows:

" + "documentation":"

The shipping speed for each node in this cluster. This speed doesn't dictate how soon you'll get each Snowball Edge device, rather it represents how quickly each device moves to its destination while in transit. Regional shipping speeds are as follows:

" }, "Notification":{ "shape":"Notification", @@ -590,7 +632,7 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of AWS Snowball appliance to use for this job. Currently, the only supported appliance type for cluster jobs is EDGE.

" + "documentation":"

The type of AWS Snowball device to use for this job. Currently, the only supported device type for cluster jobs is EDGE.

" }, "ForwardingAddressId":{ "shape":"AddressId", @@ -627,7 +669,7 @@ "documentation":"

The total number of objects for a transfer between a Snowball and Amazon S3. This value is set to 0 (zero) until all the keys that will be transferred have been listed.

" } }, - "documentation":"

Defines the real-time status of a Snowball's data transfer while the appliance is at AWS. This data is only available while a job has a JobState value of InProgress, for both import and export jobs.

" + "documentation":"

Defines the real-time status of a Snowball's data transfer while the device is at AWS. This data is only available while a job has a JobState value of InProgress, for both import and export jobs.

" }, "DescribeAddressRequest":{ "type":"structure", @@ -716,6 +758,33 @@ } } }, + "Ec2AmiResource":{ + "type":"structure", + "required":["AmiId"], + "members":{ + "AmiId":{ + "shape":"AmiId", + "documentation":"

The ID of the AMI in Amazon EC2.

" + }, + "SnowballAmiId":{ + "shape":"String", + "documentation":"

The ID of the AMI on the Snowball Edge device.

" + } + }, + "documentation":"

A JSON-formatted object that contains the IDs for an Amazon Machine Image (AMI), including the Amazon EC2 AMI ID and the Snowball Edge AMI ID. Each AMI has these two IDs to simplify identifying the AMI in both the AWS Cloud and on the device.

" + }, + "Ec2AmiResourceList":{ + "type":"list", + "member":{"shape":"Ec2AmiResource"} + }, + "Ec2RequestFailedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

Your IAM user lacks the necessary Amazon EC2 permissions to perform the attempted action.

", + "exception":true + }, "EventTriggerDefinition":{ "type":"structure", "members":{ @@ -822,7 +891,11 @@ "InvalidResourceException":{ "type":"structure", "members":{ - "Message":{"shape":"String"} + "Message":{"shape":"String"}, + "ResourceType":{ + "shape":"String", + "documentation":"

The provided resource value is invalid.

" + } }, "documentation":"

The specified resource can't be found. Check the information you provided in your last request, and try again.

", "exception":true @@ -854,7 +927,7 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of appliance used with this job.

" + "documentation":"

The type of device used with this job.

" }, "CreationDate":{ "shape":"Timestamp", @@ -906,7 +979,7 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of appliance used with this job.

" + "documentation":"

The type of device used with this job.

" }, "CreationDate":{ "shape":"Timestamp", @@ -946,7 +1019,7 @@ }, "DataTransferProgress":{ "shape":"DataTransfer", - "documentation":"

A value that defines the real-time status of a Snowball's data transfer while the appliance is at AWS. This data is only available while a job has a JobState value of InProgress, for both import and export jobs.

" + "documentation":"

A value that defines the real-time status of a Snowball's data transfer while the device is at AWS. This data is only available while a job has a JobState value of InProgress, for both import and export jobs.

" }, "JobLogInfo":{ "shape":"JobLogs", @@ -977,9 +1050,13 @@ "LambdaResources":{ "shape":"LambdaResourceList", "documentation":"

The Python-language Lambda functions for this job.

" + }, + "Ec2AmiResources":{ + "shape":"Ec2AmiResourceList", + "documentation":"

The Amazon Machine Images (AMIs) associated with this job.

" } }, - "documentation":"

Contains an array of S3Resource objects. Each S3Resource object represents an Amazon S3 bucket that your transferred data will be exported from or imported into.

" + "documentation":"

Contains an array of AWS resource objects. Each object represents an Amazon S3 bucket, an AWS Lambda function, or an Amazon Machine Image (AMI) based on Amazon EC2 that is associated with a particular job.
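To show how the new Ec2AmiResources member sits alongside the existing S3Resources, here is a hedged boto3 sketch of an import job; the ARNs, address ID, and AMI ID are placeholders:

# Sketch: a Snowball Edge import job whose JobResource carries both S3 buckets and EC2 AMIs.
# All ARNs, IDs, and names below are placeholders.
import boto3

snowball = boto3.client("snowball")

response = snowball.create_job(
    JobType="IMPORT",
    SnowballType="EDGE",
    Resources={
        "S3Resources": [
            {"BucketArn": "arn:aws:s3:::example-bucket"},
        ],
        "Ec2AmiResources": [
            {"AmiId": "ami-0123456789abcdef0"},  # EC2 AMI ID; the on-device AMI ID is assigned later
        ],
    },
    RoleARN="arn:aws:iam::123456789012:role/example-snowball-role",
    AddressId="ADID00000000-0000-0000-0000-000000000000",
    ShippingOption="SECOND_DAY",
)
print(response["JobId"])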

" }, "JobState":{ "type":"string", @@ -1112,6 +1189,32 @@ } } }, + "ListCompatibleImagesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListLimit", + "documentation":"

The maximum number of results for the list of compatible images. Currently, a Snowball Edge device can store 10 AMIs.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

HTTP requests are stateless. To identify what object comes \"next\" in the list of compatible images, you can specify a value for NextToken as the starting point for your list of returned images.

" + } + } + }, + "ListCompatibleImagesResult":{ + "type":"structure", + "members":{ + "CompatibleImages":{ + "shape":"CompatibleImageList", + "documentation":"

A JSON-formatted object that describes a compatible AMI, including the ID and name for a Snowball Edge AMI.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Because HTTP requests are stateless, this is the starting point for your next list of returned images.

" + } + } + }, "ListJobsRequest":{ "type":"structure", "members":{ @@ -1325,7 +1428,7 @@ }, "Resources":{ "shape":"JobResource", - "documentation":"

The updated S3Resource object (for a single Amazon S3 bucket or key range), or the updated JobResource object (for multiple buckets or key ranges).

" + "documentation":"

The updated S3Resource object (for a single Amazon S3 bucket or key range), or the updated JobResource object (for multiple buckets or key ranges).

" }, "AddressId":{ "shape":"AddressId", @@ -1355,5 +1458,5 @@ } } }, - "documentation":"

AWS Snowball is a petabyte-scale data transport solution that uses secure appliances to transfer large amounts of data between your on-premises data centers and Amazon Simple Storage Service (Amazon S3). The Snowball commands described here provide access to the same functionality that is available in the AWS Snowball Management Console, which enables you to create and manage jobs for Snowball. To transfer data locally with a Snowball appliance, you'll need to use the Snowball client or the Amazon S3 API adapter for Snowball. For more information, see the User Guide.

" + "documentation":"

AWS Snowball is a petabyte-scale data transport solution that uses secure devices to transfer large amounts of data between your on-premises data centers and Amazon Simple Storage Service (Amazon S3). The Snowball commands described here provide access to the same functionality that is available in the AWS Snowball Management Console, which enables you to create and manage jobs for Snowball. To transfer data locally with a Snowball device, you'll need to use the Snowball client or the Amazon S3 API adapter for Snowball. For more information, see the User Guide.

" } diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index a05aea2a..1521383f 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -1028,6 +1028,23 @@ ], "documentation":"

Retrieves the patch baseline that should be used for the specified patch group.

" }, + "LabelParameterVersion":{ + "name":"LabelParameterVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"LabelParameterVersionRequest"}, + "output":{"shape":"LabelParameterVersionResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"TooManyUpdates"}, + {"shape":"ParameterNotFound"}, + {"shape":"ParameterVersionNotFound"}, + {"shape":"ParameterVersionLabelLimitExceeded"} + ], + "documentation":"

A parameter label is a user-defined alias to help you manage different versions of a parameter. When you modify a parameter, Systems Manager automatically saves a new version and increments the version number by one. A label can help you remember the purpose of a parameter when there are multiple versions.

Parameter labels have the following requirements and restrictions.
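A minimal boto3 sketch of attaching a label, assuming a placeholder parameter named /example/db-password that already has more than one version; the response's InvalidLabels list reports any labels that were rejected:

# Sketch: attaching a label to a specific version of a Systems Manager parameter.
# The parameter name, version, and label are placeholders.
import boto3

ssm = boto3.client("ssm")

response = ssm.label_parameter_version(
    Name="/example/db-password",
    ParameterVersion=2,          # omit to label the latest version
    Labels=["Production"],
)
print(response.get("InvalidLabels", []))  # labels that did not meet the requirements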

" + }, "ListAssociationVersions":{ "name":"ListAssociationVersions", "http":{ @@ -2359,6 +2376,10 @@ "shape":"Targets", "documentation":"

The specified targets.

" }, + "TargetMaps":{ + "shape":"TargetMaps", + "documentation":"

The specified key-value mapping of document parameters to target resources.

" + }, "ResolvedTargets":{ "shape":"ResolvedTargets", "documentation":"

A list of resolved targets in the rate control execution.

" @@ -2505,6 +2526,10 @@ "shape":"Targets", "documentation":"

The targets defined by the user when starting the Automation.

" }, + "TargetMaps":{ + "shape":"TargetMaps", + "documentation":"

The specified key-value mapping of document parameters to target resources.

" + }, "ResolvedTargets":{ "shape":"ResolvedTargets", "documentation":"

A list of targets that resolved during the execution.

" @@ -6643,7 +6668,8 @@ "Message":{"shape":"String"} }, "documentation":"

An error occurred on the server side.

", - "exception":true + "exception":true, + "fault":true }, "InvalidActivation":{ "type":"structure", @@ -7394,6 +7420,37 @@ "type":"list", "member":{"shape":"TagKey"} }, + "LabelParameterVersionRequest":{ + "type":"structure", + "required":[ + "Name", + "Labels" + ], + "members":{ + "Name":{ + "shape":"PSParameterName", + "documentation":"

The parameter name on which you want to attach one or more labels.

" + }, + "ParameterVersion":{ + "shape":"PSParameterVersion", + "documentation":"

The specific version of the parameter on which you want to attach one or more labels. If no version is specified, the system attaches the label to the latest version.

", + "box":true + }, + "Labels":{ + "shape":"ParameterLabelList", + "documentation":"

One or more labels to attach to the specified parameter version.

" + } + } + }, + "LabelParameterVersionResult":{ + "type":"structure", + "members":{ + "InvalidLabels":{ + "shape":"ParameterLabelList", + "documentation":"

The label does not meet the requirements. For information about parameter label requirements, see Labeling Parameters in the AWS Systems Manager User Guide.

" + } + } + }, "LastResourceDataSyncMessage":{"type":"string"}, "LastResourceDataSyncStatus":{ "type":"string", @@ -8620,6 +8677,11 @@ "max":2048, "min":1 }, + "PSParameterSelector":{ + "type":"string", + "max":128, + "min":0 + }, "PSParameterValue":{ "type":"string", "max":4096, @@ -8644,6 +8706,22 @@ "Version":{ "shape":"PSParameterVersion", "documentation":"

The parameter version.

" + }, + "Selector":{ + "shape":"PSParameterSelector", + "documentation":"

Either the version number or the label used to retrieve the parameter value. Specify selectors by using one of the following formats:

parameter_name:version

parameter_name:label
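A brief sketch of both selector formats, reusing the placeholder parameter and label from the earlier example:

# Sketch: reading a parameter by version selector and by label selector.
# "/example/db-password" and "Production" are placeholders.
import boto3

ssm = boto3.client("ssm")

by_version = ssm.get_parameter(Name="/example/db-password:2", WithDecryption=True)
by_label = ssm.get_parameter(Name="/example/db-password:Production", WithDecryption=True)

print(by_version["Parameter"]["Version"], by_version["Parameter"].get("Selector"))
print(by_label["Parameter"]["Value"] == by_version["Parameter"]["Value"])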

" + }, + "SourceResult":{ + "shape":"String", + "documentation":"

Applies to parameters that reference information in other AWS services. SourceResult is the raw result or response from the source.

" + }, + "LastModifiedDate":{ + "shape":"DateTime", + "documentation":"

Date the parameter was last changed or updated and the parameter version was created.

" + }, + "ARN":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the parameter.

" } }, "documentation":"

An Amazon EC2 Systems Manager parameter in Parameter Store.

" @@ -8699,6 +8777,10 @@ "Version":{ "shape":"PSParameterVersion", "documentation":"

The parameter version.

" + }, + "Labels":{ + "shape":"ParameterLabelList", + "documentation":"

Labels assigned to the parameter version.

" } }, "documentation":"

Information about parameter usage.

" @@ -8713,6 +8795,17 @@ "min":1, "pattern":"^([a-zA-Z0-9:/_-]+)$" }, + "ParameterLabel":{ + "type":"string", + "max":100, + "min":1 + }, + "ParameterLabelList":{ + "type":"list", + "member":{"shape":"ParameterLabel"}, + "max":10, + "min":1 + }, "ParameterLimitExceeded":{ "type":"structure", "members":{ @@ -8824,7 +8917,7 @@ "type":"string", "max":132, "min":1, - "pattern":"tag:.+|Name|Type|KeyId|Path" + "pattern":"tag:.+|Name|Type|KeyId|Path|Label" }, "ParameterStringFilterList":{ "type":"list", @@ -8859,6 +8952,14 @@ "type":"list", "member":{"shape":"ParameterValue"} }, + "ParameterVersionLabelLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

A parameter version can have a maximum of ten labels.

", + "exception":true + }, "ParameterVersionNotFound":{ "type":"structure", "members":{ @@ -9594,7 +9695,6 @@ "WindowId", "Targets", "TaskArn", - "ServiceRoleArn", "TaskType", "MaxConcurrency", "MaxErrors" @@ -9614,7 +9714,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The role that should be assumed when executing the task.

" + "documentation":"

The role to assume when running the Maintenance Window task.

If you do not specify a service role ARN, Systems Manager will use your account's service-linked role for Systems Manager by default. If no service-linked role for Systems Manager exists in your account, it will be created when you run RegisterTaskWithMaintenanceWindow without specifying a service role ARN.

For more information, see Service-Linked Role Permissions for Systems Manager and Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance Window Tasks? in the AWS Systems Manager User Guide.
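Since ServiceRoleArn is no longer in the required list, here is a hedged sketch of registering a Run Command task that falls back to the service-linked role; the window ID and target value are placeholders:

# Sketch: registering a Maintenance Window task without an explicit ServiceRoleArn,
# relying on the account's service-linked role for Systems Manager.
# The window ID and target value are placeholders.
import boto3

ssm = boto3.client("ssm")

ssm.register_task_with_maintenance_window(
    WindowId="mw-0123456789abcdef0",
    Targets=[{"Key": "WindowTargetIds", "Values": ["e32eecb2-EXAMPLE"]}],
    TaskArn="AWS-RunShellScript",
    TaskType="RUN_COMMAND",
    MaxConcurrency="2",
    MaxErrors="1",
)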

" }, "TaskType":{ "shape":"MaintenanceWindowTaskType", @@ -10211,6 +10311,10 @@ "shape":"Targets", "documentation":"

A key-value mapping to target resources. Required if you specify TargetParameterName.

" }, + "TargetMaps":{ + "shape":"TargetMaps", + "documentation":"

A key-value mapping of document parameters to target resources. You can't specify both Targets and TargetMaps in the same request.
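A hedged sketch of how TargetMaps is shaped: a list of maps from a document parameter name to a list of values, supplied instead of Targets; the document name, parameter names, and values are placeholders:

# Sketch: starting an Automation execution with TargetMaps instead of Targets.
# Each map key is a document parameter name; all names and values are placeholders.
import boto3

ssm = boto3.client("ssm")

response = ssm.start_automation_execution(
    DocumentName="Example-RestartInstances",   # placeholder Automation document
    TargetMaps=[
        {"InstanceId": ["i-0123456789abcdef0"], "Region": ["us-east-1"]},
    ],
    MaxConcurrency="1",
    MaxErrors="1",
)
print(response["AutomationExecutionId"])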

" + }, "MaxConcurrency":{ "shape":"MaxConcurrency", "documentation":"

The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10.

" @@ -10320,6 +10424,25 @@ "OverriddenParameters":{ "shape":"AutomationParameterMap", "documentation":"

A user-specified list of parameters to override when executing a step.

" + }, + "IsEnd":{ + "shape":"Boolean", + "documentation":"

A flag that can be used to end the Automation after this step completes, regardless of whether the step succeeds or fails.

", + "box":true + }, + "NextStep":{ + "shape":"String", + "documentation":"

The next step after the step succeeds.

", + "box":true + }, + "IsCritical":{ + "shape":"Boolean", + "documentation":"

A flag that indicates whether the failure of the current step causes the Automation to fail.

", + "box":true + }, + "ValidNextSteps":{ + "shape":"ValidNextStepList", + "documentation":"

Strategies used when a step fails. Continue and Abort are supported. Abort fails the Automation when the step fails. Continue ignores the failure of the current step and allows the Automation to run the next step. With conditional branching, step:stepName is also supported, which directs the Automation to a specific step.

" } }, "documentation":"

Detailed information about the execution state of an Automation step.

" @@ -10480,6 +10603,35 @@ "min":1, "pattern":"^[\\p{L}\\p{Z}\\p{N}_.:/=\\-@]*$" }, + "TargetMap":{ + "type":"map", + "key":{"shape":"TargetMapKey"}, + "value":{"shape":"TargetMapValueList"}, + "max":20, + "min":1 + }, + "TargetMapKey":{ + "type":"string", + "max":50, + "min":1 + }, + "TargetMapValue":{ + "type":"string", + "max":50, + "min":1 + }, + "TargetMapValueList":{ + "type":"list", + "member":{"shape":"TargetMapValue"}, + "max":25, + "min":0 + }, + "TargetMaps":{ + "type":"list", + "member":{"shape":"TargetMap"}, + "max":300, + "min":0 + }, "TargetParameterList":{ "type":"list", "member":{"shape":"ParameterValue"} @@ -10894,7 +11046,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The IAM service role ARN to modify. The system assumes this role during task execution.

" + "documentation":"

The IAM service role ARN to modify. The system assumes this role during task execution.

If you do not specify a service role ARN, Systems Manager will use your account's service-linked role for Systems Manager by default. If no service-linked role for Systems Manager exists in your account, it will be created when you run RegisterTaskWithMaintenanceWindow without specifying a service role ARN.

For more information, see Service-Linked Role Permissions for Systems Manager and Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance Window Tasks? in the AWS Systems Manager User Guide.

" }, "TaskParameters":{ "shape":"MaintenanceWindowTaskParameters", @@ -11126,6 +11278,15 @@ } }, "Url":{"type":"string"}, + "ValidNextStep":{ + "type":"string", + "max":65535, + "min":1 + }, + "ValidNextStepList":{ + "type":"list", + "member":{"shape":"ValidNextStep"} + }, "Version":{ "type":"string", "pattern":"^[0-9]{1,6}(\\.[0-9]{1,6}){2,3}$" diff --git a/botocore/data/storagegateway/2013-06-30/service-2.json b/botocore/data/storagegateway/2013-06-30/service-2.json index 26a29a9a..54aab405 100644 --- a/botocore/data/storagegateway/2013-06-30/service-2.json +++ b/botocore/data/storagegateway/2013-06-30/service-2.json @@ -136,7 +136,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. This operation is only supported in the file gateway type.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" + "documentation":"

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an NFS interface. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" }, "CreateSMBFileShare":{ "name":"CreateSMBFileShare", @@ -150,7 +150,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a Server Message Block (SMB) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway expose file shares using a SMB interface. This operation is only supported in the file gateway type.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" + "documentation":"

Creates a Server Message Block (SMB) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an SMB interface. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.
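A rough boto3 sketch of the call, assuming an existing SMB-capable file gateway, an IAM role, and an S3 bucket; all ARNs below are placeholders:

# Sketch: creating an SMB file share on an existing file gateway.
# Gateway, role, and bucket ARNs are placeholders.
import uuid
import boto3

sgw = boto3.client("storagegateway")

response = sgw.create_smb_file_share(
    ClientToken=str(uuid.uuid4()),  # idempotency token
    GatewayARN="arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B",
    Role="arn:aws:iam::123456789012:role/example-file-share-role",
    LocationARN="arn:aws:s3:::example-smb-share-bucket",
    Authentication="GuestAccess",
)
print(response["FileShareARN"])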

" }, "CreateSnapshot":{ "name":"CreateSnapshot", @@ -264,7 +264,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes a file share from a file gateway. This operation is only supported in the file gateway type.

" + "documentation":"

Deletes a file share from a file gateway. This operation is only supported for file gateways.

" }, "DeleteGateway":{ "name":"DeleteGateway", @@ -432,7 +432,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Gets a description for one or more Network File System (NFS) file shares from a file gateway. This operation is only supported in the file gateway type.

" + "documentation":"

Gets a description for one or more Network File System (NFS) file shares from a file gateway. This operation is only supported for file gateways.

" }, "DescribeSMBFileShares":{ "name":"DescribeSMBFileShares", @@ -446,7 +446,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Gets a description for one or more Server Message Block (SMB) file shares from a file gateway. This operation is only supported in the file gateway type.

" + "documentation":"

Gets a description for one or more Server Message Block (SMB) file shares from a file gateway. This operation is only supported for file gateways.

" }, "DescribeSMBSettings":{ "name":"DescribeSMBSettings", @@ -460,7 +460,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Gets a description of a Server Message Block (SMB) file share settings from a file gateway. This operation is only supported in the file gateway type.

" + "documentation":"

Gets a description of a Server Message Block (SMB) file share settings from a file gateway. This operation is only supported for file gateways.

" }, "DescribeSnapshotSchedule":{ "name":"DescribeSnapshotSchedule", @@ -600,7 +600,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Adds a file gateway to an Active Directory domain. This operation is only supported in the file gateway type that supports the SMB file protocol.

" + "documentation":"

Adds a file gateway to an Active Directory domain. This operation is only supported for file gateways that support the SMB file protocol.
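A hedged sketch of joining a gateway to a domain; the gateway ARN, domain name, and credentials are placeholders:

# Sketch: joining an SMB-capable file gateway to an Active Directory domain.
# The ARN, domain name, and credentials are placeholders.
import boto3

sgw = boto3.client("storagegateway")

response = sgw.join_domain(
    GatewayARN="arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B",
    DomainName="corp.example.com",
    UserName="admin",
    Password="example-password",
)
print(response["GatewayARN"])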

" }, "ListFileShares":{ "name":"ListFileShares", @@ -614,7 +614,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Gets a list of the file shares for a specific file gateway, or the list of file shares that belong to the calling user account. This operation is only supported in the file gateway type.

" + "documentation":"

Gets a list of the file shares for a specific file gateway, or the list of file shares that belong to the calling user account. This operation is only supported for file gateways.

" }, "ListGateways":{ "name":"ListGateways", @@ -726,7 +726,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Sends you notification through CloudWatch Events when all files written to your NFS file share have been uploaded to Amazon S3.

AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the NFS file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported in the file gateway type.

For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).

" + "documentation":"

Sends you notification through CloudWatch Events when all files written to your NFS file share have been uploaded to Amazon S3.

AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the NFS file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you a notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or an AWS Lambda function. This operation is only supported for file gateways.

For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).
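A short sketch of requesting the notification for a share; the file share ARN is a placeholder, and the returned NotificationId is what later appears in the CloudWatch Event:

# Sketch: asking the gateway to emit a CloudWatch Event once everything written
# to the NFS share so far has been uploaded to Amazon S3. The ARN is a placeholder.
import boto3

sgw = boto3.client("storagegateway")

response = sgw.notify_when_uploaded(
    FileShareARN="arn:aws:storagegateway:us-east-1:123456789012:share/share-EXAMPLE1",
)
print(response["NotificationId"])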

" }, "RefreshCache":{ "name":"RefreshCache", @@ -824,7 +824,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Sets the password for the guest user “smbguest”. \"smbguest\" is the user when the Authentication method for the file share is “GuestAccess”.

" + "documentation":"

Sets the password for the guest user smbguest. The smbguest user is the user when the authentication method for the file share is set to GuestAccess.
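A minimal sketch of the call; the gateway ARN and password value are placeholders:

# Sketch: setting the smbguest password used when a share's Authentication is GuestAccess.
# The gateway ARN and password value are placeholders.
import boto3

sgw = boto3.client("storagegateway")

sgw.set_smb_guest_password(
    GatewayARN="arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B",
    Password="example-guest-password",
)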

" }, "ShutdownGateway":{ "name":"ShutdownGateway", @@ -950,7 +950,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates a Server Message Block (SMB) file share. This operation is only supported in the file gateway type.

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported in the file gateway type.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" + "documentation":"

Updates a Server Message Block (SMB) file share.

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" }, "UpdateSnapshotSchedule":{ "name":"UpdateSnapshotSchedule", @@ -1120,7 +1120,7 @@ }, "Authentication":{ "type":"string", - "documentation":"

The authentication method of the file share. Valid values: \"ActiveDirectory\" or \"GuestAccess\". The default is \"ActiveDirectory\".

", + "documentation":"

The authentication method of the file share.

Valid values are ActiveDirectory or GuestAccess. The default is ActiveDirectory.

", "max":15, "min":5 }, @@ -1316,7 +1316,7 @@ }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" } } }, @@ -1360,7 +1360,7 @@ }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" }, "Role":{ "shape":"Role", @@ -1372,11 +1372,11 @@ }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

Sets the access control list permission for objects in the Amazon S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" }, "ClientList":{ "shape":"FileShareClientList", @@ -1384,19 +1384,19 @@ }, "Squash":{ "shape":"Squash", - "documentation":"

Maps a user to anonymous user. Valid options are the following:

" + "documentation":"

Maps a user to anonymous user. Valid options are the following:

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

Sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

Enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

Sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" + "documentation":"

A value that sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" } }, "documentation":"

CreateNFSFileShareInput

" @@ -1434,7 +1434,7 @@ }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" }, "Role":{ "shape":"Role", @@ -1446,35 +1446,35 @@ }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

Sets the access control list permission for objects in the Amazon S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

Sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

Enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

Sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" + "documentation":"

A value that sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" }, "ValidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users in the Active Directory that are allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "InvalidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users in the Active Directory that are not allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + "documentation":"

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "Authentication":{ "shape":"Authentication", - "documentation":"

The authentication method that users use to access the file share.

Valid values: \"ActiveDirectory\" or \"GuestAccess\". The default is \"ActiveDirectory\".

" + "documentation":"

The authentication method that users use to access the file share.

Valid values are ActiveDirectory or GuestAccess. The default is ActiveDirectory.

" } }, "documentation":"

CreateSMBFileShareInput

" @@ -1570,6 +1570,14 @@ "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" + }, + "KMSEncrypted":{ + "shape":"Boolean", + "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" } }, "documentation":"

A JSON object containing one or more of the following fields:

" @@ -1618,7 +1626,7 @@ }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the KMS Key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS Key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" } }, "documentation":"

CreateTapeWithBarcodeInput

" @@ -1669,7 +1677,7 @@ }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" } }, "documentation":"

CreateTapesInput

" @@ -2146,7 +2154,7 @@ "members":{ "TapeArchives":{ "shape":"TapeArchives", - "documentation":"

An array of virtual tape objects in the virtual tape shelf (VTS). The description includes of the Amazon Resource Name(ARN) of the virtual tapes. The information returned includes the Amazon Resource Names (ARNs) of the tapes, size of the tapes, status of the tapes, progress of the description and tape barcode.

" + "documentation":"

An array of virtual tape objects in the virtual tape shelf (VTS). The description includes the Amazon Resource Name (ARN) of the virtual tapes. The information returned includes the Amazon Resource Names (ARNs) of the tapes, size of the tapes, status of the tapes, progress of the description, and tape barcode.

" }, "Marker":{ "shape":"Marker", @@ -2313,7 +2321,7 @@ "members":{ "TargetARN":{ "shape":"TargetARN", - "documentation":"

Specifies the unique Amazon Resource Name(ARN) that encodes the iSCSI qualified name(iqn) of a tape drive or media changer target.

" + "documentation":"

Specifies the unique Amazon Resource Name (ARN) that encodes the iSCSI qualified name (iqn) of a tape drive or media changer target.

" }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", @@ -2343,7 +2351,7 @@ "members":{ "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The unique Amazon Resource Name of the disabled gateway.

" + "documentation":"

The unique Amazon Resource Name (ARN) of the disabled gateway.

" } }, "documentation":"

DisableGatewayOutput

" @@ -2660,7 +2668,7 @@ "members":{ "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The unique Amazon Resource Name of the file gateway you want to add to the Active Directory domain.

" + "documentation":"

The unique Amazon Resource Name (ARN) of the file gateway you want to add to the Active Directory domain.

" }, "DomainName":{ "shape":"DomainName", @@ -2682,14 +2690,14 @@ "members":{ "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The unique Amazon Resource Name of the gateway that joined the domain.

" + "documentation":"

The unique Amazon Resource Name (ARN) of the gateway that joined the domain.

" } }, "documentation":"

JoinDomainOutput

" }, "KMSKey":{ "type":"string", - "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", "max":2048, "min":20 }, @@ -2939,7 +2947,7 @@ "documentation":"

The default owner ID for files in the file share (unless the files have another owner ID specified). The default value is nfsnobody.

" } }, - "documentation":"

Describes Network File System (NFS) file share default values. Files and folders stored as Amazon S3 objects in S3 buckets don't, by default, have Unix file permissions assigned to them. Upon discovery in an S3 bucket by Storage Gateway, the S3 objects that represent files and folders are assigned these default Unix permissions. This operation is only supported in the file gateway type.

" + "documentation":"

Describes Network File System (NFS) file share default values. Files and folders stored as Amazon S3 objects in S3 buckets don't, by default, have Unix file permissions assigned to them. Upon discovery in an S3 bucket by Storage Gateway, the S3 objects that represent files and folders are assigned these default Unix permissions. This operation is only supported for file gateways.

" }, "NFSFileShareInfo":{ "type":"structure", @@ -2951,7 +2959,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "KMSEncrypted":{ "shape":"boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" }, "KMSKey":{"shape":"KMSKey"}, "Path":{"shape":"Path"}, @@ -2959,22 +2967,22 @@ "LocationARN":{"shape":"LocationARN"}, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" }, "ObjectACL":{"shape":"ObjectACL"}, "ClientList":{"shape":"FileShareClientList"}, "Squash":{"shape":"Squash"}, "ReadOnly":{ "shape":"Boolean", - "documentation":"

Sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

Enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

Sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" + "documentation":"

A value that sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" } }, "documentation":"

The Unix file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported in file gateways.

" @@ -3037,7 +3045,7 @@ }, "ObjectACL":{ "type":"string", - "documentation":"

Sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

", + "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

", "enum":[ "private", "public-read", @@ -3204,7 +3212,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "KMSEncrypted":{ "shape":"boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" }, "KMSKey":{"shape":"KMSKey"}, "Path":{ @@ -3215,32 +3223,32 @@ "LocationARN":{"shape":"LocationARN"}, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" }, "ObjectACL":{"shape":"ObjectACL"}, "ReadOnly":{ "shape":"Boolean", - "documentation":"

Sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

Enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

Sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" + "documentation":"

A value that sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" }, "ValidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users in the Active Directory that are allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "InvalidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users in the Active Directory that are not allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + "documentation":"

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "Authentication":{"shape":"Authentication"} }, - "documentation":"

The Windows file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported in file gateways.

" + "documentation":"

The Windows file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported for file gateways.

" }, "SMBFileShareInfoList":{ "type":"list", @@ -3302,7 +3310,7 @@ }, "Password":{ "shape":"SMBGuestPassword", - "documentation":"

The password you want to set for your SMB Server.

" + "documentation":"

The password that you want to set for your SMB Server.

" } }, "documentation":"

SetSMBGuestPasswordInput

" @@ -3339,7 +3347,7 @@ }, "Squash":{ "type":"string", - "documentation":"

The user mapped to anonymous user. Valid options are the following:

", + "documentation":"

The user mapped to anonymous user. Valid options are the following:

", "max":15, "min":5 }, @@ -3428,7 +3436,8 @@ "VolumeUsedInBytes":{ "shape":"VolumeUsedInBytes", "documentation":"

The size of the data stored on the volume in bytes.

This value is not available for volumes created prior to May 13, 2015, until you store data on the volume.

" - } + }, + "KMSKey":{"shape":"KMSKey"} }, "documentation":"

Describes an iSCSI stored volume.

" }, @@ -3792,7 +3801,7 @@ }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" }, "NFSFileShareDefaults":{ "shape":"NFSFileShareDefaults", @@ -3800,11 +3809,11 @@ }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by a file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

Sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" }, "ClientList":{ "shape":"FileShareClientList", @@ -3812,19 +3821,19 @@ }, "Squash":{ "shape":"Squash", - "documentation":"

The user mapped to anonymous user. Valid options are the following:

" + "documentation":"

The user mapped to the anonymous user. Valid options are the following:

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

Sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

Enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

Sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" + "documentation":"

A value that sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" } }, "documentation":"

UpdateNFSFileShareInput

" @@ -3845,7 +3854,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the SMB file share you want to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the SMB file share that you want to update.

" }, "KMSEncrypted":{ "shape":"Boolean", @@ -3853,35 +3862,35 @@ }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

" }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

Sets the access control list permission for objects in the Amazon S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

Sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

Enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

Sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" + "documentation":"

A value that sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" }, "ValidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users in the Active Directory that are allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example, @group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "InvalidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users in the Active Directory that are not allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + "documentation":"

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. For example, @group1. Can only be set if Authentication is set to ActiveDirectory.

" } }, "documentation":"

UpdateSMBFileShareInput

" @@ -4096,5 +4105,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and AWS's storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" + "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and AWS's storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" } diff --git a/botocore/data/transcribe/2017-10-26/service-2.json b/botocore/data/transcribe/2017-10-26/service-2.json index ce08faa6..7d7e5cc0 100644 --- a/botocore/data/transcribe/2017-10-26/service-2.json +++ b/botocore/data/transcribe/2017-10-26/service-2.json @@ -450,11 +450,15 @@ }, "ShowSpeakerLabels":{ "shape":"Boolean", - "documentation":"

Determines whether the transcription job should use speaker recognition to identify different speakers in the input audio. If you set the ShowSpeakerLabels field to true, you must also set the maximum number of speaker labels MaxSpeakerLabels field.

" + "documentation":"

Determines whether the transcription job uses speaker recognition to identify different speakers in the input audio. Speaker recognition labels individual speakers in the audio file. If you set the ShowSpeakerLabels field to true, you must also set the maximum number of speaker labels in the MaxSpeakerLabels field.

You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. If you set both, your request returns a BadRequestException.

" }, "MaxSpeakerLabels":{ "shape":"MaxSpeakers", "documentation":"

The maximum number of speakers to identify in the input audio. If there are more speakers in the audio than this number, multiple speakers will be identified as a single speaker. If you specify the MaxSpeakerLabels field, you must set the ShowSpeakerLabels field to true.

" + }, + "ChannelIdentification":{ + "shape":"Boolean", + "documentation":"

Instructs Amazon Transcribe to process each audio channel separately and then merge the transcription output of each channel into a single transcription.

Amazon Transcribe also produces a transcription of each item detected on an audio channel, including the start time and end time of the item and alternative transcriptions of the item including the confidence that Amazon Transcribe has in the transcription.

You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. If you set both, your request returns a BadRequestException.

" } }, "documentation":"

Provides optional settings for the StartTranscriptionJob operation.

" diff --git a/botocore/handlers.py b/botocore/handlers.py index a5e2c469..02604aaf 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -844,6 +844,12 @@ class ClientMethodAlias(object): def __call__(self, client, **kwargs): return getattr(client, self._actual) + +def remove_subscribe_to_shard(class_attributes, **kwargs): + if 'subscribe_to_shard' in class_attributes: + # subscribe_to_shard requires HTTP 2 support + del class_attributes['subscribe_to_shard'] + # This is a list of (event_name, handler). # When a Session is created, everything in this list will be # automatically registered with that Session. @@ -856,6 +862,7 @@ BUILTIN_HANDLERS = [ convert_body_to_file_like_object, REGISTER_LAST), ('before-parameter-build.s3.PutObject', convert_body_to_file_like_object, REGISTER_LAST), + ('creating-client-class.kinesis', remove_subscribe_to_shard), ('creating-client-class', add_generate_presigned_url), ('creating-client-class.s3', add_generate_presigned_post), ('creating-client-class.rds', add_generate_db_auth_token), diff --git a/botocore/hooks.py b/botocore/hooks.py index 2a928956..cd817a47 100644 --- a/botocore/hooks.py +++ b/botocore/hooks.py @@ -343,6 +343,49 @@ class HierarchicalEmitter(BaseEventHooks): return new_instance +class AliasedEventEmitter(HierarchicalEmitter): + EVENT_ALIASES = { + 'api.sagemaker': 'sagemaker' + } + + def __init__(self, event_aliases=None): + super(AliasedEventEmitter, self).__init__() + self._event_aliases = event_aliases + if event_aliases is None: + self._event_aliases = self.EVENT_ALIASES + + def _emit(self, event_name, kwargs, stop_on_response=False): + aliased_event_name = self._alias_event_name(event_name) + return super(AliasedEventEmitter, self)._emit( + aliased_event_name, kwargs, stop_on_response + ) + + def _verify_and_register(self, event_name, handler, unique_id, + register_method, unique_id_uses_count): + aliased_event_name = self._alias_event_name(event_name) + super(AliasedEventEmitter, self)._verify_and_register( + aliased_event_name, handler, unique_id, register_method, + unique_id_uses_count + ) + + def unregister(self, event_name, handler=None, unique_id=None, + unique_id_uses_count=False): + aliased_event_name = self._alias_event_name(event_name) + super(AliasedEventEmitter, self).unregister( + aliased_event_name, handler, unique_id, unique_id_uses_count + ) + + def _alias_event_name(self, event_name): + for old_part, new_part in self._event_aliases.items(): + if old_part in event_name: + new_name = event_name.replace(old_part, new_part) + logger.debug("Changing event name from %s to %s" % ( + event_name, new_name + )) + return new_name + return event_name + + class _PrefixTrie(object): """Specialized prefix trie that handles wildcards. diff --git a/botocore/model.py b/botocore/model.py index 873530da..295dd47c 100644 --- a/botocore/model.py +++ b/botocore/model.py @@ -50,7 +50,7 @@ class Shape(object): 'payload', 'streaming', 'timestampFormat', 'xmlNamespace', 'resultWrapper', 'xmlAttribute', 'eventstream', 'event', 'eventheader', 'eventpayload', - 'jsonvalue'] + 'jsonvalue', 'timestampFormat'] METADATA_ATTRS = ['required', 'min', 'max', 'sensitive', 'enum', 'idempotencyToken', 'error', 'exception'] MAP_TYPE = OrderedDict @@ -106,6 +106,7 @@ class Shape(object): * resultWrapper * xmlAttribute * jsonvalue + * timestampFormat :rtype: dict :return: Serialization information about the shape. 
@@ -316,7 +317,7 @@ class ServiceModel(object): return self.metadata[name] except KeyError: raise UndefinedModelAttributeError( - '"%s" not defined in the metadata of the the model: %s' % + '"%s" not defined in the metadata of the model: %s' % (name, self)) # Signature version is one of the rare properties diff --git a/botocore/serialize.py b/botocore/serialize.py index d6e886f1..9975ef12 100644 --- a/botocore/serialize.py +++ b/botocore/serialize.py @@ -39,8 +39,9 @@ and if a str/unicode type is passed in, it will be encoded as utf-8. """ import re import base64 -from xml.etree import ElementTree import calendar +import datetime +from xml.etree import ElementTree from botocore.compat import six @@ -138,12 +139,17 @@ class Serializer(object): return int(calendar.timegm(value.timetuple())) def _timestamp_rfc822(self, value): + if isinstance(value, datetime.datetime): + value = self._timestamp_unixtimestamp(value) return formatdate(value, usegmt=True) - def _convert_timestamp_to_str(self, value): + def _convert_timestamp_to_str(self, value, timestamp_format=None): + if timestamp_format is None: + timestamp_format = self.TIMESTAMP_FORMAT + timestamp_format = timestamp_format.lower() datetime_obj = parse_to_aware_datetime(value) converter = getattr( - self, '_timestamp_%s' % self.TIMESTAMP_FORMAT.lower()) + self, '_timestamp_%s' % timestamp_format) final_value = converter(datetime_obj) return final_value @@ -245,7 +251,8 @@ class QuerySerializer(Serializer): serialized[prefix] = self._get_base64(value) def _serialize_type_timestamp(self, serialized, value, shape, prefix=''): - serialized[prefix] = self._convert_timestamp_to_str(value) + serialized[prefix] = self._convert_timestamp_to_str( + value, shape.serialization.get('timestampFormat')) def _serialize_type_boolean(self, serialized, value, shape, prefix=''): if value: @@ -304,7 +311,7 @@ class JSONSerializer(Serializer): 'X-Amz-Target': target, 'Content-Type': 'application/x-amz-json-%s' % json_version, } - body = {} + body = self.MAP_TYPE() input_shape = operation_model.input_shape if input_shape is not None: self._serialize(body, parameters, input_shape) @@ -355,7 +362,8 @@ class JSONSerializer(Serializer): serialized[key] = value def _serialize_type_timestamp(self, serialized, value, shape, key): - serialized[key] = self._convert_timestamp_to_str(value) + serialized[key] = self._convert_timestamp_to_str( + value, shape.serialization.get('timestampFormat')) def _serialize_type_blob(self, serialized, value, shape, key): serialized[key] = self._get_base64(value) @@ -371,6 +379,8 @@ class BaseRestSerializer(Serializer): Subclasses must implement the ``_serialize_body_params`` method. """ + QUERY_STRING_TIMESTAMP_FORMAT = 'iso8601' + HEADER_TIMESTAMP_FORMAT = 'rfc822' # This is a list of known values for the "location" key in the # serialization dict. The location key tells us where on the request # to put the serialized value. 
@@ -486,6 +496,13 @@ class BaseRestSerializer(Serializer): elif isinstance(param_value, bool): partitioned['query_string_kwargs'][ key_name] = str(param_value).lower() + elif member.type_name == 'timestamp': + timestamp_format = member.serialization.get( + 'timestampFormat', self.QUERY_STRING_TIMESTAMP_FORMAT) + partitioned['query_string_kwargs'][ + key_name] = self._convert_timestamp_to_str( + param_value, timestamp_format + ) else: partitioned['query_string_kwargs'][key_name] = param_value elif location == 'header': @@ -518,7 +535,9 @@ class BaseRestSerializer(Serializer): if shape.type_name == 'timestamp': datetime_obj = parse_to_aware_datetime(value) timestamp = calendar.timegm(datetime_obj.utctimetuple()) - return self._timestamp_rfc822(timestamp) + timestamp_format = shape.serialization.get( + 'timestampFormat', self.HEADER_TIMESTAMP_FORMAT) + return self._convert_timestamp_to_str(timestamp, timestamp_format) elif is_json_value_header(shape): # Serialize with no spaces after separators to save space in # the header. @@ -623,7 +642,8 @@ class RestXMLSerializer(BaseRestSerializer): def _serialize_type_timestamp(self, xmlnode, params, shape, name): node = ElementTree.SubElement(xmlnode, name) - node.text = self._convert_timestamp_to_str(params) + node.text = self._convert_timestamp_to_str( + params, shape.serialization.get('timestampFormat')) def _default_serialize(self, xmlnode, params, shape, name): node = ElementTree.SubElement(xmlnode, name) diff --git a/botocore/session.py b/botocore/session.py index c0ec08c4..facb16d1 100644 --- a/botocore/session.py +++ b/botocore/session.py @@ -20,6 +20,7 @@ import copy import logging import os import platform +import warnings from botocore import __version__ import botocore.configloader @@ -29,7 +30,7 @@ from botocore.exceptions import ConfigNotFound, ProfileNotFound from botocore.exceptions import UnknownServiceError, PartialCredentialsError from botocore.errorfactory import ClientExceptionsFactory from botocore import handlers -from botocore.hooks import HierarchicalEmitter, first_non_none_response +from botocore.hooks import AliasedEventEmitter, first_non_none_response from botocore.loaders import create_loader from botocore.parsers import ResponseParserFactory from botocore.regions import EndpointResolver @@ -138,7 +139,7 @@ class Session(object): if session_vars: self.session_var_map.update(session_vars) if event_hooks is None: - self._events = HierarchicalEmitter() + self._events = AliasedEventEmitter() else: self._events = event_hooks if include_builtin_handlers: @@ -160,6 +161,7 @@ class Session(object): self._session_instance_vars['profile'] = profile self._client_config = None self._components = ComponentLocator() + self._internal_components = ComponentLocator() self._register_components() def _register_components(self): @@ -188,7 +190,7 @@ class Session(object): loader = self.get_component('data_loader') endpoints = loader.load_data('endpoints') return EndpointResolver(endpoints) - self._components.lazy_register_component( + self._internal_components.lazy_register_component( 'endpoint_resolver', create_default_resolver) def _register_response_parser_factory(self): @@ -196,7 +198,7 @@ class Session(object): ResponseParserFactory()) def _register_exceptions_factory(self): - self._components.register_component( + self._internal_components.register_component( 'exceptions_factory', ClientExceptionsFactory()) def _register_builtin_handlers(self, events): @@ -723,7 +725,29 @@ class Session(object): return first_non_none_response(responses) 
def get_component(self, name): - return self._components.get_component(name) + try: + return self._components.get_component(name) + except ValueError: + if name in ['endpoint_resolver', 'exceptions_factory']: + warnings.warn( + 'Fetching the %s component with the get_component() ' + 'method is deprecated as the component has always been ' + 'considered an internal interface of botocore' % name, + DeprecationWarning) + return self._internal_components.get_component(name) + raise + + def _get_internal_component(self, name): + # While this method may be called by botocore classes outside of the + # Session, this method should **never** be used by a class that lives + # outside of botocore. + return self._internal_components.get_component(name) + + def _register_internal_component(self, name, component): + # While this method may be called by botocore classes outside of the + # Session, this method should **never** be used by a class that lives + # outside of botocore. + return self._internal_components.register_component(name, component) def register_component(self, name, component): self._components.register_component(name, component) @@ -848,8 +872,8 @@ class Session(object): aws_secret_access_key)) else: credentials = self.get_credentials() - endpoint_resolver = self.get_component('endpoint_resolver') - exceptions_factory = self.get_component('exceptions_factory') + endpoint_resolver = self._get_internal_component('endpoint_resolver') + exceptions_factory = self._get_internal_component('exceptions_factory') client_creator = botocore.client.ClientCreator( loader, endpoint_resolver, self.user_agent(), event_emitter, retryhandler, translate, response_parser_factory, @@ -874,7 +898,7 @@ class Session(object): :rtype: list :return: Returns a list of partition names (e.g., ["aws", "aws-cn"]) """ - resolver = self.get_component('endpoint_resolver') + resolver = self._get_internal_component('endpoint_resolver') return resolver.get_available_partitions() def get_available_regions(self, service_name, partition_name='aws', @@ -897,7 +921,7 @@ class Session(object): fips-us-gov-west-1, etc). :return: Returns a list of endpoint names (e.g., ["us-east-1"]). """ - resolver = self.get_component('endpoint_resolver') + resolver = self._get_internal_component('endpoint_resolver') results = [] try: service_data = self.get_service_data(service_name) diff --git a/botocore/utils.py b/botocore/utils.py index 0de8e578..48b1b451 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -333,7 +333,7 @@ def percent_encode(input_str, safe=SAFE_CHARS): If given the binary type, will simply URL encode it. If given the text type, will produce the binary type by UTF-8 encoding the - text. If given something else, will convert it to the the text type + text. If given something else, will convert it to the text type first. """ # If its not a binary or text string, make it a text string. @@ -911,6 +911,7 @@ class S3RegionRedirector(object): error = response[1].get('Error', {}) error_code = error.get('Code') + response_metadata = response[1].get('ResponseMetadata', {}) # We have to account for 400 responses because # if we sign a Head* request with the wrong region, @@ -918,7 +919,12 @@ class S3RegionRedirector(object): # body saying it's an "AuthorizationHeaderMalformed". 
is_special_head_object = ( error_code in ['301', '400'] and - operation.name in ['HeadObject', 'HeadBucket'] + operation.name == 'HeadObject' + ) + is_special_head_bucket = ( + error_code in ['301', '400'] and + operation.name == 'HeadBucket' and + 'x-amz-bucket-region' in response_metadata.get('HTTPHeaders', {}) ) is_wrong_signing_region = ( error_code == 'AuthorizationHeaderMalformed' and @@ -926,7 +932,7 @@ class S3RegionRedirector(object): ) is_permanent_redirect = error_code == 'PermanentRedirect' if not any([is_special_head_object, is_wrong_signing_region, - is_permanent_redirect]): + is_permanent_redirect, is_special_head_bucket]): return bucket = request_dict['context']['signing']['bucket'] diff --git a/docs/source/conf.py b/docs/source/conf.py index 2e3c6ec9..2938d824 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ copyright = u'2013, Mitch Garnaat' # The short X.Y version. version = '1.10.' # The full version, including alpha/beta/rc tags. -release = '1.10.55' +release = '1.10.78' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/source/topics/paginators.rst b/docs/source/topics/paginators.rst index 1130c5fa..df4a77b2 100644 --- a/docs/source/topics/paginators.rst +++ b/docs/source/topics/paginators.rst @@ -109,7 +109,7 @@ JMESPath expressions that are applied to each page of results through the When filtering with JMESPath expressions, each page of results that is yielded by the paginator is mapped through the JMESPath expression. If a JMESPath expression returns a single value that is not an array, that value is yielded -directly. If the the result of applying the JMESPath expression to a page of +directly. If the result of applying the JMESPath expression to a page of results is a list, then each value of the list is yielded individually (essentially implementing a flat map). For example, in the above expression, each key that has a ``Size`` greater than `100` is yielded by the diff --git a/setup.py b/setup.py index f11600c9..5b475883 100644 --- a/setup.py +++ b/setup.py @@ -78,5 +78,6 @@ setup( 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', ), ) diff --git a/tests/functional/test_endpoints.py b/tests/functional/test_endpoints.py index 03a081f4..545d5b98 100644 --- a/tests/functional/test_endpoints.py +++ b/tests/functional/test_endpoints.py @@ -136,8 +136,6 @@ def test_service_name_matches_endpoint_prefix(): # named based on the service id matches the service name used to # create a client (i.e the directory name in botocore/data) # unless there is an explicit exception. - # If there model has no serviceId then we fall back to the endpoint - # prefix. session = get_session() loader = session.get_component('data_loader') diff --git a/tests/functional/test_h2_required.py b/tests/functional/test_h2_required.py new file mode 100644 index 00000000..3e7ba1c2 --- /dev/null +++ b/tests/functional/test_h2_required.py @@ -0,0 +1,50 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. 
This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +from botocore.session import get_session + +_H2_REQUIRED = object() +# Service names to list of known HTTP 2 operations +_KNOWN_SERVICES = { + 'kinesis': ['SubscribeToShard'], +} + + +def test_all_uses_of_h2_are_known(): + session = get_session() + loader = session.get_component('data_loader') + + services = loader.list_available_services('service-2') + + for service in services: + service_model = session.get_service_model(service) + h2_config = service_model.metadata.get('protocolSettings', {}).get('h2') + if h2_config == 'required': + yield _assert_h2_service_is_known, service + elif h2_config == 'eventstream': + for operation in service_model.operation_names: + operation_model = service_model.operation_model(operation) + if operation_model.has_event_stream_output: + yield _assert_h2_operation_is_known, service, operation + + +def _assert_h2_service_is_known(service): + # Validates that a service that requires HTTP 2 for all operations is known + message = 'Found unknown HTTP 2 service: %s' % service + assert _KNOWN_SERVICES.get(service) is _H2_REQUIRED, message + + +def _assert_h2_operation_is_known(service, operation): + # Validates that an operation that requires HTTP 2 is known + known_operations = _KNOWN_SERVICES.get(service, []) + message = 'Found unknown HTTP 2 operation: %s.%s' % (service, operation) + assert operation in known_operations, message diff --git a/tests/functional/test_kinesis.py b/tests/functional/test_kinesis.py new file mode 100644 index 00000000..0d6b525b --- /dev/null +++ b/tests/functional/test_kinesis.py @@ -0,0 +1,19 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +from tests import BaseSessionTest + +class TestKinesis(BaseSessionTest): + def test_subscribe_to_shard_removed(self): + kinesis = self.session.create_client('kinesis', 'us-west-2') + with self.assertRaises(AttributeError): + kinesis.subscribe_to_shard diff --git a/tests/functional/test_regions.py b/tests/functional/test_regions.py index 221355c4..a23800f1 100644 --- a/tests/functional/test_regions.py +++ b/tests/functional/test_regions.py @@ -453,7 +453,8 @@ def test_known_endpoints(): # fixed list of known endpoints. This list doesn't need to be kept 100% up # to date, but serves as a basis for regressions as the endpoint data # logic evolves. 
- resolver = _get_patched_session().get_component('endpoint_resolver') + resolver = _get_patched_session()._get_internal_component( + 'endpoint_resolver') for region_name, service_dict in KNOWN_REGIONS.items(): for service_name, endpoint in service_dict.items(): yield (_test_single_service_region, service_name, @@ -472,7 +473,7 @@ def _test_single_service_region(service_name, region_name, def test_all_s3_endpoints_have_s3v4(): session = _get_patched_session() partitions = session.get_available_partitions() - resolver = session.get_component('endpoint_resolver') + resolver = session._get_internal_component('endpoint_resolver') for partition_name in partitions: for endpoint in session.get_available_regions('s3', partition_name): resolved = resolver.construct_endpoint('s3', endpoint) @@ -481,7 +482,8 @@ def test_all_s3_endpoints_have_s3v4(): def test_known_endpoints(): - resolver = _get_patched_session().get_component('endpoint_resolver') + resolver = _get_patched_session()._get_internal_component( + 'endpoint_resolver') for service_name, endpoint in KNOWN_AWS_PARTITION_WIDE.items(): yield (_test_single_service_partition_endpoint, service_name, endpoint, resolver) @@ -495,5 +497,6 @@ def _test_single_service_partition_endpoint(service_name, expected_endpoint, def test_non_partition_endpoint_requires_region(): - resolver = _get_patched_session().get_component('endpoint_resolver') + resolver = _get_patched_session()._get_internal_component( + 'endpoint_resolver') assert_raises(NoRegionError, resolver.construct_endpoint, 'ec2') diff --git a/tests/functional/test_sagemaker.py b/tests/functional/test_sagemaker.py new file mode 100644 index 00000000..5c914c0b --- /dev/null +++ b/tests/functional/test_sagemaker.py @@ -0,0 +1,41 @@ +from botocore.stub import Stubber +from tests import BaseSessionTest + + +class TestSagemaker(BaseSessionTest): + def setUp(self): + super(TestSagemaker, self).setUp() + self.region = 'us-west-2' + self.client = self.session.create_client( + 'sagemaker', self.region) + self.stubber = Stubber(self.client) + self.stubber.activate() + self.hook_calls = [] + + def _hook(self, **kwargs): + self.hook_calls.append(kwargs['event_name']) + + def tearDown(self): + self.stubber.deactivate() + + def test_event_with_old_prefix(self): + self.client.meta.events.register( + 'provide-client-params.sagemaker.ListEndpoints', + self._hook + ) + self.stubber.add_response('list_endpoints', {'Endpoints': []}) + self.client.list_endpoints() + self.assertEqual(self.hook_calls, [ + 'provide-client-params.sagemaker.ListEndpoints' + ]) + + def test_event_with_new_prefix(self): + self.client.meta.events.register( + 'provide-client-params.api.sagemaker.ListEndpoints', + self._hook + ) + self.stubber.add_response('list_endpoints', {'Endpoints': []}) + self.client.list_endpoints() + self.assertEqual(self.hook_calls, [ + 'provide-client-params.sagemaker.ListEndpoints' + ]) diff --git a/tests/integration/test_smoke.py b/tests/integration/test_smoke.py index 76bcdedb..28965371 100644 --- a/tests/integration/test_smoke.py +++ b/tests/integration/test_smoke.py @@ -142,7 +142,7 @@ ERROR_TESTS = { 'cognito-identity': {'DescribeIdentityPool': {'IdentityPoolId': 'fake'}}, 'cognito-sync': {'DescribeIdentityPoolUsage': {'IdentityPoolId': 'fake'}}, 'config': { - 'GetResourceConfigHistory': {'resourceType': '', 'resourceId': ''}, + 'GetResourceConfigHistory': {'resourceType': '', 'resourceId': 'fake'}, }, 'datapipeline': {'GetPipelineDefinition': {'pipelineId': 'fake'}}, 'devicefarm': {'GetDevice': {'arn': 
'arn:aws:devicefarm:REGION::device:f'}}, diff --git a/tests/unit/protocols/input/ec2.json b/tests/unit/protocols/input/ec2.json index 6b719cb3..5c075b7d 100644 --- a/tests/unit/protocols/input/ec2.json +++ b/tests/unit/protocols/input/ec2.json @@ -243,7 +243,7 @@ "headers": { "Content-Type": "application/x-www-form-urlencoded; charset=utf-8" }, - "body": "Action=OperationName&Version=2014-01-01&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c" + "body": "Action=OperationName&Version=2014-01-01&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c" } } ] @@ -353,9 +353,20 @@ "members": { "TimeArg": { "shape": "TimestampType" + }, + "TimeCustom": { + "timestampFormat": "unixTimestamp", + "shape": "TimestampType" + }, + "TimeFormat": { + "shape": "TimestampFormatType" } } }, + "TimestampFormatType": { + "timestampFormat": "unixTimestamp", + "type": "timestamp" + }, "TimestampType": { "type": "timestamp" } @@ -369,14 +380,16 @@ "name": "OperationName" }, "params": { - "TimeArg": 1422172800 + "TimeArg": 1422172800, + "TimeCustom": 1422172800, + "TimeFormat": 1422172800 }, "serialized": { "uri": "/", "headers": { "Content-Type": "application/x-www-form-urlencoded; charset=utf-8" }, - "body": "Action=OperationName&Version=2014-01-01&TimeArg=2015-01-25T08%3A00%3A00Z" + "body": "Action=OperationName&Version=2014-01-01&TimeArg=2015-01-25T08%3A00%3A00Z&TimeCustom=1422172800&TimeFormat=1422172800" } } ] @@ -393,7 +406,7 @@ "members": { "Token": { "shape": "StringType", - "idempotencyToken": true + "idempotencyToken": true } } }, diff --git a/tests/unit/protocols/input/json.json b/tests/unit/protocols/input/json.json index 197ab4f8..2c5a35f2 100644 --- a/tests/unit/protocols/input/json.json +++ b/tests/unit/protocols/input/json.json @@ -57,9 +57,20 @@ "members": { "TimeArg": { "shape": "TimestampType" + }, + "TimeCustom": { + "timestampFormat": "rfc822", + "shape": "TimestampType" + }, + "TimeFormat": { + "shape": "TimestampFormatType" } } }, + "TimestampFormatType": { + "timestampFormat": "rfc822", + "type": "timestamp" + }, "TimestampType": { "type": "timestamp" } @@ -73,10 +84,12 @@ "name": "OperationName" }, "params": { - "TimeArg": 1422172800 + "TimeArg": 1422172800, + "TimeCustom": 1422172800, + "TimeFormat": 1422172800 }, "serialized": { - "body": "{\"TimeArg\": 1422172800}", + "body": "{\"TimeArg\": 1422172800, \"TimeCustom\": \"Sun, 25 Jan 2015 08:00:00 GMT\", \"TimeFormat\": \"Sun, 25 Jan 2015 08:00:00 GMT\"}", "headers": { "X-Amz-Target": "com.amazonaws.foo.OperationName", "Content-Type": "application/x-amz-json-1.1" @@ -101,7 +114,7 @@ "shape": "BlobType" }, "BlobMap": { - "shape": "BlobMapType" + "shape": "BlobMapType" } } }, @@ -146,8 +159,8 @@ }, "params": { "BlobMap": { - "key1": "foo", - "key2": "bar" + "key1": "foo", + "key2": "bar" } }, "serialized": { @@ -180,7 +193,7 @@ "ListOfStructures": { "type": "list", "member": { - "shape": "BlobType" + "shape": "BlobType" } }, "BlobType": { @@ -204,8 +217,10 @@ "serialized": { "body": "{\"ListParam\": [\"Zm9v\", \"YmFy\"]}", "uri": "/", - "headers": {"X-Amz-Target": "com.amazonaws.foo.OperationName", - "Content-Type": "application/x-amz-json-1.1"} + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + } } } ] @@ -491,7 +506,7 @@ "members": { "Token": { "shape": "StringType", - "idempotencyToken": true + "idempotencyToken": true } } }, diff --git a/tests/unit/protocols/input/query.json b/tests/unit/protocols/input/query.json index ca5f8d80..26e01779 100644 --- 
a/tests/unit/protocols/input/query.json +++ b/tests/unit/protocols/input/query.json @@ -607,9 +607,20 @@ "members": { "TimeArg": { "shape": "TimestampType" + }, + "TimeCustom": { + "timestampFormat": "unixTimestamp", + "shape": "TimestampType" + }, + "TimeFormat": { + "shape": "TimestampFormatType" } } }, + "TimestampFormatType": { + "timestampFormat": "unixTimestamp", + "type": "timestamp" + }, "TimestampType": { "type": "timestamp" } @@ -623,14 +634,16 @@ "name": "OperationName" }, "params": { - "TimeArg": 1422172800 + "TimeArg": 1422172800, + "TimeCustom": 1422172800, + "TimeFormat": 1422172800 }, "serialized": { "uri": "/", "headers": { "Content-Type": "application/x-www-form-urlencoded; charset=utf-8" }, - "body": "Action=OperationName&Version=2014-01-01&TimeArg=2015-01-25T08%3A00%3A00Z" + "body": "Action=OperationName&Version=2014-01-01&TimeArg=2015-01-25T08%3A00%3A00Z&TimeCustom=1422172800&TimeFormat=1422172800" } } ] @@ -852,7 +865,7 @@ "members": { "Token": { "shape": "StringType", - "idempotencyToken": true + "idempotencyToken": true } } }, diff --git a/tests/unit/protocols/input/rest-json.json b/tests/unit/protocols/input/rest-json.json index 0d8d1bd6..74ade6a7 100644 --- a/tests/unit/protocols/input/rest-json.json +++ b/tests/unit/protocols/input/rest-json.json @@ -677,7 +677,7 @@ } ] }, - { + { "description": "Blob payload", "metadata": { "protocol": "rest-json", @@ -1103,9 +1103,51 @@ "shape": "TimestampType", "location": "header", "locationName": "x-amz-timearg" + }, + "TimeArgInQuery": { + "shape": "TimestampType", + "location": "querystring", + "locationName": "TimeQuery" + }, + "TimeCustom": { + "timestampFormat": "iso8601", + "shape": "TimestampType" + }, + "TimeCustomInHeader": { + "timestampFormat": "unixTimestamp", + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timecustom-header" + }, + "TimeCustomInQuery": { + "timestampFormat": "unixTimestamp", + "shape": "TimestampType", + "location": "querystring", + "locationName": "TimeCustomQuery" + }, + "TimeFormat": { + "shape": "TimestampFormatRfcType" + }, + "TimeFormatInHeader": { + "shape": "TimestampFormatUnixType", + "location": "header", + "locationName": "x-amz-timeformat-header" + }, + "TimeFormatInQuery": { + "shape": "TimestampFormatUnixType", + "location": "querystring", + "locationName": "TimeFormatQuery" } } }, + "TimestampFormatRfcType": { + "timestampFormat": "rfc822", + "type": "timestamp" + }, + "TimestampFormatUnixType": { + "timestampFormat": "unixTimestamp", + "type": "timestamp" + }, "TimestampType": { "type": "timestamp" } @@ -1123,32 +1165,24 @@ "name": "OperationName" }, "params": { - "TimeArg": 1422172800 + "TimeArg": 1422172800, + "TimeArgInQuery": 1422172800, + "TimeArgInHeader": 1422172800, + "TimeCustom": 1422172800, + "TimeCustomInQuery": 1422172800, + "TimeCustomInHeader": 1422172800, + "TimeFormat": 1422172800, + "TimeFormatInQuery": 1422172800, + "TimeFormatInHeader": 1422172800 }, "serialized": { - "uri": "/path", - "headers": {}, - "body": "{\"TimeArg\": 1422172800}" - } - }, - { - "given": { - "input": { - "shape": "InputShape" + "uri": "/path?TimeQuery=2015-01-25T08%3A00%3A00Z&TimeCustomQuery=1422172800&TimeFormatQuery=1422172800", + "headers": { + "x-amz-timearg": "Sun, 25 Jan 2015 08:00:00 GMT", + "x-amz-timecustom-header": "1422172800", + "x-amz-timeformat-header": "1422172800" }, - "http": { - "method": "POST", - "requestUri": "/path" - }, - "name": "OperationName" - }, - "params": { - "TimeArgInHeader": 1422172800 - }, - "serialized": { - "uri": "/path", 
- "headers": {"x-amz-timearg": "Sun, 25 Jan 2015 08:00:00 GMT"}, - "body": "" + "body": "{\"TimeArg\": 1422172800, \"TimeCustom\": \"2015-01-25T08:00:00Z\", \"TimeFormat\": \"Sun, 25 Jan 2015 08:00:00 GMT\"}" } } ] @@ -1251,7 +1285,7 @@ "members": { "Token": { "shape": "StringType", - "idempotencyToken": true + "idempotencyToken": true } } }, diff --git a/tests/unit/protocols/input/rest-xml.json b/tests/unit/protocols/input/rest-xml.json index ca2f0a04..44fb4d4f 100644 --- a/tests/unit/protocols/input/rest-xml.json +++ b/tests/unit/protocols/input/rest-xml.json @@ -599,7 +599,7 @@ ] }, { - "description": "Blob and timestamp shapes", + "description": "Blob shapes", "metadata": { "protocol": "rest-xml", "apiVersion": "2014-01-01" @@ -616,17 +616,11 @@ "StructureShape": { "type": "structure", "members": { - "t": { - "shape": "TShape" - }, "b": { "shape": "BShape" } } }, - "TShape": { - "type": "timestamp" - }, "BShape": { "type": "blob" } @@ -647,19 +641,122 @@ }, "params": { "StructureParam": { - "t": 1422172800, "b": "foo" } }, "serialized": { "method": "POST", - "body": "2015-01-25T08:00:00ZZm9v", + "body": "Zm9v", "uri": "/2014-01-01/hostedzone", "headers": {} } } ] }, + { + "description": "Timestamp shapes", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + }, + "TimeArgInHeader": { + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timearg" + }, + "TimeArgInQuery": { + "shape": "TimestampType", + "location": "querystring", + "locationName": "TimeQuery" + }, + "TimeCustom": { + "timestampFormat": "rfc822", + "shape": "TimestampType" + }, + "TimeCustomInHeader": { + "timestampFormat": "unixTimestamp", + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timecustom-header" + }, + "TimeCustomInQuery": { + "timestampFormat": "unixTimestamp", + "shape": "TimestampType", + "location": "querystring", + "locationName": "TimeCustomQuery" + }, + "TimeFormat": { + "shape": "TimestampFormatRfcType" + }, + "TimeFormatInHeader": { + "shape": "TimestampFormatUnixType", + "location": "header", + "locationName": "x-amz-timeformat-header" + }, + "TimeFormatInQuery": { + "shape": "TimestampFormatUnixType", + "location": "querystring", + "locationName": "TimeFormatQuery" + } + } + }, + "TimestampFormatRfcType": { + "timestampFormat": "rfc822", + "type": "timestamp" + }, + "TimestampFormatUnixType": { + "timestampFormat": "unixTimestamp", + "type": "timestamp" + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "TimestampStructure", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800, + "TimeArgInQuery": 1422172800, + "TimeArgInHeader": 1422172800, + "TimeCustom": 1422172800, + "TimeCustomInQuery": 1422172800, + "TimeCustomInHeader": 1422172800, + "TimeFormat": 1422172800, + "TimeFormatInQuery": 1422172800, + "TimeFormatInHeader": 1422172800 + }, + "serialized": { + "method": "POST", + "body": "2015-01-25T08:00:00ZSun, 25 Jan 2015 08:00:00 GMTSun, 25 Jan 2015 08:00:00 GMT", + "uri": "/2014-01-01/hostedzone?TimeQuery=2015-01-25T08%3A00%3A00Z&TimeCustomQuery=1422172800&TimeFormatQuery=1422172800", + "headers": { + "x-amz-timearg": "Sun, 25 Jan 2015 08:00:00 GMT", + "x-amz-timecustom-header": 
"1422172800", + "x-amz-timeformat-header": "1422172800" + } + } + } + ] + }, { "description": "Header maps", "metadata": { @@ -1586,51 +1683,6 @@ } ] }, - { - "description": "Timestamp in header", - "metadata": { - "protocol": "rest-xml", - "apiVersion": "2014-01-01" - }, - "shapes": { - "InputShape": { - "type": "structure", - "members": { - "TimeArgInHeader": { - "shape": "TimestampType", - "location": "header", - "locationName": "x-amz-timearg" - } - } - }, - "TimestampType": { - "type": "timestamp" - } - }, - "cases": [ - { - "given": { - "input": { - "shape": "InputShape" - }, - "http": { - "method": "POST", - "requestUri": "/path" - }, - "name": "OperationName" - }, - "params": { - "TimeArgInHeader": 1422172800 - }, - "serialized": { - "method": "POST", - "body": "", - "uri": "/path", - "headers": {"x-amz-timearg": "Sun, 25 Jan 2015 08:00:00 GMT"} - } - } - ] - }, { "description": "Idempotency token auto fill", "metadata": { diff --git a/tests/unit/protocols/output/ec2.json b/tests/unit/protocols/output/ec2.json index 5b76bf5f..d79239cd 100644 --- a/tests/unit/protocols/output/ec2.json +++ b/tests/unit/protocols/output/ec2.json @@ -450,5 +450,73 @@ } } ] + }, + { + "description": "Timestamp members", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + }, + "TimeCustom": { + "timestampFormat": "rfc822", + "shape": "TimestampType" + }, + "TimeFormat": { + "shape": "TimestampFormatType" + }, + "StructMember": { + "shape": "TimeContainer" + } + } + }, + "TimeContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "TimestampType" + }, + "bar": { + "shape": "TimestampFormatType" + } + } + }, + "TimestampFormatType": { + "timestampFormat": "unixTimestamp", + "type": "timestamp" + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "TimeArg": 1398796238, + "TimeCustom": 1398796238, + "TimeFormat": 1398796238, + "StructMember": { + "foo": 1398796238, + "bar": 1398796238 + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "2014-04-29T18:30:38+00:0013987962382014-04-29T18:30:38+00:00Tue, 29 Apr 2014 18:30:38 GMT1398796238requestid" + } + } + ] } ] diff --git a/tests/unit/protocols/output/json.json b/tests/unit/protocols/output/json.json index 58e38bf4..60c55c2d 100644 --- a/tests/unit/protocols/output/json.json +++ b/tests/unit/protocols/output/json.json @@ -142,24 +142,38 @@ "OutputShape": { "type": "structure", "members": { - "TimeMember": { - "shape": "TimeType" + "TimeArg": { + "shape": "TimestampType" + }, + "TimeCustom": { + "timestampFormat": "rfc822", + "shape": "TimestampType" + }, + "TimeFormat": { + "shape": "TimestampFormatType" }, "StructMember": { "shape": "TimeContainer" } } }, - "TimeType": { - "type": "timestamp" - }, "TimeContainer": { "type": "structure", "members": { "foo": { - "shape": "TimeType" + "shape": "TimestampType" + }, + "bar": { + "shape": "TimestampFormatType" } } + }, + "TimestampFormatType": { + "timestampFormat": "iso8601", + "type": "timestamp" + }, + "TimestampType": { + "type": "timestamp" } }, "cases": [ @@ -171,15 +185,18 @@ "name": "OperationName" }, "result": { - "TimeMember": 1398796238, + "TimeArg": 1398796238, + "TimeCustom": 1398796238, + "TimeFormat": 1398796238, "StructMember": { - "foo": 1398796238 + "foo": 1398796238, + "bar": 1398796238 } }, "response": { "status_code": 200, 
"headers": {}, - "body": "{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}" + "body": "{\"TimeArg\": 1398796238, \"TimeCustom\": \"Tue, 29 Apr 2014 18:30:38 GMT\", \"TimeFormat\": \"2014-04-29T18:30:38+00:00\", \"StructMember\": {\"foo\": 1398796238, \"bar\": \"2014-04-29T18:30:38+00:00\"}}" } } ] diff --git a/tests/unit/protocols/output/query.json b/tests/unit/protocols/output/query.json index 995f3e53..bbe5b5ad 100644 --- a/tests/unit/protocols/output/query.json +++ b/tests/unit/protocols/output/query.json @@ -772,5 +772,73 @@ } } ] + }, + { + "description": "Timestamp members", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + }, + "TimeCustom": { + "timestampFormat": "rfc822", + "shape": "TimestampType" + }, + "TimeFormat": { + "shape": "TimestampFormatType" + }, + "StructMember": { + "shape": "TimeContainer" + } + } + }, + "TimeContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "TimestampType" + }, + "bar": { + "shape": "TimestampFormatType" + } + } + }, + "TimestampFormatType": { + "timestampFormat": "unixTimestamp", + "type": "timestamp" + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "TimeArg": 1398796238, + "TimeCustom": 1398796238, + "TimeFormat": 1398796238, + "StructMember": { + "foo": 1398796238, + "bar": 1398796238 + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "2014-04-29T18:30:38+00:0013987962382014-04-29T18:30:38+00:00Tue, 29 Apr 2014 18:30:38 GMT1398796238requestid" + } + } + ] } ] diff --git a/tests/unit/protocols/output/rest-json.json b/tests/unit/protocols/output/rest-json.json index 53bfdf28..0246ae8d 100644 --- a/tests/unit/protocols/output/rest-json.json +++ b/tests/unit/protocols/output/rest-json.json @@ -166,24 +166,54 @@ "OutputShape": { "type": "structure", "members": { - "TimeMember": { - "shape": "TimeType" + "TimeArg": { + "shape": "TimestampType" + }, + "TimeArgInHeader": { + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timearg" + }, + "TimeCustom": { + "timestampFormat": "rfc822", + "shape": "TimestampType" + }, + "TimeCustomInHeader": { + "timestampFormat": "unixTimestamp", + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timecustom" + }, + "TimeFormat": { + "shape": "TimestampFormatType" + }, + "TimeFormatInHeader": { + "shape": "TimestampFormatType", + "location": "header", + "locationName": "x-amz-timeformat" }, "StructMember": { "shape": "TimeContainer" } } }, - "TimeType": { - "type": "timestamp" - }, "TimeContainer": { "type": "structure", "members": { "foo": { - "shape": "TimeType" + "shape": "TimestampType" + }, + "bar": { + "shape": "TimestampFormatType" } } + }, + "TimestampFormatType": { + "timestampFormat": "iso8601", + "type": "timestamp" + }, + "TimestampType": { + "type": "timestamp" } }, "cases": [ @@ -195,15 +225,25 @@ "name": "OperationName" }, "result": { - "TimeMember": 1398796238, + "TimeArg": 1398796238, + "TimeArgInHeader": 1398796238, + "TimeCustom": 1398796238, + "TimeCustomInHeader": 1398796238, + "TimeFormat": 1398796238, + "TimeFormatInHeader": 1398796238, "StructMember": { - "foo": 1398796238 + "foo": 1398796238, + "bar": 1398796238 } }, "response": { "status_code": 200, - "headers": {}, - "body": "{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}" 
+ "headers": { + "x-amz-timearg": "Tue, 29 Apr 2014 18:30:38 GMT", + "x-amz-timecustom": "1398796238", + "x-amz-timeformat": "2014-04-29T18:30:38+00:00" + }, + "body": "{\"TimeArg\": 1398796238, \"TimeCustom\": \"Tue, 29 Apr 2014 18:30:38 GMT\", \"TimeFormat\": \"2014-04-29T18:30:38+00:00\", \"StructMember\": {\"foo\": 1398796238, \"bar\": \"2014-04-29T18:30:38+00:00\"}}" } } ] diff --git a/tests/unit/protocols/output/rest-xml.json b/tests/unit/protocols/output/rest-xml.json index a4ad7a01..9e37d8fa 100644 --- a/tests/unit/protocols/output/rest-xml.json +++ b/tests/unit/protocols/output/rest-xml.json @@ -756,6 +756,96 @@ } } ] + }, + { + "description": "Timestamp members", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + }, + "TimeArgInHeader": { + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timearg" + }, + "TimeCustom": { + "timestampFormat": "rfc822", + "shape": "TimestampType" + }, + "TimeCustomInHeader": { + "timestampFormat": "unixTimestamp", + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timecustom" + }, + "TimeFormat": { + "shape": "TimestampFormatType" + }, + "TimeFormatInHeader": { + "shape": "TimestampFormatType", + "location": "header", + "locationName": "x-amz-timeformat" + }, + "StructMember": { + "shape": "TimeContainer" + } + } + }, + "TimeContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "TimestampType" + }, + "bar": { + "shape": "TimestampFormatType" + } + } + }, + "TimestampFormatType": { + "timestampFormat": "unixTimestamp", + "type": "timestamp" + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "TimeArg": 1398796238, + "TimeArgInHeader": 1398796238, + "TimeCustom": 1398796238, + "TimeCustomInHeader": 1398796238, + "TimeFormat": 1398796238, + "TimeFormatInHeader": 1398796238, + "StructMember": { + "foo": 1398796238, + "bar": 1398796238 + } + }, + "response": { + "status_code": 200, + "headers": { + "x-amz-timearg": "Tue, 29 Apr 2014 18:30:38 GMT", + "x-amz-timecustom": "1398796238", + "x-amz-timeformat": "1398796238" + }, + "body": "2014-04-29T18:30:38+00:0013987962382014-04-29T18:30:38+00:00Tue, 29 Apr 2014 18:30:38 GMT1398796238requestid" + } + } + ] } - ] diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py index 6757cff0..e127916b 100644 --- a/tests/unit/test_configloader.py +++ b/tests/unit/test_configloader.py @@ -67,6 +67,13 @@ class TestConfigLoader(BaseEnvVar): with self.assertRaises(botocore.exceptions.ConfigParseError): raw_config_parse(filename) + def test_config_parse_error_filesystem_encoding_none(self): + filename = path('aws_config_bad') + with mock.patch('sys.getfilesystemencoding') as encoding: + encoding.return_value = None + with self.assertRaises(botocore.exceptions.ConfigParseError): + raw_config_parse(filename) + def test_config(self): loaded_config = raw_config_parse(path('aws_config')) self.assertIn('default', loaded_config) @@ -118,6 +125,13 @@ class TestConfigLoader(BaseEnvVar): with self.assertRaises(botocore.exceptions.ConfigParseError): loaded_config = load_config(filename) + def test_nested_bad_config_filesystem_encoding_none(self): + filename = path('aws_config_nested_bad') + with mock.patch('sys.getfilesystemencoding') as encoding: + encoding.return_value = None + with 
+                loaded_config = load_config(filename)
+
     def test_multi_file_load(self):
         filenames = [path('aws_config_other'),
                      path('aws_config'),
@@ -142,6 +156,12 @@ class TestConfigLoader(BaseEnvVar):
             encoding.return_value = 'utf-8'
             load_config(path(b'\xe2\x9c\x93'))
 
+    def test_unicode_bytes_path_not_found_filesystem_encoding_none(self):
+        with mock.patch('sys.getfilesystemencoding') as encoding:
+            encoding.return_value = None
+            with self.assertRaises(botocore.exceptions.ConfigNotFound):
+                load_config(path(b'\xe2\x9c\x93'))
+
     def test_unicode_bytes_path(self):
         filename = self.create_config_file(b'aws_config_unicode\xe2\x9c\x93')
         with mock.patch('sys.getfilesystemencoding') as encoding:
diff --git a/tests/unit/test_hooks.py b/tests/unit/test_hooks.py
index 9040cfc7..72495525 100644
--- a/tests/unit/test_hooks.py
+++ b/tests/unit/test_hooks.py
@@ -17,6 +17,7 @@ from tests import unittest
 from functools import partial
 
 from botocore.hooks import HierarchicalEmitter, first_non_none_response
+from botocore.hooks import AliasedEventEmitter
 
 
 class TestHierarchicalEventEmitter(unittest.TestCase):
@@ -61,6 +62,68 @@ class TestHierarchicalEventEmitter(unittest.TestCase):
         self.assertEqual(calls, ['foo.bar.baz', 'foo.bar', 'foo'])
 
 
+class TestAliasedEventEmitter(unittest.TestCase):
+    def setUp(self):
+        self.hook_calls = []
+
+    def hook(self, **kwargs):
+        self.hook_calls.append(kwargs)
+
+    def test_event_emitted(self):
+        aliases = {'bar': 'bear'}
+        emitter = AliasedEventEmitter(event_aliases=aliases)
+        emitter.register('foo.bear.baz', self.hook)
+        emitter.emit('foo.bear.baz')
+        calls = [e['event_name'] for e in self.hook_calls]
+        self.assertEqual(calls, ['foo.bear.baz'])
+
+    def test_aliased_event_emitted(self):
+        aliases = {'bar': 'bear'}
+        emitter = AliasedEventEmitter(event_aliases=aliases)
+        emitter.register('foo.bear.baz', self.hook)
+        emitter.emit('foo.bar.baz')
+        calls = [e['event_name'] for e in self.hook_calls]
+        self.assertEqual(calls, ['foo.bear.baz'])
+
+    def test_aliased_event_registered(self):
+        aliases = {'bar': 'bear'}
+        emitter = AliasedEventEmitter(event_aliases=aliases)
+        emitter.register('foo.bar.baz', self.hook)
+        emitter.emit('foo.bear.baz')
+        calls = [e['event_name'] for e in self.hook_calls]
+        self.assertEqual(calls, ['foo.bear.baz'])
+
+    def test_event_unregistered(self):
+        aliases = {'bar': 'bear'}
+        emitter = AliasedEventEmitter(event_aliases=aliases)
+
+        emitter.register('foo.bar.baz', self.hook)
+        emitter.emit('foo.bear.baz')
+        calls = [e['event_name'] for e in self.hook_calls]
+        self.assertEqual(calls, ['foo.bear.baz'])
+
+        self.hook_calls = []
+        emitter.unregister('foo.bear.baz', self.hook)
+        emitter.emit('foo.bear.baz')
+        calls = [e['event_name'] for e in self.hook_calls]
+        self.assertEqual(calls, [])
+
+    def test_aliased_event_unregistered(self):
+        aliases = {'bar': 'bear'}
+        emitter = AliasedEventEmitter(event_aliases=aliases)
+
+        emitter.register('foo.bar.baz', self.hook)
+        emitter.emit('foo.bear.baz')
+        calls = [e['event_name'] for e in self.hook_calls]
+        self.assertEqual(calls, ['foo.bear.baz'])
+
+        self.hook_calls = []
+        emitter.unregister('foo.bar.baz', self.hook)
+        emitter.emit('foo.bear.baz')
+        calls = [e['event_name'] for e in self.hook_calls]
+        self.assertEqual(calls, [])
+
+
 class TestStopProcessing(unittest.TestCase):
     def setUp(self):
         self.emitter = HierarchicalEmitter()
diff --git a/tests/unit/test_paginate.py b/tests/unit/test_paginate.py
index 39179e5d..bf8666b4 100644
--- a/tests/unit/test_paginate.py
+++ b/tests/unit/test_paginate.py
@@ -1171,7 +1171,7 @@ class TestIncludeNonResultKeys(unittest.TestCase):
             {'Result': {'Key': ['bar', 'baz'], 'Inner': 'v3'},
              'Outer': 'v4', 'NextToken': 't2'},
             {'Result': {'Key': ['qux'], 'Inner': 'v5'},
-             'Outer': 'v6', 'NextToken': 't3'},
+             'Outer': 'v6'},
         ]
         pages = self.paginator.paginate()
         actual = pages.build_full_result()
@@ -1404,7 +1404,7 @@ class TestStringPageSize(unittest.TestCase):
         self.service = model.ServiceModel(self.service_model)
         self.model = self.service.operation_model('ListStuff')
         self.method = mock.Mock()
-        self.method.side_effect = []
+        self.method.side_effect = [{}]
         self.paginator = Paginator(self.method, self.paginate_config, self.model)
 
     def test_int_page_size(self):
diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py
index 80b0db0b..46bd4b76 100644
--- a/tests/unit/test_session.py
+++ b/tests/unit/test_session.py
@@ -327,13 +327,15 @@ class TestSessionPartitionFiles(BaseSessionTest):
     def test_lists_partitions_on_disk(self):
         mock_resolver = mock.Mock()
         mock_resolver.get_available_partitions.return_value = ['foo']
-        self.session.register_component('endpoint_resolver', mock_resolver)
+        self.session._register_internal_component(
+            'endpoint_resolver', mock_resolver)
         self.assertEqual(['foo'], self.session.get_available_partitions())
 
     def test_proxies_list_endpoints_to_resolver(self):
         resolver = mock.Mock()
         resolver.get_available_endpoints.return_value = ['a', 'b']
-        self.session.register_component('endpoint_resolver', resolver)
+        self.session._register_internal_component(
+            'endpoint_resolver', resolver)
         self.session.get_available_regions('foo', 'bar', True)
 
     def test_provides_empty_list_for_unknown_service_regions(self):
@@ -653,6 +655,30 @@ class TestCreateClient(BaseSessionTest):
         self.assertEqual(call_kwargs['api_version'], override_api_version)
 
 
+class TestSessionComponent(BaseSessionTest):
+    def test_internal_component(self):
+        component = object()
+        self.session._register_internal_component('internal', component)
+        self.assertIs(
+            self.session._get_internal_component('internal'), component)
+        with self.assertRaises(ValueError):
+            self.session.get_component('internal')
+
+    def test_internal_endpoint_resolver_is_same_as_deprecated_public(self):
+        endpoint_resolver = self.session._get_internal_component(
+            'endpoint_resolver')
+        self.assertIs(
+            self.session.get_component('endpoint_resolver'), endpoint_resolver)
+
+    def test_internal_exceptions_factory_is_same_as_deprecated_public(self):
+        exceptions_factory = self.session._get_internal_component(
+            'exceptions_factory')
+        self.assertIs(
+            self.session.get_component('exceptions_factory'),
+            exceptions_factory
+        )
+
+
 class TestComponentLocator(unittest.TestCase):
     def setUp(self):
         self.components = botocore.session.ComponentLocator()
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index 054aee45..a108d5ef 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -1440,6 +1440,26 @@ class TestS3RegionRedirector(unittest.TestCase):
             request_dict, response, self.operation)
         self.assertIsNone(redirect_response)
 
+    def test_does_not_redirect_400_head_bucket_no_region_header(self):
+        # We should not redirect a 400 Head* if the region header is not
+        # present as this will lead to infinitely calling HeadBucket.
+        request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
+                        'context': {'signing': {'bucket': 'foo'}}}
+        response = (None, {
+            'Error': {'Code': '400', 'Message': 'Bad Request'},
+            'ResponseMetadata': {
+                'HTTPHeaders': {}
+            }
+        })
+
+        self.operation.name = 'HeadBucket'
+        redirect_response = self.redirector.redirect_from_error(
+            request_dict, response, self.operation)
+        head_bucket_calls = self.client.head_bucket.call_count
+        self.assertIsNone(redirect_response)
+        # We should not have made an additional head bucket call
+        self.assertEqual(head_bucket_calls, 0)
+
     def test_does_not_redirect_if_None_response(self):
         request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
                         'context': {'signing': {'bucket': 'foo'}}}