From 5aa18848d9352bd215ddfd79bda0701be1f04972 Mon Sep 17 00:00:00 2001 From: TANIGUCHI Takaki Date: Wed, 27 Feb 2019 16:30:11 +0900 Subject: [PATCH] New upstream version 1.12.103+repack --- PKG-INFO | 2 +- botocore.egg-info/PKG-INFO | 2 +- botocore.egg-info/SOURCES.txt | 13 +- botocore/__init__.py | 2 +- botocore/config.py | 4 +- .../data/acm-pca/2017-08-22/paginators-1.json | 6 + .../data/acm-pca/2017-08-22/service-2.json | 12 +- .../data/acm-pca/2017-08-22/waiters-2.json | 6 + .../2017-11-09/paginators-1.json | 36 + .../data/amplify/2017-07-25/paginators-1.json | 27 +- .../apigateway/2015-07-09/paginators-1.json | 36 + .../2018-11-29/service-2.json | 4 +- .../apigatewayv2/2018-11-29/paginators-1.json | 63 +- .../2016-02-06/paginators-1.json | 6 + .../2016-02-06/service-2.json | 130 +- .../data/appmesh/2018-10-01/service-2.json | 56 +- .../data/appstream/2016-12-01/service-2.json | 156 +- .../data/appsync/2017-07-25/paginators-1.json | 45 +- .../data/athena/2017-05-18/service-2.json | 600 ++- .../2018-01-06/paginators-1.json | 15 +- .../autoscaling/2011-01-01/paginators-1.json | 12 + .../autoscaling/2011-01-01/service-2.json | 109 +- .../data/backup/2018-11-15/paginators-1.json | 3 + .../data/backup/2018-11-15/service-2.json | 3148 ++++++++++++++ .../data/batch/2016-08-10/paginators-1.json | 27 +- .../data/budgets/2016-10-20/paginators-1.json | 21 +- botocore/data/ce/2017-10-25/service-2.json | 12 +- .../data/chime/2018-05-01/paginators-1.json | 15 +- botocore/data/chime/2018-05-01/service-2.json | 18 +- .../data/cloud9/2017-09-23/service-2.json | 35 +- .../2017-01-11/paginators-1.json | 18 + .../2010-05-15/paginators-1.json | 55 + .../cloudhsm/2014-05-30/paginators-1.json | 18 +- .../cloudtrail/2013-11-01/paginators-1.json | 10 + .../cloudwatch/2010-08-01/paginators-1.json | 6 + .../data/cloudwatch/2010-08-01/service-2.json | 34 +- .../data/codebuild/2016-10-06/service-2.json | 134 +- .../data/codecommit/2015-04-13/service-2.json | 313 +- 
.../codedeploy/2014-10-06/paginators-1.json | 15 + .../data/codedeploy/2014-10-06/service-2.json | 238 +- .../codepipeline/2015-07-09/paginators-1.json | 25 +- .../codestar/2017-04-19/paginators-1.json | 27 +- .../2014-06-30/paginators-1.json | 9 +- .../cognito-idp/2016-04-18/paginators-1.json | 51 +- .../comprehend/2017-11-27/paginators-1.json | 42 + .../data/config/2014-11-12/paginators-1.json | 53 + .../data/connect/2017-08-08/paginators-1.json | 33 +- botocore/data/cur/2017-01-06/service-2.json | 116 +- .../datasync/2018-11-09/paginators-1.json | 33 +- .../data/dax/2017-04-19/paginators-1.json | 44 +- .../devicefarm/2015-06-23/paginators-1.json | 33 + .../data/devicefarm/2015-06-23/service-2.json | 102 +- .../2012-10-25/paginators-1.json | 21 +- .../directconnect/2012-10-25/service-2.json | 41 +- .../discovery/2015-11-01/paginators-1.json | 39 +- .../data/discovery/2015-11-01/service-2.json | 308 +- botocore/data/dlm/2018-01-12/service-2.json | 7 +- botocore/data/dms/2016-01-01/waiters-2.json | 10 +- .../data/docdb/2014-10-31/paginators-1.json | 40 + botocore/data/docdb/2014-10-31/service-2.json | 3720 +++++++++++++++++ botocore/data/docdb/2014-10-31/waiters-2.json | 90 + botocore/data/ds/2015-04-16/paginators-1.json | 48 + botocore/data/ds/2015-04-16/service-2.json | 12 + .../dynamodb/2012-08-10/paginators-1.json | 5 + .../data/dynamodb/2012-08-10/service-2.json | 35 +- .../data/ec2/2016-11-15/paginators-1.json | 246 ++ botocore/data/ec2/2016-11-15/service-2.json | 585 +-- botocore/data/ecr/2015-09-21/service-2.json | 3 +- .../data/ecs/2014-11-13/paginators-1.json | 12 + botocore/data/ecs/2014-11-13/service-2.json | 230 +- botocore/data/efs/2015-02-01/service-2.json | 218 +- .../data/eks/2017-11-01/paginators-1.json | 15 +- .../2010-12-01/paginators-1.json | 24 + .../data/elb/2012-06-01/paginators-1.json | 6 + .../data/elbv2/2015-12-01/paginators-1.json | 24 + botocore/data/elbv2/2015-12-01/service-2.json | 73 +- .../data/emr/2009-03-31/paginators-1.json | 5 + 
botocore/data/emr/2009-03-31/service-2.json | 27 +- botocore/data/endpoints.json | 408 +- botocore/data/es/2015-01-01/paginators-1.json | 18 + botocore/data/es/2015-01-01/service-2.json | 14 + .../data/fms/2018-01-01/paginators-1.json | 21 +- botocore/data/fms/2018-01-01/service-2.json | 16 +- .../data/fsx/2018-03-01/paginators-1.json | 21 +- botocore/data/fsx/2018-03-01/service-2.json | 42 +- .../gamelift/2015-10-01/paginators-1.json | 99 +- .../data/gamelift/2015-10-01/service-2.json | 293 +- .../2018-08-08/paginators-1.json | 21 +- .../data/glue/2017-03-31/paginators-1.json | 6 + botocore/data/glue/2017-03-31/service-2.json | 621 ++- .../greengrass/2017-06-07/paginators-1.json | 118 + .../inspector/2016-02-16/paginators-1.json | 6 + .../data/iot/2015-05-28/paginators-1.json | 155 + botocore/data/iot/2015-05-28/service-2.json | 150 +- .../2018-05-14/paginators-1.json | 16 + .../2018-05-14/paginators-1.json | 15 +- .../iotanalytics/2017-11-27/paginators-1.json | 33 +- .../iotanalytics/2017-11-27/service-2.json | 95 +- .../data/kafka/2018-11-14/paginators-1.json | 15 +- .../2017-09-30/paginators-1.json | 9 +- .../2017-09-30/service-2.json | 74 +- .../2017-09-30/service-2.json | 10 +- .../data/kinesis/2013-12-02/paginators-1.json | 12 + .../2018-05-23/paginators-1.json | 15 +- .../kinesisvideo/2017-09-30/paginators-1.json | 9 +- .../kinesisvideo/2017-09-30/service-2.json | 24 +- .../data/lambda/2015-03-31/paginators-1.json | 18 + .../data/lambda/2015-03-31/service-2.json | 424 +- .../2018-08-01/paginators-1.json | 33 +- .../lightsail/2016-11-28/paginators-1.json | 55 + .../data/lightsail/2016-11-28/service-2.json | 38 +- .../data/logs/2014-03-28/paginators-1.json | 18 + botocore/data/logs/2014-03-28/service-2.json | 6 +- .../data/macie/2017-12-19/paginators-1.json | 15 +- .../2017-01-11/paginators-1.json | 9 +- .../mediaconnect/2018-11-14/paginators-1.json | 6 + .../mediaconnect/2018-11-14/service-2.json | 137 + .../mediaconvert/2017-08-29/service-2.json | 425 +- 
.../data/medialive/2017-10-14/service-2.json | 393 +- .../mediapackage/2017-10-12/service-2.json | 24 + .../2017-09-01/paginators-1.json | 9 +- .../mediastore/2017-09-01/paginators-1.json | 9 +- .../data/mediastore/2017-09-01/service-2.json | 73 +- .../mediatailor/2018-04-23/paginators-1.json | 9 +- .../mediatailor/2018-04-23/service-2.json | 273 +- .../data/mgh/2017-05-31/paginators-1.json | 27 +- botocore/data/mq/2017-11-27/paginators-1.json | 9 +- .../data/neptune/2014-10-31/paginators-1.json | 30 + .../opsworkscm/2016-11-01/paginators-1.json | 21 +- .../data/opsworkscm/2016-11-01/service-2.json | 18 +- .../organizations/2016-11-28/service-2.json | 58 +- .../2018-07-26/paginators-1.json | 33 +- .../2018-09-05/service-2.json | 677 +++ .../data/pinpoint/2016-12-01/service-2.json | 163 +- .../data/polly/2016-06-10/paginators-1.json | 11 + .../data/ram/2018-01-04/paginators-1.json | 39 +- .../data/rds-data/2018-08-01/service-2.json | 702 ++-- .../data/rds/2014-10-31/paginators-1.json | 6 + botocore/data/rds/2014-10-31/service-2.json | 153 +- .../redshift/2012-12-01/paginators-1.json | 42 + .../data/redshift/2012-12-01/service-2.json | 20 +- .../rekognition/2016-06-27/service-2.json | 124 +- .../resource-groups/2017-11-27/service-2.json | 2 +- .../robomaker/2018-06-29/paginators-1.json | 39 +- .../data/robomaker/2018-06-29/service-2.json | 269 +- .../data/route53/2013-04-01/paginators-1.json | 6 + .../2014-05-15/paginators-1.json | 6 + .../2018-04-01/paginators-1.json | 9 +- .../sagemaker/2017-07-24/paginators-1.json | 78 + .../data/sagemaker/2017-07-24/service-2.json | 167 +- .../2017-10-17/paginators-1.json | 9 +- .../secretsmanager/2017-10-17/service-2.json | 26 +- .../securityhub/2018-10-26/paginators-1.json | 39 +- .../2017-09-08/paginators-1.json | 21 +- .../2015-12-10/paginators-1.json | 42 + .../servicecatalog/2015-12-10/service-2.json | 4 +- .../data/ses/2010-12-01/paginators-1.json | 17 + .../data/shield/2016-06-02/paginators-1.json | 6 + 
.../data/shield/2016-06-02/service-2.json | 15 +- .../data/signer/2017-08-25/paginators-1.json | 21 +- .../data/sms-voice/2018-09-05/service-2.json | 83 +- .../data/sms/2016-10-24/paginators-1.json | 6 + .../snowball/2016-06-30/paginators-1.json | 18 + .../data/sns/2010-03-31/paginators-1.json | 5 + .../data/ssm/2014-11-06/paginators-1.json | 168 + botocore/data/ssm/2014-11-06/service-2.json | 65 +- .../stepfunctions/2016-11-23/service-2.json | 83 +- .../2013-06-30/paginators-1.json | 6 + .../storagegateway/2013-06-30/service-2.json | 177 +- .../transfer/2018-11-05/paginators-1.json | 9 +- .../data/transfer/2018-11-05/service-2.json | 24 +- .../translate/2017-07-01/paginators-1.json | 9 +- .../data/waf/2015-08-24/paginators-1.json | 53 + .../workdocs/2016-05-01/paginators-1.json | 36 + .../data/workdocs/2016-05-01/service-2.json | 6 +- .../worklink/2018-09-25/paginators-1.json | 3 + .../data/worklink/2018-09-25/service-2.json | 1170 ++++++ .../workmail/2017-10-01/paginators-1.json | 12 + .../workspaces/2015-04-08/paginators-1.json | 28 + .../data/workspaces/2015-04-08/service-2.json | 5 +- .../data/xray/2016-04-12/paginators-1.json | 15 + botocore/docs/service.py | 6 + botocore/eventstream.py | 43 +- botocore/handlers.py | 28 +- botocore/model.py | 17 +- botocore/monitoring.py | 57 +- botocore/parsers.py | 83 +- botocore/response.py | 3 +- docs/source/conf.py | 4 +- setup.py | 4 +- tests/functional/csm/cases.json | 141 +- tests/functional/docs/__init__.py | 9 + .../test_sms_voice.py} | 17 +- tests/functional/test_endpoints.py | 1 + tests/functional/test_paginator_config.py | 129 +- tests/integration/test_ec2.py | 3 +- tests/unit/protocols/output/event-stream.json | 174 - tests/unit/protocols/output/json.json | 90 + tests/unit/protocols/output/rest-xml.json | 172 + tests/unit/test_eventstream.py | 61 +- tests/unit/test_monitoring.py | 45 + tests/unit/test_parsers.py | 60 +- tests/unit/test_response.py | 6 + 203 files changed, 19661 insertions(+), 2566 deletions(-) 
create mode 100644 botocore/data/backup/2018-11-15/paginators-1.json create mode 100644 botocore/data/backup/2018-11-15/service-2.json create mode 100644 botocore/data/docdb/2014-10-31/paginators-1.json create mode 100644 botocore/data/docdb/2014-10-31/service-2.json create mode 100644 botocore/data/docdb/2014-10-31/waiters-2.json create mode 100644 botocore/data/greengrass/2017-06-07/paginators-1.json create mode 100644 botocore/data/iot1click-devices/2018-05-14/paginators-1.json create mode 100644 botocore/data/pinpoint-sms-voice/2018-09-05/service-2.json create mode 100644 botocore/data/worklink/2018-09-25/paginators-1.json create mode 100644 botocore/data/worklink/2018-09-25/service-2.json rename tests/functional/{test_kinesis.py => docs/test_sms_voice.py} (57%) delete mode 100644 tests/unit/protocols/output/event-stream.json diff --git a/PKG-INFO b/PKG-INFO index 1fc9cb43..485539dc 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.12.71 +Version: 1.12.103 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index 1fc9cb43..485539dc 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.12.71 +Version: 1.12.103 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt index 9ac2e397..e34648cd 100644 --- a/botocore.egg-info/SOURCES.txt +++ b/botocore.egg-info/SOURCES.txt @@ -83,6 +83,8 @@ botocore/data/autoscaling-plans/2018-01-06/service-2.json botocore/data/autoscaling/2011-01-01/examples-1.json botocore/data/autoscaling/2011-01-01/paginators-1.json botocore/data/autoscaling/2011-01-01/service-2.json +botocore/data/backup/2018-11-15/paginators-1.json +botocore/data/backup/2018-11-15/service-2.json botocore/data/batch/2016-08-10/examples-1.json botocore/data/batch/2016-08-10/paginators-1.json botocore/data/batch/2016-08-10/service-2.json @@ -231,6 +233,9 @@ botocore/data/dms/2016-01-01/examples-1.json botocore/data/dms/2016-01-01/paginators-1.json botocore/data/dms/2016-01-01/service-2.json botocore/data/dms/2016-01-01/waiters-2.json +botocore/data/docdb/2014-10-31/paginators-1.json +botocore/data/docdb/2014-10-31/service-2.json +botocore/data/docdb/2014-10-31/waiters-2.json botocore/data/ds/2015-04-16/examples-1.json botocore/data/ds/2015-04-16/paginators-1.json botocore/data/ds/2015-04-16/service-2.json @@ -330,6 +335,7 @@ botocore/data/globalaccelerator/2018-08-08/paginators-1.json botocore/data/globalaccelerator/2018-08-08/service-2.json botocore/data/glue/2017-03-31/paginators-1.json botocore/data/glue/2017-03-31/service-2.json +botocore/data/greengrass/2017-06-07/paginators-1.json botocore/data/greengrass/2017-06-07/service-2.json botocore/data/guardduty/2017-11-28/paginators-1.json botocore/data/guardduty/2017-11-28/service-2.json @@ -352,6 +358,7 @@ botocore/data/iot-jobs-data/2017-09-29/service-2.json botocore/data/iot/2015-05-28/examples-1.json botocore/data/iot/2015-05-28/paginators-1.json botocore/data/iot/2015-05-28/service-2.json +botocore/data/iot1click-devices/2018-05-14/paginators-1.json botocore/data/iot1click-devices/2018-05-14/service-2.json 
botocore/data/iot1click-projects/2018-05-14/paginators-1.json botocore/data/iot1click-projects/2018-05-14/service-2.json @@ -447,6 +454,7 @@ botocore/data/pi/2018-02-27/paginators-1.json botocore/data/pi/2018-02-27/service-2.json botocore/data/pinpoint-email/2018-07-26/paginators-1.json botocore/data/pinpoint-email/2018-07-26/service-2.json +botocore/data/pinpoint-sms-voice/2018-09-05/service-2.json botocore/data/pinpoint/2016-12-01/examples-1.json botocore/data/pinpoint/2016-12-01/service-2.json botocore/data/polly/2016-06-10/examples-1.json @@ -568,6 +576,8 @@ botocore/data/waf/2015-08-24/paginators-1.json botocore/data/waf/2015-08-24/service-2.json botocore/data/workdocs/2016-05-01/paginators-1.json botocore/data/workdocs/2016-05-01/service-2.json +botocore/data/worklink/2018-09-25/paginators-1.json +botocore/data/worklink/2018-09-25/service-2.json botocore/data/workmail/2017-10-01/paginators-1.json botocore/data/workmail/2017-10-01/service-2.json botocore/data/workspaces/2015-04-08/examples-1.json @@ -769,7 +779,6 @@ tests/functional/test_events.py tests/functional/test_h2_required.py tests/functional/test_history.py tests/functional/test_iot_data.py -tests/functional/test_kinesis.py tests/functional/test_lex.py tests/functional/test_loaders.py tests/functional/test_machinelearning.py @@ -807,6 +816,7 @@ tests/functional/docs/test_glacier.py tests/functional/docs/test_lex.py tests/functional/docs/test_s3.py tests/functional/docs/test_shared_example_config.py +tests/functional/docs/test_sms_voice.py tests/functional/docs/test_streaming_body.py tests/functional/leak/__init__.py tests/functional/leak/test_resource_leaks.py @@ -1074,7 +1084,6 @@ tests/unit/protocols/input/query.json tests/unit/protocols/input/rest-json.json tests/unit/protocols/input/rest-xml.json tests/unit/protocols/output/ec2.json -tests/unit/protocols/output/event-stream.json tests/unit/protocols/output/json.json tests/unit/protocols/output/query.json tests/unit/protocols/output/rest-json.json 
diff --git a/botocore/__init__.py b/botocore/__init__.py index 54fe3c77..af677963 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.12.71' +__version__ = '1.12.103' class NullHandler(logging.Handler): diff --git a/botocore/config.py b/botocore/config.py index bd77eca3..038d17eb 100644 --- a/botocore/config.py +++ b/botocore/config.py @@ -35,12 +35,12 @@ class Config(object): :param user_agent_extra: The value to append to the current User-Agent header value. - :type connect_timeout: int + :type connect_timeout: float or int :param connect_timeout: The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds. - :type read_timeout: int + :type read_timeout: float or int :param read_timeout: The time in seconds till a timeout exception is thrown when attempting to read from a connection. The default is 60 seconds. diff --git a/botocore/data/acm-pca/2017-08-22/paginators-1.json b/botocore/data/acm-pca/2017-08-22/paginators-1.json index 72171533..0de1b931 100644 --- a/botocore/data/acm-pca/2017-08-22/paginators-1.json +++ b/botocore/data/acm-pca/2017-08-22/paginators-1.json @@ -5,6 +5,12 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "CertificateAuthorities" + }, + "ListTags": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Tags" } } } diff --git a/botocore/data/acm-pca/2017-08-22/service-2.json b/botocore/data/acm-pca/2017-08-22/service-2.json index b9907c25..3619b405 100644 --- a/botocore/data/acm-pca/2017-08-22/service-2.json +++ b/botocore/data/acm-pca/2017-08-22/service-2.json @@ -24,6 +24,7 @@ "errors":[ {"shape":"InvalidArgsException"}, {"shape":"InvalidPolicyException"}, + {"shape":"InvalidTagException"}, {"shape":"LimitExceededException"} ], "documentation":"

Creates a private subordinate certificate authority (CA). You must specify the CA configuration, the revocation configuration, the CA type, and an optional idempotency token. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses to sign, and X.500 subject information. The CRL (certificate revocation list) configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this operation returns the Amazon Resource Name (ARN) of the CA.

", @@ -231,6 +232,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"InvalidArnException"}, {"shape":"InvalidStateException"}, + {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"}, {"shape":"RequestAlreadyProcessedException"}, {"shape":"RequestInProgressException"}, @@ -564,6 +566,10 @@ "IdempotencyToken":{ "shape":"IdempotencyToken", "documentation":"

Alphanumeric string that can be used to distinguish between calls to CreateCertificateAuthority. Idempotency tokens time out after five minutes. Therefore, if you call CreateCertificateAuthority multiple times with the same idempotency token within a five minute period, ACM PCA recognizes that you are requesting only one certificate. As a result, ACM PCA issues only one. If you change the idempotency token for each call, however, ACM PCA recognizes that you are requesting multiple certificates.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Key-value pairs that will be attached to the new private CA. You can associate up to 50 tags with a private CA.

" } } }, @@ -1170,7 +1176,7 @@ "type":"string", "max":128, "min":1, - "pattern":"[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TagList":{ "type":"list", @@ -1182,7 +1188,7 @@ "type":"string", "max":256, "min":0, - "pattern":"[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TooManyTagsException":{ "type":"structure", @@ -1257,5 +1263,5 @@ ] } }, - "documentation":"

You can use the ACM PCA API to create a private certificate authority (CA). You must first call the CreateCertificateAuthority operation. If successful, the operation returns an Amazon Resource Name (ARN) for your private CA. Use this ARN as input to the GetCertificateAuthorityCsr operation to retrieve the certificate signing request (CSR) for your private CA certificate. Sign the CSR using the root or an intermediate CA in your on-premises PKI hierarchy, and call the ImportCertificateAuthorityCertificate to import your signed private CA certificate into ACM PCA.

Use your private CA to issue and revoke certificates. These are private certificates that identify and secure client computers, servers, applications, services, devices, and users over SSLS/TLS connections within your organization. Call the IssueCertificate operation to issue a certificate. Call the RevokeCertificate operation to revoke a certificate.

Certificates issued by your private CA can be trusted only within your organization, not publicly.

Your private CA can optionally create a certificate revocation list (CRL) to track the certificates you revoke. To create a CRL, you must specify a RevocationConfiguration object when you call the CreateCertificateAuthority operation. ACM PCA writes the CRL to an S3 bucket that you specify. You must specify a bucket policy that grants ACM PCA write permission.

You can also call the CreateCertificateAuthorityAuditReport to create an optional audit report that lists every time the CA private key is used. The private key is used for signing when the IssueCertificate or RevokeCertificate operation is called.

" + "documentation":"

You can use the ACM PCA API to create a private certificate authority (CA). You must first call the CreateCertificateAuthority operation. If successful, the operation returns an Amazon Resource Name (ARN) for your private CA. Use this ARN as input to the GetCertificateAuthorityCsr operation to retrieve the certificate signing request (CSR) for your private CA certificate. Sign the CSR using the root or an intermediate CA in your on-premises PKI hierarchy, and call the ImportCertificateAuthorityCertificate to import your signed private CA certificate into ACM PCA.

Use your private CA to issue and revoke certificates. These are private certificates that identify and secure client computers, servers, applications, services, devices, and users over SSLS/TLS connections within your organization. Call the IssueCertificate operation to issue a certificate. Call the RevokeCertificate operation to revoke a certificate.

Certificates issued by your private CA can be trusted only within your organization, not publicly.

Your private CA can optionally create a certificate revocation list (CRL) to track the certificates you revoke. To create a CRL, you must specify a RevocationConfiguration object when you call the CreateCertificateAuthority operation. ACM PCA writes the CRL to an S3 bucket that you specify. You must specify a bucket policy that grants ACM PCA write permission.

You can also call the CreateCertificateAuthorityAuditReport to create an optional audit report, which enumerates all of the issued, valid, expired, and revoked certificates from the CA.

Each ACM PCA API operation has a throttling limit which determines the number of times the operation can be called per second. For more information, see API Rate Limits in ACM PCA in the ACM PCA user guide.

" } diff --git a/botocore/data/acm-pca/2017-08-22/waiters-2.json b/botocore/data/acm-pca/2017-08-22/waiters-2.json index 32c272ec..79bf399b 100644 --- a/botocore/data/acm-pca/2017-08-22/waiters-2.json +++ b/botocore/data/acm-pca/2017-08-22/waiters-2.json @@ -48,6 +48,12 @@ "matcher": "path", "argument": "AuditReportStatus", "expected": "SUCCESS" + }, + { + "state": "failure", + "matcher": "path", + "argument": "AuditReportStatus", + "expected": "FAILED" } ] } diff --git a/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json b/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json index a91b700d..ced5de2f 100644 --- a/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json +++ b/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json @@ -41,6 +41,42 @@ "output_token": "NextToken", "input_token": "NextToken", "limit_key": "MaxResults" + }, + "ListBusinessReportSchedules": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "BusinessReportSchedules" + }, + "ListConferenceProviders": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ConferenceProviders" + }, + "ListDeviceEvents": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DeviceEvents" + }, + "ListSkillsStoreCategories": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "CategoryList" + }, + "ListSkillsStoreSkillsByCategory": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SkillsStoreSkills" + }, + "ListSmartHomeAppliances": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SmartHomeAppliances" } } } diff --git a/botocore/data/amplify/2017-07-25/paginators-1.json b/botocore/data/amplify/2017-07-25/paginators-1.json index ea142457..f84208e9 100644 
--- a/botocore/data/amplify/2017-07-25/paginators-1.json +++ b/botocore/data/amplify/2017-07-25/paginators-1.json @@ -1,3 +1,28 @@ { - "pagination": {} + "pagination": { + "ListApps": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "apps" + }, + "ListBranches": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "branches" + }, + "ListDomainAssociations": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "domainAssociations" + }, + "ListJobs": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "jobSummaries" + } + } } diff --git a/botocore/data/apigateway/2015-07-09/paginators-1.json b/botocore/data/apigateway/2015-07-09/paginators-1.json index c7a44f17..221e0489 100644 --- a/botocore/data/apigateway/2015-07-09/paginators-1.json +++ b/botocore/data/apigateway/2015-07-09/paginators-1.json @@ -71,6 +71,42 @@ "limit_key": "limit", "output_token": "position", "result_key": "items" + }, + "GetAuthorizers": { + "input_token": "position", + "limit_key": "limit", + "output_token": "position", + "result_key": "items" + }, + "GetDocumentationParts": { + "input_token": "position", + "limit_key": "limit", + "output_token": "position", + "result_key": "items" + }, + "GetDocumentationVersions": { + "input_token": "position", + "limit_key": "limit", + "output_token": "position", + "result_key": "items" + }, + "GetGatewayResponses": { + "input_token": "position", + "limit_key": "limit", + "output_token": "position", + "result_key": "items" + }, + "GetRequestValidators": { + "input_token": "position", + "limit_key": "limit", + "output_token": "position", + "result_key": "items" + }, + "GetSdkTypes": { + "input_token": "position", + "limit_key": "limit", + "output_token": "position", + "result_key": "items" } } } diff --git 
a/botocore/data/apigatewaymanagementapi/2018-11-29/service-2.json b/botocore/data/apigatewaymanagementapi/2018-11-29/service-2.json index 7b699c54..eea6c940 100644 --- a/botocore/data/apigatewaymanagementapi/2018-11-29/service-2.json +++ b/botocore/data/apigatewaymanagementapi/2018-11-29/service-2.json @@ -40,7 +40,7 @@ "shapes" : { "Data" : { "type" : "blob", - "max" : "131072", + "max" : 131072, "documentation" : "

The data to be sent to the client specified by its connection id.

" }, "ForbiddenException" : { @@ -106,4 +106,4 @@ } }, "documentation" : "

The Amazon API Gateway Management API allows you to directly manage runtime aspects of your deployed APIs. To use it, you must explicitly set the SDK's endpoint to point to the endpoint of your deployed API. The endpoint will be of the form https://{api-id}.execute-api.{region}.amazonaws.com/{stage}, or will be the endpoint corresponding to your API's custom domain and base path, if applicable.

" -} \ No newline at end of file +} diff --git a/botocore/data/apigatewayv2/2018-11-29/paginators-1.json b/botocore/data/apigatewayv2/2018-11-29/paginators-1.json index ea142457..2f57dd2c 100644 --- a/botocore/data/apigatewayv2/2018-11-29/paginators-1.json +++ b/botocore/data/apigatewayv2/2018-11-29/paginators-1.json @@ -1,3 +1,64 @@ { - "pagination": {} + "pagination": { + "GetApis": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "GetAuthorizers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "GetDeployments": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "GetDomainNames": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "GetIntegrationResponses": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "GetIntegrations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "GetModels": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "GetRouteResponses": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "GetRoutes": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "GetStages": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + } + } } diff --git a/botocore/data/application-autoscaling/2016-02-06/paginators-1.json b/botocore/data/application-autoscaling/2016-02-06/paginators-1.json index 178af24d..7ec8f3af 100644 --- 
a/botocore/data/application-autoscaling/2016-02-06/paginators-1.json +++ b/botocore/data/application-autoscaling/2016-02-06/paginators-1.json @@ -17,6 +17,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "ScalingPolicies" + }, + "DescribeScheduledActions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ScheduledActions" } } } diff --git a/botocore/data/application-autoscaling/2016-02-06/service-2.json b/botocore/data/application-autoscaling/2016-02-06/service-2.json index 6ea4e64a..9759533c 100644 --- a/botocore/data/application-autoscaling/2016-02-06/service-2.json +++ b/botocore/data/application-autoscaling/2016-02-06/service-2.json @@ -59,7 +59,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deregisters a scalable target.

Deregistering a scalable target deletes the scaling policies that are associated with it.

To create a scalable target or update an existing one, see RegisterScalableTarget.

" + "documentation":"

Deregisters a scalable target.

Deregistering a scalable target deletes the scaling policies that are associated with it.

To create a scalable target or update an existing one, see RegisterScalableTarget.

" }, "DescribeScalableTargets":{ "name":"DescribeScalableTargets", @@ -142,7 +142,7 @@ {"shape":"FailedResourceAccessException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates or updates a policy for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you register the scalable target using RegisterScalableTarget.

To update a policy, specify its policy name and the parameters that you want to change. Any parameters that you don't specify are not changed by this update request.

You can view the scaling policies for a service namespace using DescribeScalingPolicies. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

" + "documentation":"

Creates or updates a policy for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you have registered the resource as a scalable target using RegisterScalableTarget.

To update a policy, specify its policy name and the parameters that you want to change. Any parameters that you don't specify are not changed by this update request.

You can view the scaling policies for a service namespace using DescribeScalingPolicies. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

Multiple scaling policies can be in force at the same time for the same scalable target. You can have one or more target tracking scaling policies, one or more step scaling policies, or both. However, there is a chance that multiple policies could conflict, instructing the scalable target to scale out or in at the same time. Application Auto Scaling gives precedence to the policy that provides the largest capacity for both scale in and scale out. For example, if one policy increases capacity by 3, another policy increases capacity by 200 percent, and the current capacity is 10, Application Auto Scaling uses the policy with the highest calculated capacity (200% of 10 = 20) and scales out to 30.

Learn more about how to work with scaling policies in the Application Auto Scaling User Guide.

" }, "PutScheduledAction":{ "name":"PutScheduledAction", @@ -159,7 +159,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates or updates a scheduled action for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you register the scalable target using RegisterScalableTarget.

To update an action, specify its name and the parameters that you want to change. If you don't specify start and end times, the old values are deleted. Any other parameters that you don't specify are not changed by this update request.

You can view the scheduled actions using DescribeScheduledActions. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

" + "documentation":"

Creates or updates a scheduled action for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you have registered the resource as a scalable target using RegisterScalableTarget.

To update an action, specify its name and the parameters that you want to change. If you don't specify start and end times, the old values are deleted. Any other parameters that you don't specify are not changed by this update request.

You can view the scheduled actions using DescribeScheduledActions. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

Learn more about how to work with scheduled actions in the Application Auto Scaling User Guide.

" }, "RegisterScalableTarget":{ "name":"RegisterScalableTarget", @@ -175,7 +175,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out or scale in. After you have registered a scalable target, you can use this operation to update the minimum and maximum values for its scalable dimension.

After you register a scalable target, you can create and apply scaling policies using PutScalingPolicy. You can view the scaling policies for a service namespace using DescribeScalableTargets. If you no longer need a scalable target, you can deregister it using DeregisterScalableTarget.

" + "documentation":"

Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out and scale in. Each scalable target has a resource ID, scalable dimension, and namespace, as well as values for minimum and maximum capacity.

After you register a scalable target, you do not need to register it again to use other Application Auto Scaling operations. To see which resources have been registered, use DescribeScalableTargets. You can also view the scaling policies for a service namespace using DescribeScalingPolicies.

If you no longer need a scalable target, you can deregister it using DeregisterScalableTarget.

" } }, "shapes":{ @@ -228,7 +228,7 @@ "members":{ "MetricName":{ "shape":"MetricName", - "documentation":"

The name of the metric.

" + "documentation":"

The name of the metric.

" }, "Namespace":{ "shape":"MetricNamespace", @@ -236,7 +236,7 @@ }, "Dimensions":{ "shape":"MetricDimensions", - "documentation":"

The dimensions of the metric.

" + "documentation":"

The dimensions of the metric.

Conditional: If you published your metric with dimensions, you must specify the same dimensions in your scaling policy.

" }, "Statistic":{ "shape":"MetricStatistic", @@ -247,7 +247,7 @@ "documentation":"

The unit of the metric.

" } }, - "documentation":"

Configures a customized metric for a target tracking policy.

" + "documentation":"

Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Application Auto Scaling.

To create your customized metric specification:

For more information about CloudWatch, see Amazon CloudWatch Concepts.

" }, "DeleteScalingPolicyRequest":{ "type":"structure", @@ -268,11 +268,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" } } }, @@ -286,7 +286,8 @@ "required":[ "ServiceNamespace", "ScheduledActionName", - "ResourceId" + "ResourceId", + "ScalableDimension" ], "members":{ "ServiceNamespace":{ @@ -299,11 +300,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" } } }, @@ -326,11 +327,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" } } }, @@ -349,11 +350,11 @@ }, "ResourceIds":{ "shape":"ResourceIdsMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" }, "MaxResults":{ "shape":"MaxResults", @@ -388,11 +389,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" }, "MaxResults":{ "shape":"MaxResults", @@ -431,11 +432,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" }, "MaxResults":{ "shape":"MaxResults", @@ -474,11 +475,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" }, "MaxResults":{ "shape":"MaxResults", @@ -510,7 +511,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Failed access to resources caused an exception. This exception is thrown when Application Auto Scaling is unable to retrieve the alarms associated with a scaling policy due to a client error, for example, if the role ARN specified for a scalable target does not have permission to call the CloudWatch DescribeAlarms on your behalf.

", + "documentation":"

Failed access to resources caused an exception. This exception is thrown when Application Auto Scaling is unable to retrieve the alarms associated with a scaling policy due to a client error, for example, if the role ARN specified for a scalable target does not have permission to call the CloudWatch DescribeAlarms on your behalf.

", "exception":true }, "InternalServiceException":{ @@ -534,7 +535,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

A per-account resource limit is exceeded. For more information, see Application Auto Scaling Limits.

", + "documentation":"

A per-account resource limit is exceeded. For more information, see Application Auto Scaling Limits.

", "exception":true }, "MaxResults":{"type":"integer"}, @@ -562,7 +563,7 @@ "documentation":"

The value of the dimension.

" } }, - "documentation":"

Describes the dimension of a metric.

" + "documentation":"

Describes the dimension names and values associated with a metric.

" }, "MetricDimensionName":{"type":"string"}, "MetricDimensionValue":{"type":"string"}, @@ -635,7 +636,7 @@ "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Spot fleet request or ECS service.

The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:

" } }, - "documentation":"

Configures a predefined metric for a target tracking policy.

" + "documentation":"

Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.

" }, "PutScalingPolicyRequest":{ "type":"structure", @@ -656,15 +657,15 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

The policy type. This parameter is required if you are creating a policy.

For DynamoDB, only TargetTrackingScaling is supported. For Amazon ECS, Spot Fleet, and Amazon RDS, both StepScaling and TargetTrackingScaling are supported. For any other service, only StepScaling is supported.

" + "documentation":"

The policy type. This parameter is required if you are creating a scaling policy.

For information on which services do not support StepScaling or TargetTrackingScaling, see Step Scaling Policies for Application Auto Scaling and Target Tracking Scaling Policies for Application Auto Scaling in the Application Auto Scaling User Guide.

" }, "StepScalingPolicyConfiguration":{ "shape":"StepScalingPolicyConfiguration", @@ -672,7 +673,7 @@ }, "TargetTrackingScalingPolicyConfiguration":{ "shape":"TargetTrackingScalingPolicyConfiguration", - "documentation":"

A target tracking policy.

This parameter is required if you are creating a policy and the policy type is TargetTrackingScaling.

" + "documentation":"

A target tracking scaling policy. Includes support for predefined or customized metrics.

This parameter is required if you are creating a policy and the policy type is TargetTrackingScaling.

" } } }, @@ -686,7 +687,7 @@ }, "Alarms":{ "shape":"Alarms", - "documentation":"

The CloudWatch alarms created for the target tracking policy.

" + "documentation":"

The CloudWatch alarms created for the target tracking scaling policy.

" } } }, @@ -695,7 +696,8 @@ "required":[ "ServiceNamespace", "ScheduledActionName", - "ResourceId" + "ResourceId", + "ScalableDimension" ], "members":{ "ServiceNamespace":{ @@ -704,7 +706,7 @@ }, "Schedule":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

" + "documentation":"

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

" }, "ScheduledActionName":{ "shape":"ScheduledActionName", @@ -712,11 +714,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This parameter is required if you are creating a scheduled action. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" }, "StartTime":{ "shape":"TimestampType", @@ -751,23 +753,23 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" }, "MinCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The minimum value to scale to in response to a scale in event. This parameter is required if you are registering a scalable target.

" + "documentation":"

The minimum value to scale to in response to a scale-in event. This parameter is required to register a scalable target.

" }, "MaxCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The maximum value to scale to in response to a scale out event. This parameter is required if you are registering a scalable target.

" + "documentation":"

The maximum value to scale to in response to a scale-out event. This parameter is required to register a scalable target.

" }, "RoleARN":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see Service-Linked Roles for Application Auto Scaling.

For resources that are not supported using a service-linked role, this parameter is required and must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

" + "documentation":"

Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see Service-Linked Roles for Application Auto Scaling.

For resources that are not supported using a service-linked role, this parameter is required and must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

" } } }, @@ -830,19 +832,19 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" }, "MinCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The minimum value to scale to in response to a scale in event.

" + "documentation":"

The minimum value to scale to in response to a scale-in event.

" }, "MaxCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The maximum value to scale to in response to a scale out event.

" + "documentation":"

The maximum value to scale to in response to a scale-out event.

" }, "RoleARN":{ "shape":"ResourceIdMaxLen1600", @@ -900,11 +902,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" }, "Description":{ "shape":"XmlString", @@ -979,11 +981,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" }, "PolicyType":{ "shape":"PolicyType", @@ -995,7 +997,7 @@ }, "TargetTrackingScalingPolicyConfiguration":{ "shape":"TargetTrackingScalingPolicyConfiguration", - "documentation":"

A target tracking policy.

" + "documentation":"

A target tracking scaling policy.

" }, "Alarms":{ "shape":"Alarms", @@ -1006,7 +1008,7 @@ "documentation":"

The Unix timestamp for when the scaling policy was created.

" } }, - "documentation":"

Represents a scaling policy.

" + "documentation":"

Represents a scaling policy to use with Application Auto Scaling.

" }, "ScheduledAction":{ "type":"structure", @@ -1033,15 +1035,15 @@ }, "Schedule":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

" + "documentation":"

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" }, "StartTime":{ "shape":"TimestampType", @@ -1121,18 +1123,18 @@ }, "MinAdjustmentMagnitude":{ "shape":"MinAdjustmentMagnitude", - "documentation":"

The minimum number to adjust your scalable dimension as a result of a scaling activity. If the adjustment type is PercentChangeInCapacity, the scaling policy changes the scalable dimension of the scalable target by this amount.

" + "documentation":"

The minimum number to adjust your scalable dimension as a result of a scaling activity. If the adjustment type is PercentChangeInCapacity, the scaling policy changes the scalable dimension of the scalable target by this amount.

For example, suppose that you create a step scaling policy to scale out an Amazon ECS service by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the service has 4 tasks and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude of 2, Application Auto Scaling scales out the service by 2 tasks.

" }, "Cooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes where previous trigger-related scaling activities can influence future scaling events.

For scale out policies, while the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out. For example, an alarm triggers a step scaling policy to scale out an Amazon ECS service by 2 tasks, the scaling activity completes successfully, and a cooldown period of 5 minutes starts. During the Cooldown period, if the alarm triggers the same policy again but at a more aggressive step adjustment to scale out the service by 3 tasks, the 2 tasks that were added in the previous scale out event are considered part of that capacity and only 1 additional task is added to the desired count.

For scale in policies, the cooldown period is used to block subsequent scale in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.

" + "documentation":"

The amount of time, in seconds, after a scaling activity completes where previous trigger-related scaling activities can influence future scaling events.

For scale-out policies, while the cooldown period is in effect, the capacity that has been added by the previous scale-out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out. For example, an alarm triggers a step scaling policy to scale out an Amazon ECS service by 2 tasks, the scaling activity completes successfully, and a cooldown period of 5 minutes starts. During the cooldown period, if the alarm triggers the same policy again but at a more aggressive step adjustment to scale out the service by 3 tasks, the 2 tasks that were added in the previous scale-out event are considered part of that capacity and only 1 additional task is added to the desired count.

For scale-in policies, the cooldown period is used to block subsequent scale-in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.

" }, "MetricAggregationType":{ "shape":"MetricAggregationType", - "documentation":"

The aggregation type for the CloudWatch metrics. Valid values are Minimum, Maximum, and Average.

" + "documentation":"

The aggregation type for the CloudWatch metrics. Valid values are Minimum, Maximum, and Average. If the aggregation type is null, the value is treated as Average.

" } }, - "documentation":"

Represents a step scaling policy configuration.

" + "documentation":"

Represents a step scaling policy configuration to use with Application Auto Scaling.

" }, "TargetTrackingScalingPolicyConfiguration":{ "type":"structure", @@ -1144,26 +1146,26 @@ }, "PredefinedMetricSpecification":{ "shape":"PredefinedMetricSpecification", - "documentation":"

A predefined metric.

" + "documentation":"

A predefined metric. You can specify either a predefined metric or a customized metric.

" }, "CustomizedMetricSpecification":{ "shape":"CustomizedMetricSpecification", - "documentation":"

A customized metric.

" + "documentation":"

A customized metric. You can specify either a predefined metric or a customized metric.

" }, "ScaleOutCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scale out activity completes before another scale out activity can start.

While the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out.

" + "documentation":"

The amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start.

While the cooldown period is in effect, the capacity that has been added by the previous scale-out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out.

" }, "ScaleInCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scale in activity completes before another scale in activity can start.

The cooldown period is used to block subsequent scale in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.

" + "documentation":"

The amount of time, in seconds, after a scale-in activity completes before another scale-in activity can start.

The cooldown period is used to block subsequent scale-in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.

" }, "DisableScaleIn":{ "shape":"DisableScaleIn", - "documentation":"

Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false.

" + "documentation":"

Indicates whether scale in by the target tracking scaling policy is disabled. If the value is true, scale in is disabled and the target tracking scaling policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking scaling policy can remove capacity from the scalable resource. The default value is false.

" } }, - "documentation":"

Represents a target tracking scaling policy configuration.

" + "documentation":"

Represents a target tracking scaling policy configuration to use with Application Auto Scaling.

" }, "TimestampType":{"type":"timestamp"}, "ValidationException":{ @@ -1179,5 +1181,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"

With Application Auto Scaling, you can configure automatic scaling for your scalable resources. You can use Application Auto Scaling to accomplish the following tasks:

Application Auto Scaling can scale the following resources:

To learn more about Application Auto Scaling, see the Application Auto Scaling User Guide.

To configure automatic scaling for multiple resources across multiple services, use AWS Auto Scaling to create a scaling plan for your application. For more information, see the AWS Auto Scaling User Guide.

" + "documentation":"

With Application Auto Scaling, you can configure automatic scaling for your scalable resources. You can use Application Auto Scaling to accomplish the following tasks:

Application Auto Scaling can scale the following resources:

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

" } diff --git a/botocore/data/appmesh/2018-10-01/service-2.json b/botocore/data/appmesh/2018-10-01/service-2.json index d0fb2b1a..20ed19ea 100644 --- a/botocore/data/appmesh/2018-10-01/service-2.json +++ b/botocore/data/appmesh/2018-10-01/service-2.json @@ -11,7 +11,7 @@ "signingName": "appmesh", "uid": "appmesh-2018-10-01" }, - "documentation": "

AWS App Mesh is a service mesh based on the Envoy proxy that makes it easy to monitor and\n control containerized microservices. App Mesh standardizes how your microservices\n communicate, giving you end-to-end visibility and helping to ensure high-availability for\n your applications.

\n

App Mesh gives you consistent visibility and network traffic controls for every\n microservice in an application. You can use App Mesh with Amazon ECS\n (using the Amazon EC2 launch type), Amazon EKS, and Kubernetes on AWS.

\n \n

App Mesh supports containerized microservice applications that use service discovery\n naming for their components. To use App Mesh, you must have a containerized application\n running on Amazon EC2 instances, hosted in either Amazon ECS, Amazon EKS, or Kubernetes on AWS. For\n more information about service discovery on Amazon ECS, see Service Discovery in the\n Amazon Elastic Container Service Developer Guide. Kubernetes kube-dns is supported.\n For more information, see DNS\n for Services and Pods in the Kubernetes documentation.

\n
", + "documentation": "

AWS App Mesh is a service mesh based on the Envoy proxy that makes it easy to monitor and\n control containerized microservices. App Mesh standardizes how your microservices\n communicate, giving you end-to-end visibility and helping to ensure high-availability for\n your applications.

\n

App Mesh gives you consistent visibility and network traffic controls for every\n microservice in an application. You can use App Mesh with Amazon ECS\n (using the Amazon EC2 launch type), Amazon EKS, and Kubernetes on AWS.

\n \n

App Mesh supports containerized microservice applications that use service discovery\n naming for their components. To use App Mesh, you must have a containerized application\n running on Amazon EC2 instances, hosted in either Amazon ECS, Amazon EKS, or Kubernetes on AWS. For\n more information about service discovery on Amazon ECS, see Service Discovery in the\n Amazon Elastic Container Service Developer Guide. Kubernetes kube-dns is supported.\n For more information, see DNS\n for Services and Pods in the Kubernetes documentation.

\n
", "operations": { "CreateMesh": { "name": "CreateMesh", @@ -370,7 +370,7 @@ "shape": "TooManyRequestsException" } ], - "documentation": "

Describes an existing cluster.

" + "documentation": "

Describes an existing service mesh.

" }, "DescribeRoute": { "name": "DescribeRoute", @@ -763,6 +763,11 @@ "fault": true } }, + "HealthCheckThreshold": { + "type": "integer", + "min": 2, + "max": 10 + }, "DeleteMeshOutput": { "type": "structure", "members": { @@ -907,6 +912,12 @@ "senderFault": true } }, + "HealthCheckIntervalMillis": { + "type": "long", + "box": true, + "min": 5000, + "max": 300000 + }, "VirtualNodeRef": { "type": "structure", "members": { @@ -1011,14 +1022,6 @@ }, "max": 10 }, - "DurationMillis": { - "type": "long", - "box": true - }, - "NonNegativeInt": { - "type": "integer", - "min": 0 - }, "MeshRef": { "type": "structure", "members": { @@ -1113,6 +1116,12 @@ "documentation": "", "payload": "virtualRouter" }, + "HealthCheckTimeoutMillis": { + "type": "long", + "box": true, + "min": 2000, + "max": 60000 + }, "CreateVirtualRouterInput": { "type": "structure", "required": [ @@ -1672,11 +1681,11 @@ }, "createdAt": { "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the cluster was created.

" + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" }, "lastUpdatedAt": { "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the cluster was last updated.

" + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, "uid": { "shape": "String", @@ -1878,7 +1887,7 @@ "members": { "healthCheck": { "shape": "HealthCheckPolicy", - "documentation": "

The health check information for the listener.

\n \n

Listener health checks are not available during the App Mesh preview.

\n
" + "documentation": "

The health check information for the listener.

" }, "portMapping": { "shape": "PortMapping", @@ -1892,37 +1901,44 @@ }, "HealthCheckPolicy": { "type": "structure", + "required": [ + "healthyThreshold", + "intervalMillis", + "protocol", + "timeoutMillis", + "unhealthyThreshold" + ], "members": { "healthyThreshold": { - "shape": "NonNegativeInt", + "shape": "HealthCheckThreshold", "documentation": "

The number of consecutive successful health checks that must occur before declaring the\n listener healthy.

" }, "intervalMillis": { - "shape": "DurationMillis", + "shape": "HealthCheckIntervalMillis", "documentation": "

The time period in milliseconds between each health check execution.

" }, "path": { "shape": "String", - "documentation": "

The destination path for the health check request.

" + "documentation": "

The destination path for the health check request. This is only required if the\n specified protocol is HTTP; if the protocol is TCP, then this parameter is ignored.

" }, "port": { "shape": "PortNumber", - "documentation": "

The destination port for the health check request.

" + "documentation": "

The destination port for the health check request. This port must match the port defined\n in the PortMapping for the listener.

" }, "protocol": { "shape": "PortProtocol", "documentation": "

The protocol for the health check request.

" }, "timeoutMillis": { - "shape": "DurationMillis", + "shape": "HealthCheckTimeoutMillis", "documentation": "

The amount of time to wait when receiving a response from the health check, in\n milliseconds.

" }, "unhealthyThreshold": { - "shape": "NonNegativeInt", + "shape": "HealthCheckThreshold", "documentation": "

The number of consecutive failed health checks that must occur before declaring a\n virtual node unhealthy.

" } }, - "documentation": "

An object representing the health check policy for a virtual node's listener.

\n \n

Listener health checks are not available during the App Mesh preview.

\n
" + "documentation": "

An object representing the health check policy for a virtual node's listener.

" }, "ListVirtualRoutersInput": { "type": "structure", diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index 798ecc11..60b71525 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -376,7 +376,7 @@ "errors":[ {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Retrieves a list that describes the streaming sessions for a specified stack and fleet. If a user ID is provided for the stack and fleet, only streaming sessions for that user are described. If an authentication type is not provided, the default is to authenticate users using a streaming URL.

" + "documentation":"

Retrieves a list that describes the active streaming sessions for a specified stack and fleet. If a value for UserId is provided for the stack and fleet, only streaming sessions for that user are described. If an authentication type is not provided, the default is to authenticate users using a streaming URL.

" }, "DescribeStacks":{ "name":"DescribeStacks", @@ -416,7 +416,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Retrieves a list that describes one or more specified users in the user pool, if user names are provided. Otherwise, all users in the user pool are described.

" + "documentation":"

Retrieves a list that describes one or more specified users in the user pool.

" }, "DisableUser":{ "name":"DisableUser", @@ -501,7 +501,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" }, "StartFleet":{ "name":"StartFleet", @@ -579,7 +579,7 @@ {"shape":"InvalidAccountStatusException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

To list the current tags for your resources, use ListTagsForResource. To disassociate tags from your resources, use UntagResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

To list the current tags for your resources, use ListTagsForResource. To disassociate tags from your resources, use UntagResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" }, "UntagResource":{ "name":"UntagResource", @@ -592,7 +592,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disassociates one or more specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Disassociates one or more specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" }, "UpdateDirectoryConfig":{ "name":"UpdateDirectoryConfig", @@ -698,7 +698,7 @@ }, "DisplayName":{ "shape":"String", - "documentation":"

The application name for display.

" + "documentation":"

The application name to display.

" }, "IconURL":{ "shape":"String", @@ -999,19 +999,19 @@ }, "MaxUserDurationInSeconds":{ "shape":"Integer", - "documentation":"

The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 57600.

" + "documentation":"

The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 360000.

" }, "DisconnectTimeoutInSeconds":{ "shape":"Integer", - "documentation":"

The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 57600.

" + "documentation":"

The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 360000.

" }, "Description":{ "shape":"Description", - "documentation":"

The description for display.

" + "documentation":"

The description to display.

" }, "DisplayName":{ "shape":"DisplayName", - "documentation":"

The fleet name for display.

" + "documentation":"

The fleet name to display.

" }, "EnableDefaultInternetAccess":{ "shape":"BooleanObject", @@ -1019,7 +1019,11 @@ }, "DomainJoinInfo":{ "shape":"DomainJoinInfo", - "documentation":"

The information needed to join a Microsoft Active Directory domain.

" + "documentation":"

The name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags to associate with the fleet. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

For more information, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" } } }, @@ -1045,7 +1049,7 @@ }, "ImageName":{ "shape":"String", - "documentation":"

The name of the image used to create the builder.

" + "documentation":"

The name of the image used to create the image builder.

" }, "ImageArn":{ "shape":"Arn", @@ -1057,11 +1061,11 @@ }, "Description":{ "shape":"Description", - "documentation":"

The description for display.

" + "documentation":"

The description to display.

" }, "DisplayName":{ "shape":"DisplayName", - "documentation":"

The image builder name for display.

" + "documentation":"

The image builder name to display.

" }, "VpcConfig":{ "shape":"VpcConfig", @@ -1073,11 +1077,15 @@ }, "DomainJoinInfo":{ "shape":"DomainJoinInfo", - "documentation":"

The information needed to join a Microsoft Active Directory domain.

" + "documentation":"

The name of the directory and organizational unit (OU) to use to join the image builder to a Microsoft Active Directory domain.

" }, "AppstreamAgentVersion":{ "shape":"AppstreamAgentVersion", "documentation":"

The version of the AppStream 2.0 agent to use for this image builder. To use the latest version of the AppStream 2.0 agent, specify [LATEST].

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags to associate with the image builder. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" } } }, @@ -1127,11 +1135,11 @@ }, "Description":{ "shape":"Description", - "documentation":"

The description for display.

" + "documentation":"

The description to display.

" }, "DisplayName":{ "shape":"DisplayName", - "documentation":"

The stack name for display.

" + "documentation":"

The stack name to display.

" }, "StorageConnectors":{ "shape":"StorageConnectorList", @@ -1152,6 +1160,10 @@ "ApplicationSettings":{ "shape":"ApplicationSettings", "documentation":"

The persistent application settings for users of a stack. When these settings are enabled, changes that users make to applications and Windows settings are automatically saved after each session and applied to the next session.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags to associate with the stack. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" } } }, @@ -1182,7 +1194,7 @@ }, "UserId":{ "shape":"StreamingUrlUserId", - "documentation":"

The ID of the user.

" + "documentation":"

The identifier of the user.

" }, "ApplicationId":{ "shape":"String", @@ -1194,7 +1206,7 @@ }, "SessionContext":{ "shape":"String", - "documentation":"

The session context. For more information, see Session Context in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

The session context. For more information, see Session Context in the Amazon AppStream 2.0 Developer Guide.

" } } }, @@ -1307,7 +1319,7 @@ }, "SharedAccountId":{ "shape":"AwsAccountId", - "documentation":"

The 12-digit ID of the AWS account for which to delete image permissions.

" + "documentation":"

The 12-digit identifier of the AWS account for which to delete image permissions.

" } } }, @@ -1472,7 +1484,7 @@ }, "SharedAwsAccountIds":{ "shape":"AwsAccountIdList", - "documentation":"

The 12-digit ID of one or more AWS accounts with which the image is shared.

" + "documentation":"

The 12-digit identifier of one or more AWS accounts with which the image is shared.

" }, "NextToken":{ "shape":"String", @@ -1558,7 +1570,7 @@ }, "UserId":{ "shape":"UserId", - "documentation":"

The user ID.

" + "documentation":"

The user identifier.

" }, "NextToken":{ "shape":"String", @@ -1570,7 +1582,7 @@ }, "AuthenticationType":{ "shape":"AuthenticationType", - "documentation":"

The authentication method. Specify API for a user authenticated using a streaming URL or SAML for a SAML federated user. The default is to authenticate users using a streaming URL.

" + "documentation":"

The authentication method. Specify API for a user authenticated using a streaming URL, SAML for a SAML 2.0-federated user, or USERPOOL for a user in the AppStream 2.0 user pool. The default is to authenticate users using a streaming URL.

" } } }, @@ -1707,7 +1719,7 @@ "documentation":"

The time the directory configuration was created.

" } }, - "documentation":"

Configuration information for the directory used to join domains.

" + "documentation":"

Describes the configuration information for the directory used to join a streaming instance to a Microsoft Active Directory domain.

" }, "DirectoryConfigList":{ "type":"list", @@ -1783,7 +1795,7 @@ "documentation":"

The distinguished name of the organizational unit for computer accounts.

" } }, - "documentation":"

Contains the information needed to join a Microsoft Active Directory domain.

" + "documentation":"

Describes the configuration information required to join fleets and image builders to Microsoft Active Directory domains.

" }, "DomainList":{ "type":"list", @@ -1822,7 +1834,7 @@ "members":{ "SessionId":{ "shape":"String", - "documentation":"

The ID of the streaming session.

" + "documentation":"

The identifier of the streaming session.

" } } }, @@ -1855,11 +1867,11 @@ }, "DisplayName":{ "shape":"String", - "documentation":"

The fleet name for display.

" + "documentation":"

The fleet name to display.

" }, "Description":{ "shape":"String", - "documentation":"

The description for display.

" + "documentation":"

The description to display.

" }, "ImageName":{ "shape":"String", @@ -1883,11 +1895,11 @@ }, "MaxUserDurationInSeconds":{ "shape":"Integer", - "documentation":"

The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 57600.

" + "documentation":"

The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 360000.

" }, "DisconnectTimeoutInSeconds":{ "shape":"Integer", - "documentation":"

The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 57600.

" + "documentation":"

The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 360000. By default, this value is 900 seconds (15 minutes).

" }, "State":{ "shape":"FleetState", @@ -1911,10 +1923,10 @@ }, "DomainJoinInfo":{ "shape":"DomainJoinInfo", - "documentation":"

The information needed to join a Microsoft Active Directory domain.

" + "documentation":"

The name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain.

" } }, - "documentation":"

Contains the parameters for a fleet.

" + "documentation":"

Describes the parameters for a fleet.

" }, "FleetAttribute":{ "type":"string", @@ -2018,7 +2030,7 @@ }, "DisplayName":{ "shape":"String", - "documentation":"

The image name for display.

" + "documentation":"

The image name to display.

" }, "State":{ "shape":"ImageState", @@ -2038,7 +2050,7 @@ }, "Description":{ "shape":"String", - "documentation":"

The description for display.

" + "documentation":"

The description to display.

" }, "StateChangeReason":{ "shape":"ImageStateChangeReason", @@ -2085,11 +2097,11 @@ }, "Description":{ "shape":"String", - "documentation":"

The description for display.

" + "documentation":"

The description to display.

" }, "DisplayName":{ "shape":"String", - "documentation":"

The image builder name for display.

" + "documentation":"

The image builder name to display.

" }, "VpcConfig":{ "shape":"VpcConfig", @@ -2121,7 +2133,7 @@ }, "DomainJoinInfo":{ "shape":"DomainJoinInfo", - "documentation":"

The information needed to join a Microsoft Active Directory domain.

" + "documentation":"

The name of the directory and organizational unit (OU) to use to join the image builder to a Microsoft Active Directory domain.

" }, "ImageBuilderErrors":{ "shape":"ResourceErrors", @@ -2129,10 +2141,10 @@ }, "AppstreamAgentVersion":{ "shape":"AppstreamAgentVersion", - "documentation":"

The version of the AppStream 2.0 agent that is currently being used by this image builder.

" + "documentation":"

The version of the AppStream 2.0 agent that is currently being used by the image builder.

" } }, - "documentation":"

Describes a streaming instance used for editing an image. New images are created from a snapshot through an image builder.

" + "documentation":"

Describes a virtual machine that is used to create an image.

" }, "ImageBuilderList":{ "type":"list", @@ -2372,7 +2384,7 @@ "documentation":"

The resource identifier of the elastic network interface that is attached to instances in your VPC. All network interfaces have the eni-xxxxxxxx resource identifier.

" } }, - "documentation":"

The network details of the fleet instance for the streaming session.

" + "documentation":"

Describes the network details of the fleet instance for the streaming session.

" }, "OperationNotPermittedException":{ "type":"structure", @@ -2472,7 +2484,7 @@ "SecurityGroupIdList":{ "type":"list", "member":{"shape":"String"}, - "documentation":"

The security group IDs.

", + "documentation":"

The security group identifiers.

", "max":5 }, "ServiceAccountCredentials":{ @@ -2505,7 +2517,7 @@ "members":{ "Id":{ "shape":"String", - "documentation":"

The ID of the streaming session.

" + "documentation":"

The identifier of the streaming session.

" }, "UserId":{ "shape":"UserId", @@ -2523,9 +2535,21 @@ "shape":"SessionState", "documentation":"

The current state of the streaming session.

" }, + "ConnectionState":{ + "shape":"SessionConnectionState", + "documentation":"

Specifies whether a user is connected to the streaming session.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The time when a streaming instance is dedicated for the user.

" + }, + "MaxExpirationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the streaming session is set to expire. This time is based on the MaxUserDurationInSeconds value, which determines the maximum length of time that a streaming session can run. A streaming session might end earlier than the time specified in MaxExpirationTime, when the DisconnectTimeoutInSeconds elapses or the user chooses to end his or her session. If the DisconnectTimeoutInSeconds elapses, or the user chooses to end his or her session, the streaming instance is terminated and the streaming session ends.

" + }, "AuthenticationType":{ "shape":"AuthenticationType", - "documentation":"

The authentication method. The user is authenticated using a streaming URL (API) or SAML federation (SAML).

" + "documentation":"

The authentication method. The user is authenticated using a streaming URL (API), SAML 2.0 federation (SAML), or the AppStream 2.0 user pool (USERPOOL). The default is to authenticate users using a streaming URL.

" }, "NetworkAccessConfiguration":{ "shape":"NetworkAccessConfiguration", @@ -2534,6 +2558,13 @@ }, "documentation":"

Describes a streaming session.

" }, + "SessionConnectionState":{ + "type":"string", + "enum":[ + "CONNECTED", + "NOT_CONNECTED" + ] + }, "SessionList":{ "type":"list", "member":{"shape":"Session"}, @@ -2541,7 +2572,6 @@ }, "SessionState":{ "type":"string", - "documentation":"

Possible values for the state of a streaming session.

", "enum":[ "ACTIVE", "PENDING", @@ -2561,7 +2591,7 @@ "members":{ "sharedAccountId":{ "shape":"AwsAccountId", - "documentation":"

The 12-digit ID of the AWS account with which the image is shared.

" + "documentation":"

The 12-digit identifier of the AWS account with which the image is shared.

" }, "imagePermissions":{ "shape":"ImagePermissions", @@ -2588,11 +2618,11 @@ }, "Description":{ "shape":"String", - "documentation":"

The description for display.

" + "documentation":"

The description to display.

" }, "DisplayName":{ "shape":"String", - "documentation":"

The stack name for display.

" + "documentation":"

The stack name to display.

" }, "CreatedTime":{ "shape":"Timestamp", @@ -2759,7 +2789,7 @@ }, "Domains":{ "shape":"DomainList", - "documentation":"

The names of the domains for the G Suite account.

" + "documentation":"

The names of the domains for the account.

" } }, "documentation":"

Describes a connector to enable persistent storage for users.

" @@ -2795,7 +2825,7 @@ "SubnetIdList":{ "type":"list", "member":{"shape":"String"}, - "documentation":"

The subnet IDs.

" + "documentation":"

The subnet identifiers.

" }, "TagKey":{ "type":"string", @@ -2822,7 +2852,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

The tags to associate. A tag is a key-value pair (the value is optional). For example, Environment=Test, or, if you do not specify a value, Environment=.

If you do not specify a value, we set the value to an empty string.

" + "documentation":"

The tags to associate. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

" } } }, @@ -2923,11 +2953,11 @@ }, "MaxUserDurationInSeconds":{ "shape":"Integer", - "documentation":"

The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 57600.

" + "documentation":"

The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 360000. By default, the value is 900 seconds (15 minutes).

" }, "DisconnectTimeoutInSeconds":{ "shape":"Integer", - "documentation":"

The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 57600.

" + "documentation":"

The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 360000. By default, the value is 900 seconds (15 minutes).

" }, "DeleteVpcConfig":{ "shape":"Boolean", @@ -2936,11 +2966,11 @@ }, "Description":{ "shape":"Description", - "documentation":"

The description for display.

" + "documentation":"

The description to display.

" }, "DisplayName":{ "shape":"DisplayName", - "documentation":"

The fleet name for display.

" + "documentation":"

The fleet name to display.

" }, "EnableDefaultInternetAccess":{ "shape":"BooleanObject", @@ -2948,7 +2978,7 @@ }, "DomainJoinInfo":{ "shape":"DomainJoinInfo", - "documentation":"

The information needed to join a Microsoft Active Directory domain.

" + "documentation":"

The name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain.

" }, "AttributesToDelete":{ "shape":"FleetAttributes", @@ -2979,7 +3009,7 @@ }, "SharedAccountId":{ "shape":"AwsAccountId", - "documentation":"

The 12-digit ID of the AWS account for which you want add or update image permissions.

" + "documentation":"

The 12-digit identifier of the AWS account for which you want to add or update image permissions.

" }, "ImagePermissions":{ "shape":"ImagePermissions", @@ -2998,11 +3028,11 @@ "members":{ "DisplayName":{ "shape":"DisplayName", - "documentation":"

The stack name for display.

" + "documentation":"

The stack name to display.

" }, "Description":{ "shape":"Description", - "documentation":"

The description for display.

" + "documentation":"

The description to display.

" }, "Name":{ "shape":"String", @@ -3023,7 +3053,7 @@ }, "FeedbackURL":{ "shape":"FeedbackURL", - "documentation":"

The URL that users are redirected to after they click the Send Feedback link. If no URL is specified, no Send Feedback link is displayed.

" + "documentation":"

The URL that users are redirected to after they choose the Send Feedback link. If no URL is specified, no Send Feedback link is displayed.

" }, "AttributesToDelete":{ "shape":"StackAttributes", @@ -3206,15 +3236,15 @@ "members":{ "SubnetIds":{ "shape":"SubnetIdList", - "documentation":"

The subnets to which a network interface is established from the fleet instance.

" + "documentation":"

The identifiers of the subnets to which a network interface is attached from the fleet instance or image builder instance. Fleet instances use one or two subnets. Image builder instances use one subnet.

" }, "SecurityGroupIds":{ "shape":"SecurityGroupIdList", - "documentation":"

The security groups for the fleet.

" + "documentation":"

The identifiers of the security groups for the fleet or image builder.

" } }, - "documentation":"

Describes VPC configuration information.

" + "documentation":"

Describes VPC configuration information for fleets and image builders.

" } }, - "documentation":"Amazon AppStream 2.0

You can use Amazon AppStream 2.0 to stream desktop applications to any device running a web browser, without rewriting them.

" + "documentation":"Amazon AppStream 2.0

This is the Amazon AppStream 2.0 API Reference. This reference provides descriptions and syntax for each of the actions and data types in AppStream 2.0. AppStream 2.0 is a fully managed application streaming service. You centrally manage your desktop applications on AppStream 2.0 and securely deliver them to any computer. AppStream 2.0 manages the AWS resources required to host and run your applications, scales automatically, and provides access to your users on demand.

To learn more about AppStream 2.0, see the following resources:

" } diff --git a/botocore/data/appsync/2017-07-25/paginators-1.json b/botocore/data/appsync/2017-07-25/paginators-1.json index ea142457..487d71e6 100644 --- a/botocore/data/appsync/2017-07-25/paginators-1.json +++ b/botocore/data/appsync/2017-07-25/paginators-1.json @@ -1,3 +1,46 @@ { - "pagination": {} + "pagination": { + "ListApiKeys": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "apiKeys" + }, + "ListDataSources": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "dataSources" + }, + "ListFunctions": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "functions" + }, + "ListGraphqlApis": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "graphqlApis" + }, + "ListResolvers": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "resolvers" + }, + "ListResolversByFunction": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "resolvers" + }, + "ListTypes": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "types" + } + } } diff --git a/botocore/data/athena/2017-05-18/service-2.json b/botocore/data/athena/2017-05-18/service-2.json index 1993f2a1..adc47263 100644 --- a/botocore/data/athena/2017-05-18/service-2.json +++ b/botocore/data/athena/2017-05-18/service-2.json @@ -24,7 +24,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Returns the details of a single named query or a list of up to 50 queries, which you provide as an array of query ID strings. Use ListNamedQueries to get the list of named query IDs. If information could not be retrieved for a submitted query ID, information about the query ID submitted is listed under UnprocessedNamedQueryId. Named queries are different from executed queries. Use BatchGetQueryExecution to get details about each unique query execution, and ListQueryExecutions to get a list of query execution IDs.

" + "documentation":"

Returns the details of a single named query or a list of up to 50 queries, which you provide as an array of query ID strings. Requires you to have access to the workgroup in which the queries were saved. Use ListNamedQueriesInput to get the list of named query IDs in the specified workgroup. If information could not be retrieved for a submitted query ID, information about the query ID submitted is listed under UnprocessedNamedQueryId. Named queries differ from executed queries. Use BatchGetQueryExecutionInput to get details about each unique query execution, and ListQueryExecutionsInput to get a list of query execution IDs.

" }, "BatchGetQueryExecution":{ "name":"BatchGetQueryExecution", @@ -38,7 +38,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Returns the details of a single query execution or a list of up to 50 query executions, which you provide as an array of query execution ID strings. To get a list of query execution IDs, use ListQueryExecutions. Query executions are different from named (saved) queries. Use BatchGetNamedQuery to get details about named queries.

" + "documentation":"

Returns the details of a single query execution or a list of up to 50 query executions, which you provide as an array of query execution ID strings. Requires you to have access to the workgroup in which the queries ran. To get a list of query execution IDs, use ListQueryExecutionsInput$WorkGroup. Query executions differ from named (saved) queries. Use BatchGetNamedQueryInput to get details about named queries.

" }, "CreateNamedQuery":{ "name":"CreateNamedQuery", @@ -52,9 +52,23 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Creates a named query.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "documentation":"

Creates a named query in the specified workgroup. Requires that you have access to the workgroup.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true }, + "CreateWorkGroup":{ + "name":"CreateWorkGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWorkGroupInput"}, + "output":{"shape":"CreateWorkGroupOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Creates a workgroup with the specified name.

" + }, "DeleteNamedQuery":{ "name":"DeleteNamedQuery", "http":{ @@ -67,7 +81,22 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Deletes a named query.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "documentation":"

Deletes the named query if you have access to the workgroup in which the query was saved.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "idempotent":true + }, + "DeleteWorkGroup":{ + "name":"DeleteWorkGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteWorkGroupInput"}, + "output":{"shape":"DeleteWorkGroupOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Deletes the workgroup with the specified name. The primary workgroup cannot be deleted.

", "idempotent":true }, "GetNamedQuery":{ @@ -82,7 +111,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Returns information about a single query.

" + "documentation":"

Returns information about a single query. Requires that you have access to the workgroup in which the query was saved.

" }, "GetQueryExecution":{ "name":"GetQueryExecution", @@ -96,7 +125,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Returns information about a single execution of a query. Each time a query executes, information about the query execution is saved with a unique ID.

" + "documentation":"

Returns information about a single execution of a query if you have access to the workgroup in which the query ran. Each time a query executes, information about the query execution is saved with a unique ID.

" }, "GetQueryResults":{ "name":"GetQueryResults", @@ -110,7 +139,21 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Returns the results of a single query execution specified by QueryExecutionId. This request does not execute the query but returns results. Use StartQueryExecution to run a query.

" + "documentation":"

Returns the results of a single query execution specified by QueryExecutionId if you have access to the workgroup in which the query ran. This request does not execute the query but returns results. Use StartQueryExecution to run a query.

" + }, + "GetWorkGroup":{ + "name":"GetWorkGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetWorkGroupInput"}, + "output":{"shape":"GetWorkGroupOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Returns information about the workgroup with the specified name.

" }, "ListNamedQueries":{ "name":"ListNamedQueries", @@ -124,7 +167,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides a list of all available query IDs.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" + "documentation":"

Provides a list of available query IDs only for queries saved in the specified workgroup. Requires that you have access to the workgroup.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" }, "ListQueryExecutions":{ "name":"ListQueryExecutions", @@ -138,7 +181,36 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides a list of all available query execution IDs.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" + "documentation":"

Provides a list of available query execution IDs for the queries in the specified workgroup. Requires you to have access to the workgroup in which the queries ran.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the tags associated with this workgroup.

" + }, + "ListWorkGroups":{ + "name":"ListWorkGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListWorkGroupsInput"}, + "output":{"shape":"ListWorkGroupsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Lists available workgroups for the account.

" }, "StartQueryExecution":{ "name":"StartQueryExecution", @@ -153,7 +225,7 @@ {"shape":"InvalidRequestException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Runs (executes) the SQL query statements contained in the Query string.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "documentation":"

Runs the SQL query statements contained in the Query. Requires you to have access to the workgroup in which the query ran.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true }, "StopQueryExecution":{ @@ -168,11 +240,60 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Stops a query execution.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "documentation":"

Stops a query execution. Requires you to have access to the workgroup in which the query ran.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds one or more tags to the resource, such as a workgroup. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize resources (workgroups) in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. For best practices, see AWS Tagging Strategies. The key length is from 1 (minimum) to 128 (maximum) Unicode characters in UTF-8. The tag value length is from 0 (minimum) to 256 (maximum) Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource. If you specify more than one, separate them by commas.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes one or more tags from the workgroup resource. Takes as an input a list of TagKey Strings separated by commas, and removes their tags at the same time.

" + }, + "UpdateWorkGroup":{ + "name":"UpdateWorkGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateWorkGroupInput"}, + "output":{"shape":"UpdateWorkGroupOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Updates the workgroup with the specified name. The workgroup's name cannot be changed.

" } }, "shapes":{ + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, "BatchGetNamedQueryInput":{ "type":"structure", "required":["NamedQueryIds"], @@ -220,6 +341,11 @@ } }, "Boolean":{"type":"boolean"}, + "BoxedBoolean":{"type":"boolean"}, + "BytesScannedCutoffValue":{ + "type":"long", + "min":10000000 + }, "ColumnInfo":{ "type":"structure", "required":[ @@ -292,11 +418,11 @@ "members":{ "Name":{ "shape":"NameString", - "documentation":"

The plain language name for the query.

" + "documentation":"

The query name.

" }, "Description":{ "shape":"DescriptionString", - "documentation":"

A brief explanation of the query.

" + "documentation":"

The query description.

" }, "Database":{ "shape":"DatabaseString", @@ -304,12 +430,16 @@ }, "QueryString":{ "shape":"QueryString", - "documentation":"

The text of the query itself. In other words, all query statements.

" + "documentation":"

The contents of the query with all query statements.

" }, "ClientRequestToken":{ "shape":"IdempotencyToken", "documentation":"

A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another CreateNamedQuery request is received, the same response is returned and another query is not created. If a parameter has changed, for example, the QueryString, an error is returned.

This token is listed as not required because AWS SDKs (for example the AWS SDK for Java) auto-generate the token for users. If you are not using the AWS SDK or the AWS CLI, you must provide this token or the action will fail.

", "idempotencyToken":true + }, + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The name of the workgroup in which the named query is being created.

" } } }, @@ -322,9 +452,36 @@ } } }, + "CreateWorkGroupInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"WorkGroupName", + "documentation":"

The workgroup name.

" + }, + "Configuration":{ + "shape":"WorkGroupConfiguration", + "documentation":"

The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for encrypting query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup, the limit for the amount of bytes scanned (cutoff) per query, if it is specified, and whether workgroup's settings (specified with EnforceWorkGroupConfiguration) in the WorkGroupConfiguration override client-side settings. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + }, + "Description":{ + "shape":"WorkGroupDescriptionString", + "documentation":"

The workgroup description.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

One or more tags, separated by commas, that you want to attach to the workgroup as you create it.

" + } + } + }, + "CreateWorkGroupOutput":{ + "type":"structure", + "members":{ + } + }, "DatabaseString":{ "type":"string", - "max":32, + "max":255, "min":1 }, "Date":{"type":"timestamp"}, @@ -354,6 +511,25 @@ "members":{ } }, + "DeleteWorkGroupInput":{ + "type":"structure", + "required":["WorkGroup"], + "members":{ + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The unique name of the workgroup to delete.

" + }, + "RecursiveDeleteOption":{ + "shape":"BoxedBoolean", + "documentation":"

The option to delete the workgroup and its contents even if the workgroup contains any named queries.

" + } + } + }, + "DeleteWorkGroupOutput":{ + "type":"structure", + "members":{ + } + }, "DescriptionString":{ "type":"string", "max":1024, @@ -365,7 +541,7 @@ "members":{ "EncryptionOption":{ "shape":"EncryptionOption", - "documentation":"

Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE-S3), server-side encryption with KMS-managed keys (SSE-KMS), or client-side encryption with KMS-managed keys (CSE-KMS) is used.

" + "documentation":"

Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE-S3), server-side encryption with KMS-managed keys (SSE-KMS), or client-side encryption with KMS-managed keys (CSE-KMS) is used.

If a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup's setting for encryption is used. It specifies whether query results must be encrypted, for all queries that run in this workgroup.

" }, "KmsKey":{ "shape":"String", @@ -462,6 +638,25 @@ } } }, + "GetWorkGroupInput":{ + "type":"structure", + "required":["WorkGroup"], + "members":{ + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The name of the workgroup.

" + } + } + }, + "GetWorkGroupOutput":{ + "type":"structure", + "members":{ + "WorkGroup":{ + "shape":"WorkGroup", + "documentation":"

Information about the workgroup.

" + } + } + }, "IdempotencyToken":{ "type":"string", "max":128, @@ -496,6 +691,10 @@ "MaxResults":{ "shape":"MaxNamedQueriesCount", "documentation":"

The maximum number of queries to return in this request.

" + }, + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The name of the workgroup from which the named queries are being returned.

" } } }, @@ -522,6 +721,10 @@ "MaxResults":{ "shape":"MaxQueryExecutionsCount", "documentation":"

The maximum number of query executions to return in this request.

" + }, + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The name of the workgroup from which queries are being returned.

" } } }, @@ -538,6 +741,63 @@ } } }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

Lists the tags for the workgroup resource with the specified ARN.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

The token for the next set of results, or null if there are no additional results for this request, where the request lists the tags for the workgroup resource with the specified ARN.

" + }, + "MaxResults":{ + "shape":"MaxTagsCount", + "documentation":"

The maximum number of results to be returned per request that lists the tags for the workgroup resource.

" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

The list of tags associated with this workgroup.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token to be used by the next request if this request is truncated.

" + } + } + }, + "ListWorkGroupsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

A token to be used by the next request if this request is truncated.

" + }, + "MaxResults":{ + "shape":"MaxWorkGroupsCount", + "documentation":"

The maximum number of workgroups to return in this request.

" + } + } + }, + "ListWorkGroupsOutput":{ + "type":"structure", + "members":{ + "WorkGroups":{ + "shape":"WorkGroupsList", + "documentation":"

The list of workgroups, including their names, descriptions, creation times, and states.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token to be used by the next request if this request is truncated.

" + } + } + }, "Long":{"type":"long"}, "MaxNamedQueriesCount":{ "type":"integer", @@ -557,6 +817,17 @@ "max":1000, "min":0 }, + "MaxTagsCount":{ + "type":"integer", + "box":true, + "min":75 + }, + "MaxWorkGroupsCount":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, "NameString":{ "type":"string", "max":128, @@ -572,11 +843,11 @@ "members":{ "Name":{ "shape":"NameString", - "documentation":"

The plain-language name of the query.

" + "documentation":"

The query name.

" }, "Description":{ "shape":"DescriptionString", - "documentation":"

A brief description of the query.

" + "documentation":"

The query description.

" }, "Database":{ "shape":"DatabaseString", @@ -589,9 +860,13 @@ "NamedQueryId":{ "shape":"NamedQueryId", "documentation":"

The unique identifier of the query.

" + }, + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The name of the workgroup that contains the named query.

" } }, - "documentation":"

A query, where QueryString is the SQL query statements that comprise the query.

" + "documentation":"

A query, where QueryString is the list of SQL query statements that comprise the query.

" }, "NamedQueryId":{"type":"string"}, "NamedQueryIdList":{ @@ -621,7 +896,7 @@ }, "ResultConfiguration":{ "shape":"ResultConfiguration", - "documentation":"

The location in Amazon S3 where query results were stored and the encryption option, if any, used for query results.

" + "documentation":"

The location in Amazon S3 where query results were stored and the encryption option, if any, used for query results. These are known as \"client-side settings\". If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup.

" }, "QueryExecutionContext":{ "shape":"QueryExecutionContext", @@ -634,6 +909,10 @@ "Statistics":{ "shape":"QueryExecutionStatistics", "documentation":"

The amount of data scanned during the query execution and the amount of time that it took to execute, and the type of statement that was run.

" + }, + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The name of the workgroup in which the query ran.

" } }, "documentation":"

Information about a single instance of a query execution.

" @@ -688,7 +967,7 @@ "members":{ "State":{ "shape":"QueryExecutionState", - "documentation":"

The state of query execution. QUEUED state is listed but is not used by Athena and is reserved for future use. RUNNING indicates that the query has been submitted to the service, and Athena will execute the query as soon as resources are available. SUCCEEDED indicates that the query completed without error. FAILED indicates that the query experienced an error and did not complete processing.CANCELLED indicates that user input interrupted query execution.

" + "documentation":"

The state of query execution. QUEUED state is listed but is not used by Athena and is reserved for future use. RUNNING indicates that the query has been submitted to the service, and Athena will execute the query as soon as resources are available. SUCCEEDED indicates that the query completed without errors. FAILED indicates that the query experienced an error and did not complete processing. CANCELLED indicates that a user input interrupted query execution.

" }, "StateChangeReason":{ "shape":"String", @@ -710,20 +989,50 @@ "max":262144, "min":1 }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceName":{"shape":"AmazonResourceName"} + }, + "documentation":"

A resource, such as a workgroup, was not found.

", + "exception":true + }, "ResultConfiguration":{ "type":"structure", - "required":["OutputLocation"], "members":{ "OutputLocation":{ "shape":"String", - "documentation":"

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Queries and Query Result Files.

" + "documentation":"

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Queries and Query Result Files. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "EncryptionConfiguration":{ "shape":"EncryptionConfiguration", - "documentation":"

If query results are encrypted in Amazon S3, indicates the encryption option used (for example, SSE-KMS or CSE-KMS) and key information.

" + "documentation":"

If query results are encrypted in Amazon S3, indicates the encryption option used (for example, SSE-KMS or CSE-KMS) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.

" } }, - "documentation":"

The location in Amazon S3 where query results are stored and the encryption option, if any, used for query results.

" + "documentation":"

The location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. These are known as \"client-side settings\". If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup.

" + }, + "ResultConfigurationUpdates":{ + "type":"structure", + "members":{ + "OutputLocation":{ + "shape":"String", + "documentation":"

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Queries and Query Result Files. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + }, + "RemoveOutputLocation":{ + "shape":"BoxedBoolean", + "documentation":"

If set to \"true\", indicates that the previously-specified query results location (also known as a client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the OutputLocation in ResultConfigurationUpdates (the client-side setting), the OutputLocation in the workgroup's ResultConfiguration will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

" + }, + "EncryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

The encryption configuration for the query results.

" + }, + "RemoveEncryptionConfiguration":{ + "shape":"BoxedBoolean", + "documentation":"

If set to \"true\", indicates that the previously-specified encryption configuration (also known as the client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the EncryptionConfiguration in ResultConfigurationUpdates (the client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

" + } + }, + "documentation":"

The information about the updates in the query results, such as output location and encryption configuration for the query results.

" }, "ResultSet":{ "type":"structure", @@ -765,10 +1074,7 @@ }, "StartQueryExecutionInput":{ "type":"structure", - "required":[ - "QueryString", - "ResultConfiguration" - ], + "required":["QueryString"], "members":{ "QueryString":{ "shape":"QueryString", @@ -785,7 +1091,11 @@ }, "ResultConfiguration":{ "shape":"ResultConfiguration", - "documentation":"

Specifies information about where and how to save the results of the query execution.

" + "documentation":"

Specifies information about where and how to save the results of the query execution. If the query runs in a workgroup, then workgroup's settings may override query settings. This affects the query results location. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + }, + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The name of the workgroup in which the query is being started.

" } } }, @@ -823,19 +1133,77 @@ } }, "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

A tag key. The tag key length is from 1 to 128 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys are case-sensitive and must be unique per resource.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

A tag value. The tag value length is from 0 to 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag values are case-sensitive.

" + } + }, + "documentation":"

A tag that you can add to a resource. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize workgroups in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. The maximum tag key length is 128 Unicode characters in UTF-8. The maximum tag value length is 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

Requests that one or more tags are added to the resource (such as a workgroup) for the specified ARN.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

One or more tags, separated by commas, to be added to the resource, such as a workgroup.

" + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, "ThrottleReason":{ "type":"string", "documentation":"

The reason for the query throttling, for example, when it exceeds the concurrent query limit.

", "enum":["CONCURRENT_QUERY_LIMIT_EXCEEDED"] }, - "Token":{"type":"string"}, + "Token":{ + "type":"string", + "max":1024, + "min":1 + }, "TooManyRequestsException":{ "type":"structure", "members":{ "Message":{"shape":"ErrorMessage"}, "Reason":{"shape":"ThrottleReason"} }, - "documentation":"

Indicates that the request was throttled and includes the reason for throttling, for example, the limit of concurrent queries has been exceeded.

", + "documentation":"

Indicates that the request was throttled.

", "exception":true }, "UnprocessedNamedQueryId":{ @@ -882,6 +1250,174 @@ "type":"list", "member":{"shape":"UnprocessedQueryExecutionId"} }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

Removes one or more tags from the workgroup resource for the specified ARN.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

Removes the tags associated with one or more tag keys from the workgroup resource.

" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateWorkGroupInput":{ + "type":"structure", + "required":["WorkGroup"], + "members":{ + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The specified workgroup that will be updated.

" + }, + "Description":{ + "shape":"WorkGroupDescriptionString", + "documentation":"

The workgroup description.

" + }, + "ConfigurationUpdates":{ + "shape":"WorkGroupConfigurationUpdates", + "documentation":"

The workgroup configuration that will be updated for the given workgroup.

" + }, + "State":{ + "shape":"WorkGroupState", + "documentation":"

The workgroup state that will be updated for the given workgroup.

" + } + } + }, + "UpdateWorkGroupOutput":{ + "type":"structure", + "members":{ + } + }, + "WorkGroup":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"WorkGroupName", + "documentation":"

The workgroup name.

" + }, + "State":{ + "shape":"WorkGroupState", + "documentation":"

The state of the workgroup: ENABLED or DISABLED.

" + }, + "Configuration":{ + "shape":"WorkGroupConfiguration", + "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for query results; whether the Amazon CloudWatch Metrics are enabled for the workgroup; whether workgroup settings override client-side settings; and the data usage limit for the amount of data scanned per query, if it is specified. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + }, + "Description":{ + "shape":"WorkGroupDescriptionString", + "documentation":"

The workgroup description.

" + }, + "CreationTime":{ + "shape":"Date", + "documentation":"

The date and time the workgroup was created.

" + } + }, + "documentation":"

A workgroup, which contains a name, description, creation time, state, and other configuration, listed under WorkGroup$Configuration. Each workgroup enables you to isolate queries for you or your group of users from other queries in the same account, to configure the query results location and the encryption configuration (known as workgroup settings), to enable sending query metrics to Amazon CloudWatch, and to establish per-query data usage control limits for all queries in a workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + }, + "WorkGroupConfiguration":{ + "type":"structure", + "members":{ + "ResultConfiguration":{ + "shape":"ResultConfiguration", + "documentation":"

The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results.

" + }, + "EnforceWorkGroupConfiguration":{ + "shape":"BoxedBoolean", + "documentation":"

If set to \"true\", the settings for the workgroup override client-side settings. If set to \"false\", client-side settings are used. For more information, see Workgroup Settings Override Client-Side Settings.

" + }, + "PublishCloudWatchMetricsEnabled":{ + "shape":"BoxedBoolean", + "documentation":"

Indicates that the Amazon CloudWatch metrics are enabled for the workgroup.

" + }, + "BytesScannedCutoffPerQuery":{ + "shape":"BytesScannedCutoffValue", + "documentation":"

The upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan.

" + } + }, + "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup and whether workgroup settings override query settings, and the data usage limit for the amount of data scanned per query, if it is specified. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + }, + "WorkGroupConfigurationUpdates":{ + "type":"structure", + "members":{ + "EnforceWorkGroupConfiguration":{ + "shape":"BoxedBoolean", + "documentation":"

If set to \"true\", the settings for the workgroup override client-side settings. If set to \"false\" client-side settings are used. For more information, see Workgroup Settings Override Client-Side Settings.

" + }, + "ResultConfigurationUpdates":{ + "shape":"ResultConfigurationUpdates", + "documentation":"

The result configuration information about the queries in this workgroup that will be updated. Includes the updated results location and an updated option for encrypting query results.

" + }, + "PublishCloudWatchMetricsEnabled":{ + "shape":"BoxedBoolean", + "documentation":"

Indicates whether this workgroup enables publishing metrics to Amazon CloudWatch.

" + }, + "BytesScannedCutoffPerQuery":{ + "shape":"BytesScannedCutoffValue", + "documentation":"

The upper limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan.

" + }, + "RemoveBytesScannedCutoffPerQuery":{ + "shape":"BoxedBoolean", + "documentation":"

Indicates that the data usage control limit per query is removed. WorkGroupConfiguration$BytesScannedCutoffPerQuery

" + } + }, + "documentation":"

The configuration information that will be updated for this workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup, whether the workgroup settings override the client-side settings, and the data usage limit for the amount of bytes scanned per query, if it is specified.

" + }, + "WorkGroupDescriptionString":{ + "type":"string", + "max":1024, + "min":0 + }, + "WorkGroupName":{ + "type":"string", + "pattern":"[a-zA-z0-9._-]{1,128}" + }, + "WorkGroupState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "WorkGroupSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"WorkGroupName", + "documentation":"

The name of the workgroup.

" + }, + "State":{ + "shape":"WorkGroupState", + "documentation":"

The state of the workgroup.

" + }, + "Description":{ + "shape":"WorkGroupDescriptionString", + "documentation":"

The workgroup description.

" + }, + "CreationTime":{ + "shape":"Date", + "documentation":"

The workgroup creation date and time.

" + } + }, + "documentation":"

The summary information for the workgroup, which includes its name, state, description, and the date and time it was created.

" + }, + "WorkGroupsList":{ + "type":"list", + "member":{"shape":"WorkGroupSummary"}, + "max":50, + "min":0 + }, "datumList":{ "type":"list", "member":{"shape":"Datum"} diff --git a/botocore/data/autoscaling-plans/2018-01-06/paginators-1.json b/botocore/data/autoscaling-plans/2018-01-06/paginators-1.json index ea142457..e3f812a1 100644 --- a/botocore/data/autoscaling-plans/2018-01-06/paginators-1.json +++ b/botocore/data/autoscaling-plans/2018-01-06/paginators-1.json @@ -1,3 +1,16 @@ { - "pagination": {} + "pagination": { + "DescribeScalingPlanResources": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ScalingPlanResources" + }, + "DescribeScalingPlans": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ScalingPlans" + } + } } diff --git a/botocore/data/autoscaling/2011-01-01/paginators-1.json b/botocore/data/autoscaling/2011-01-01/paginators-1.json index 31bc0944..57bafe33 100644 --- a/botocore/data/autoscaling/2011-01-01/paginators-1.json +++ b/botocore/data/autoscaling/2011-01-01/paginators-1.json @@ -47,6 +47,18 @@ "output_token": "NextToken", "limit_key": "MaxRecords", "result_key": "Tags" + }, + "DescribeLoadBalancerTargetGroups": { + "input_token": "NextToken", + "limit_key": "MaxRecords", + "output_token": "NextToken", + "result_key": "LoadBalancerTargetGroups" + }, + "DescribeLoadBalancers": { + "input_token": "NextToken", + "limit_key": "MaxRecords", + "output_token": "NextToken", + "result_key": "LoadBalancers" } } } diff --git a/botocore/data/autoscaling/2011-01-01/service-2.json b/botocore/data/autoscaling/2011-01-01/service-2.json index 709fb3c0..a836e2b3 100644 --- a/botocore/data/autoscaling/2011-01-01/service-2.json +++ b/botocore/data/autoscaling/2011-01-01/service-2.json @@ -39,7 +39,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Attaches one or more target groups to the specified Auto Scaling group.

To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups.

For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Attaches one or more target groups to the specified Auto Scaling group.

To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups.

With Application Load Balancers and Network Load Balancers, instances are registered as targets with a target group. With Classic Load Balancers, instances are registered with the load balancer. For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "AttachLoadBalancers":{ "name":"AttachLoadBalancers", @@ -56,7 +56,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Attaches one or more Classic Load Balancers to the specified Auto Scaling group.

To attach an Application Load Balancer instead, see AttachLoadBalancerTargetGroups.

To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers.

For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Attaches one or more Classic Load Balancers to the specified Auto Scaling group.

To attach an Application Load Balancer or a Network Load Balancer instead, see AttachLoadBalancerTargetGroups.

To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers.

For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "BatchDeleteScheduledAction":{ "name":"BatchDeleteScheduledAction", @@ -121,7 +121,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Creates an Auto Scaling group with the specified name and attributes.

If you exceed your maximum limit of Auto Scaling groups, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide.

For more information, see Auto Scaling Groups in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates an Auto Scaling group with the specified name and attributes.

If you exceed your maximum limit of Auto Scaling groups, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Amazon EC2 Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide.

For more information, see Auto Scaling Groups in the Amazon EC2 Auto Scaling User Guide.

" }, "CreateLaunchConfiguration":{ "name":"CreateLaunchConfiguration", @@ -135,7 +135,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates a launch configuration.

If you exceed your maximum limit of launch configurations, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide.

For more information, see Launch Configurations in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates a launch configuration.

If you exceed your maximum limit of launch configurations, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Amazon EC2 Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide.

For more information, see Launch Configurations in the Amazon EC2 Auto Scaling User Guide.

" }, "CreateOrUpdateTags":{ "name":"CreateOrUpdateTags", @@ -258,7 +258,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the current Auto Scaling resource limits for your AWS account.

For information about requesting an increase in these limits, see Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes the current Auto Scaling resource limits for your AWS account.

For information about requesting an increase in these limits, see Amazon EC2 Auto Scaling Limits in the Amazon EC2 Auto Scaling User Guide.

" }, "DescribeAdjustmentTypes":{ "name":"DescribeAdjustmentTypes", @@ -402,7 +402,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the load balancers for the specified Auto Scaling group.

This operation describes only Classic Load Balancers. If you have Application Load Balancers, use DescribeLoadBalancerTargetGroups instead.

" + "documentation":"

Describes the load balancers for the specified Auto Scaling group.

This operation describes only Classic Load Balancers. If you have Application Load Balancers or Network Load Balancers, use DescribeLoadBalancerTargetGroups instead.

" }, "DescribeMetricCollectionTypes":{ "name":"DescribeMetricCollectionTypes", @@ -581,7 +581,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Detaches one or more Classic Load Balancers from the specified Auto Scaling group.

This operation detaches only Classic Load Balancers. If you have Application Load Balancers, use DetachLoadBalancerTargetGroups instead.

When you detach a load balancer, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the load balancer using DescribeLoadBalancers. The instances remain running.

" + "documentation":"

Detaches one or more Classic Load Balancers from the specified Auto Scaling group.

This operation detaches only Classic Load Balancers. If you have Application Load Balancers or Network Load Balancers, use DetachLoadBalancerTargetGroups instead.

When you detach a load balancer, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the load balancer using DescribeLoadBalancers. The instances remain running.

" }, "DisableMetricsCollection":{ "name":"DisableMetricsCollection", @@ -667,7 +667,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance that is not actively in service; for example, either when the instance launches or before the instance terminates.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails. For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference.

" + "documentation":"

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance that is not actively in service; for example, either when the instance launches or before the instance terminates.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

" }, "PutNotificationConfiguration":{ "name":"PutNotificationConfiguration", @@ -681,7 +681,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information, see Getting SNS Notifications When Your Auto Scaling Group Scales in the Auto Scaling User Guide.

" + "documentation":"

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information, see Getting Amazon SNS Notifications When Your Auto Scaling Group Scales in the Amazon EC2 Auto Scaling User Guide.

" }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -699,7 +699,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Creates or updates a policy for an Auto Scaling group. To update an existing policy, use the existing policy name and set the parameters to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.

If you exceed your maximum limit of step adjustments, which by default is 20 per region, the call fails. For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference.

" + "documentation":"

Creates or updates a policy for an Auto Scaling group. To update an existing policy, use the existing policy name and set the parameters to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.

" }, "PutScheduledUpdateGroupAction":{ "name":"PutScheduledUpdateGroupAction", @@ -755,7 +755,7 @@ {"shape":"ScalingActivityInProgressFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Sets the size of the specified Auto Scaling group.

For more information about desired capacity, see What Is Amazon EC2 Auto Scaling? in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Sets the size of the specified Auto Scaling group.

For more information about desired capacity, see What Is Amazon EC2 Auto Scaling? in the Amazon EC2 Auto Scaling User Guide.

" }, "SetInstanceHealth":{ "name":"SetInstanceHealth", @@ -784,7 +784,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Updates the instance protection settings of the specified instances.

For more information, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Updates the instance protection settings of the specified instances.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" }, "SuspendProcesses":{ "name":"SuspendProcesses", @@ -924,7 +924,7 @@ "documentation":"

The policy adjustment type. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

" } }, - "documentation":"

Describes a policy adjustment type.

For more information, see Dynamic Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes a policy adjustment type.

" }, "AdjustmentTypes":{ "type":"list", @@ -1112,7 +1112,7 @@ }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", - "documentation":"

The name of the placement group into which to launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The name of the placement group into which to launch your instances, if any. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

" }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen2047", @@ -1136,7 +1136,7 @@ }, "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

" + "documentation":"

Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

For more information, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" }, "ServiceLinkedRoleARN":{ "shape":"ResourceName", @@ -1157,7 +1157,7 @@ "members":{ "AutoScalingGroupNames":{ "shape":"AutoScalingGroupNames", - "documentation":"

The names of the Auto Scaling groups. You can specify up to MaxRecords names. If you omit this parameter, all Auto Scaling groups are described.

" + "documentation":"

The names of the Auto Scaling groups. Each name can be a maximum of 1600 characters. By default, you can only specify up to 50 names. You can optionally increase this limit using the MaxRecords parameter.

If you omit this parameter, all Auto Scaling groups are described.

" }, "NextToken":{ "shape":"XmlString", @@ -1165,7 +1165,7 @@ }, "MaxRecords":{ "shape":"MaxRecords", - "documentation":"

The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.

" + "documentation":"

The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.

" } } }, @@ -1228,7 +1228,7 @@ }, "ProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in.

" + "documentation":"

Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in.

For more information, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" } }, "documentation":"

Describes an EC2 instance associated with an Auto Scaling group.

" @@ -1445,7 +1445,7 @@ }, "LoadBalancerNames":{ "shape":"LoadBalancerNames", - "documentation":"

One or more Classic Load Balancers. To specify an Application Load Balancer, use TargetGroupARNs instead.

For more information, see Using a Load Balancer With an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

One or more Classic Load Balancers. To specify an Application Load Balancer or a Network Load Balancer, use TargetGroupARNs instead.

For more information, see Using a Load Balancer With an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "TargetGroupARNs":{ "shape":"TargetGroupARNs", @@ -1461,7 +1461,7 @@ }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", - "documentation":"

The name of the placement group into which to launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The name of the placement group into which to launch your instances, if any. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

" }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen2047", @@ -1469,11 +1469,11 @@ }, "TerminationPolicies":{ "shape":"TerminationPolicies", - "documentation":"

One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling User Guide.

" + "documentation":"

One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Amazon EC2 Auto Scaling User Guide.

" }, "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

" + "documentation":"

Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

For more information about preventing instances from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleHookSpecificationList":{ "shape":"LifecycleHookSpecifications", @@ -1499,27 +1499,27 @@ }, "ImageId":{ "shape":"XmlStringMaxLen255", - "documentation":"

The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances.

If you do not specify InstanceId, you must specify ImageId.

For more information, see Finding an AMI in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances.

If you do not specify InstanceId, you must specify ImageId.

For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.

" }, "KeyName":{ "shape":"XmlStringMaxLen255", - "documentation":"

The name of the key pair. For more information, see Amazon EC2 Key Pairs in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The name of the key pair. For more information, see Amazon EC2 Key Pairs in the Amazon EC2 User Guide for Linux Instances.

" }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

One or more security groups with which to associate the instances.

If your instances are launched in EC2-Classic, you can either specify security group names or the security group IDs. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide.

If your instances are launched into a VPC, specify security group IDs. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

One or more security groups with which to associate the instances.

If your instances are launched in EC2-Classic, you can either specify security group names or the security group IDs. For more information, see Amazon EC2 Security Groups in the Amazon EC2 User Guide for Linux Instances.

If your instances are launched into a VPC, specify security group IDs. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" }, "ClassicLinkVPCId":{ "shape":"XmlStringMaxLen255", - "documentation":"

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter is supported only if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter is supported only if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

" }, "ClassicLinkVPCSecurityGroups":{ "shape":"ClassicLinkVPCSecurityGroups", - "documentation":"

The IDs of one or more security groups for the specified ClassicLink-enabled VPC. This parameter is required if you specify a ClassicLink-enabled VPC, and is not supported otherwise. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The IDs of one or more security groups for the specified ClassicLink-enabled VPC. This parameter is required if you specify a ClassicLink-enabled VPC, and is not supported otherwise. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

" }, "UserData":{ "shape":"XmlStringUserData", - "documentation":"

The user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.

" }, "InstanceId":{ "shape":"XmlStringMaxLen19", @@ -1527,7 +1527,7 @@ }, "InstanceType":{ "shape":"XmlStringMaxLen255", - "documentation":"

The instance type of the EC2 instance.

If you do not specify InstanceId, you must specify InstanceType.

For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The instance type of the EC2 instance.

If you do not specify InstanceId, you must specify InstanceType.

For information about available instance types, see Available Instance Types in the Amazon EC2 User Guide for Linux Instances.

" }, "KernelId":{ "shape":"XmlStringMaxLen255", @@ -1539,7 +1539,7 @@ }, "BlockDeviceMappings":{ "shape":"BlockDeviceMappings", - "documentation":"

One or more mappings that specify how block devices are exposed to the instance. For more information, see Block Device Mapping in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

One or more mappings that specify how block devices are exposed to the instance. For more information, see Block Device Mapping in the Amazon EC2 User Guide for Linux Instances.

" }, "InstanceMonitoring":{ "shape":"InstanceMonitoring", @@ -1551,11 +1551,11 @@ }, "IamInstanceProfile":{ "shape":"XmlStringMaxLen1600", - "documentation":"

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.

EC2 instances launched with an IAM role automatically have AWS security credentials available. You can use IAM roles with Amazon EC2 Auto Scaling to automatically enable applications running on your EC2 instances to securely access other AWS resources. For more information, see Launch Auto Scaling Instances with an IAM Role in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.

EC2 instances launched with an IAM role automatically have AWS security credentials available. You can use IAM roles with Amazon EC2 Auto Scaling to automatically enable applications running on your EC2 instances to securely access other AWS resources. For more information, see Use an IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.

" }, "EbsOptimized":{ "shape":"EbsOptimized", - "documentation":"

Indicates whether the instance is optimized for Amazon EBS I/O. By default, the instance is not optimized for EBS I/O. The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional usage charges apply. For more information, see Amazon EBS-Optimized Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether the instance is optimized for Amazon EBS I/O. By default, the instance is not optimized for EBS I/O. The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional usage charges apply. For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

" }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", @@ -1606,7 +1606,7 @@ "documentation":"

The unit of the metric.

" } }, - "documentation":"

Configures a customized metric for a target tracking policy.

" + "documentation":"

Configures a customized metric for a target tracking policy to use with Amazon EC2 Auto Scaling.

" }, "DeleteAutoScalingGroupType":{ "type":"structure", @@ -1707,11 +1707,11 @@ "members":{ "MaxNumberOfAutoScalingGroups":{ "shape":"MaxNumberOfAutoScalingGroups", - "documentation":"

The maximum number of groups allowed for your AWS account. The default limit is 20 per region.

" + "documentation":"

The maximum number of groups allowed for your AWS account. The default limit is 200 per region.

" }, "MaxNumberOfLaunchConfigurations":{ "shape":"MaxNumberOfLaunchConfigurations", - "documentation":"

The maximum number of launch configurations allowed for your AWS account. The default limit is 100 per region.

" + "documentation":"

The maximum number of launch configurations allowed for your AWS account. The default limit is 200 per region.

" }, "NumberOfAutoScalingGroups":{ "shape":"NumberOfAutoScalingGroups", @@ -2099,7 +2099,7 @@ }, "VolumeType":{ "shape":"BlockDeviceEbsVolumeType", - "documentation":"

The volume type. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Valid values: standard | io1 | gp2

" + "documentation":"

The volume type. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.

Valid values: standard | io1 | gp2

" }, "DeleteOnTermination":{ "shape":"BlockDeviceEbsDeleteOnTermination", @@ -2111,7 +2111,7 @@ }, "Encrypted":{ "shape":"BlockDeviceEbsEncrypted", - "documentation":"

Indicates whether the volume should be encrypted. Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or an unencrypted volume from an encrypted snapshot. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether the volume should be encrypted. Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or an unencrypted volume from an encrypted snapshot. For more information, see Amazon EBS Encryption in the Amazon EC2 User Guide for Linux Instances.

" } }, "documentation":"

Describes an Amazon EBS volume.

" @@ -2318,7 +2318,7 @@ }, "ProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in.

" + "documentation":"

Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in.

For more information, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" } }, "documentation":"

Describes an EC2 instance.

" @@ -2366,8 +2366,8 @@ "documentation":"

The number of Spot pools to use to allocate your Spot capacity. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate.

The range is 1–20 and the default is 2.

" }, "SpotMaxPrice":{ - "shape":"SpotPrice", - "documentation":"

The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave this value blank (which is the default), the maximum Spot price is set at the On-Demand price.

" + "shape":"MixedInstanceSpotPrice", + "documentation":"

The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price.

To remove a value that you previously set, include the parameter but leave the value blank.

" } }, "documentation":"

Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types.

" @@ -2419,11 +2419,11 @@ }, "ClassicLinkVPCId":{ "shape":"XmlStringMaxLen255", - "documentation":"

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter can only be used if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter can only be used if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

" }, "ClassicLinkVPCSecurityGroups":{ "shape":"ClassicLinkVPCSecurityGroups", - "documentation":"

The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. This parameter is required if you specify a ClassicLink-enabled VPC, and cannot be used otherwise. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. This parameter is required if you specify a ClassicLink-enabled VPC, and cannot be used otherwise. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

" }, "UserData":{ "shape":"XmlStringUserData", @@ -2619,7 +2619,7 @@ "documentation":"

Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The valid values are CONTINUE and ABANDON. The default value is CONTINUE.

" } }, - "documentation":"

Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or whenever it terminates instances.

For more information, see Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or whenever it terminates instances.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleHookNames":{ "type":"list", @@ -2662,7 +2662,7 @@ "documentation":"

The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.

" } }, - "documentation":"

Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or whenever it terminates instances.

For more information, see Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or whenever it terminates instances.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleHookSpecifications":{ "type":"list", @@ -2834,6 +2834,11 @@ "type":"integer", "deprecated":true }, + "MixedInstanceSpotPrice":{ + "type":"string", + "max":255, + "min":0 + }, "MixedInstancesPolicy":{ "type":"structure", "members":{ @@ -2932,7 +2937,7 @@ }, "ResourceLabel":{ "shape":"XmlStringMaxLen1023", - "documentation":"

Identifies the resource associated with the metric type. The following predefined metrics are available:

For predefined metric types ASGAverageCPUUtilization, ASGAverageNetworkIn, and ASGAverageNetworkOut, the parameter must not be specified as the resource associated with the metric type is the Auto Scaling group. For predefined metric type ALBRequestCountPerTarget, the parameter must be specified in the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id , where app/load-balancer-name/load-balancer-id is the final portion of the load balancer ARN, and targetgroup/target-group-name/target-group-id is the final portion of the target group ARN. The target group must be attached to the Auto Scaling group.

" + "documentation":"

Identifies the resource associated with the metric type. The following predefined metrics are available:

For predefined metric types ASGAverageCPUUtilization, ASGAverageNetworkIn, and ASGAverageNetworkOut, the parameter must not be specified as the resource associated with the metric type is the Auto Scaling group. For predefined metric type ALBRequestCountPerTarget, the parameter must be specified in the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id , where app/load-balancer-name/load-balancer-id is the final portion of the load balancer ARN, and targetgroup/target-group-name/target-group-id is the final portion of the target group ARN. The target group must be attached to the Auto Scaling group.

" } }, "documentation":"

Configures a predefined metric for a target tracking policy.

" @@ -3122,7 +3127,7 @@ }, "Recurrence":{ "shape":"XmlStringMaxLen255", - "documentation":"

The recurring schedule for this action, in Unix cron syntax format. For more information about this format, see Crontab.

" + "documentation":"

The recurring schedule for this action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. For more information about this format, see Crontab.

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -3400,7 +3405,7 @@ }, "Recurrence":{ "shape":"XmlStringMaxLen255", - "documentation":"

The recurring schedule for the action, in Unix cron syntax format. For more information about this format, see Crontab.

" + "documentation":"

The recurring schedule for the action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. For more information about this format, see Crontab.

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -3671,7 +3676,7 @@ "documentation":"

Indicates whether scaling in by the target tracking policy is disabled. If scaling in is disabled, the target tracking policy doesn't remove instances from the Auto Scaling group. Otherwise, the target tracking policy can remove instances from the Auto Scaling group. The default is disabled.

" } }, - "documentation":"

Represents a target tracking policy configuration.

" + "documentation":"

Represents a target tracking policy configuration to use with Amazon EC2 Auto Scaling.

" }, "TerminateInstanceInAutoScalingGroupType":{ "type":"structure", @@ -3745,7 +3750,7 @@ }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", - "documentation":"

The name of the placement group into which to launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The name of the placement group into which to launch your instances, if any. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

" }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen2047", @@ -3753,11 +3758,11 @@ }, "TerminationPolicies":{ "shape":"TerminationPolicies", - "documentation":"

A standalone termination policy or a list of termination policies used to select the instance to terminate. The policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling User Guide.

" + "documentation":"

A standalone termination policy or a list of termination policies used to select the instance to terminate. The policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Amazon EC2 Auto Scaling User Guide.

" }, "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

" + "documentation":"

Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

For more information about preventing instances from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" }, "ServiceLinkedRoleARN":{ "shape":"ResourceName", @@ -3827,5 +3832,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"Amazon EC2 Auto Scaling

Amazon EC2 Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined policies, schedules, and health checks. Use this service with AWS Auto Scaling, Amazon CloudWatch, and Elastic Load Balancing.

For more information, see the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"Amazon EC2 Auto Scaling

Amazon EC2 Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined policies, schedules, and health checks. Use this service with AWS Auto Scaling, Amazon CloudWatch, and Elastic Load Balancing.

For more information, including information about granting IAM users required permissions for Amazon EC2 Auto Scaling actions, see the Amazon EC2 Auto Scaling User Guide.

" } diff --git a/botocore/data/backup/2018-11-15/paginators-1.json b/botocore/data/backup/2018-11-15/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/backup/2018-11-15/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/backup/2018-11-15/service-2.json b/botocore/data/backup/2018-11-15/service-2.json new file mode 100644 index 00000000..54c4c0ca --- /dev/null +++ b/botocore/data/backup/2018-11-15/service-2.json @@ -0,0 +1,3148 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-11-15", + "endpointPrefix":"backup", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS Backup", + "serviceId":"Backup", + "signatureVersion":"v4", + "uid":"backup-2018-11-15" + }, + "operations":{ + "CreateBackupPlan":{ + "name":"CreateBackupPlan", + "http":{ + "method":"PUT", + "requestUri":"/backup/plans/" + }, + "input":{"shape":"CreateBackupPlanInput"}, + "output":{"shape":"CreateBackupPlanOutput"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Backup plans are documents that contain information that AWS Backup uses to schedule tasks that create recovery points of resources.

If you call CreateBackupPlan with a plan that already exists, the existing backupPlanId is returned.

", + "idempotent":true + }, + "CreateBackupSelection":{ + "name":"CreateBackupSelection", + "http":{ + "method":"PUT", + "requestUri":"/backup/plans/{backupPlanId}/selections/" + }, + "input":{"shape":"CreateBackupSelectionInput"}, + "output":{"shape":"CreateBackupSelectionOutput"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Creates a JSON document that specifies a set of resources to assign to a backup plan. Resources can be included by specifying patterns for a ListOfTags and selected Resources.

For example, consider the following patterns:

Using these patterns would back up all Amazon Elastic Block Store (Amazon EBS) volumes that are tagged as \"department=finance\", \"importance=critical\", in addition to an EBS volume with the specified volume Id.

Resources and conditions are additive in that all resources that match the pattern are selected. This shouldn't be confused with a logical AND, where all conditions must match. The matching patterns are logically put together using the OR operator. In other words, all patterns that match are selected for backup.

", + "idempotent":true + }, + "CreateBackupVault":{ + "name":"CreateBackupVault", + "http":{ + "method":"PUT", + "requestUri":"/backup-vaults/{backupVaultName}" + }, + "input":{"shape":"CreateBackupVaultInput"}, + "output":{"shape":"CreateBackupVaultOutput"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"LimitExceededException"}, + {"shape":"AlreadyExistsException"} + ], + "documentation":"

Creates a logical container where backups are stored. A CreateBackupVault request includes a name, optionally one or more resource tags, an encryption key, and a request ID.

Sensitive data, such as passport numbers, should not be included in the name of a backup vault.

", + "idempotent":true + }, + "DeleteBackupPlan":{ + "name":"DeleteBackupPlan", + "http":{ + "method":"DELETE", + "requestUri":"/backup/plans/{backupPlanId}" + }, + "input":{"shape":"DeleteBackupPlanInput"}, + "output":{"shape":"DeleteBackupPlanOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Deletes a backup plan. A backup plan can only be deleted after all associated selections of resources have been deleted. Deleting a backup plan deletes the current version of a backup plan. Previous versions, if any, will still exist.

" + }, + "DeleteBackupSelection":{ + "name":"DeleteBackupSelection", + "http":{ + "method":"DELETE", + "requestUri":"/backup/plans/{backupPlanId}/selections/{selectionId}" + }, + "input":{"shape":"DeleteBackupSelectionInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Deletes the resource selection associated with a backup plan that is specified by the SelectionId.

" + }, + "DeleteBackupVault":{ + "name":"DeleteBackupVault", + "http":{ + "method":"DELETE", + "requestUri":"/backup-vaults/{backupVaultName}" + }, + "input":{"shape":"DeleteBackupVaultInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Deletes the backup vault identified by its name. A vault can be deleted only if it is empty.

" + }, + "DeleteBackupVaultAccessPolicy":{ + "name":"DeleteBackupVaultAccessPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/backup-vaults/{backupVaultName}/access-policy" + }, + "input":{"shape":"DeleteBackupVaultAccessPolicyInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Deletes the policy document that manages permissions on a backup vault.

", + "idempotent":true + }, + "DeleteBackupVaultNotifications":{ + "name":"DeleteBackupVaultNotifications", + "http":{ + "method":"DELETE", + "requestUri":"/backup-vaults/{backupVaultName}/notification-configuration" + }, + "input":{"shape":"DeleteBackupVaultNotificationsInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Deletes event notifications for the specified backup vault.

", + "idempotent":true + }, + "DeleteRecoveryPoint":{ + "name":"DeleteRecoveryPoint", + "http":{ + "method":"DELETE", + "requestUri":"/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}" + }, + "input":{"shape":"DeleteRecoveryPointInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Deletes the recovery point specified by a recovery point ID.

", + "idempotent":true + }, + "DescribeBackupJob":{ + "name":"DescribeBackupJob", + "http":{ + "method":"GET", + "requestUri":"/backup-jobs/{backupJobId}" + }, + "input":{"shape":"DescribeBackupJobInput"}, + "output":{"shape":"DescribeBackupJobOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DependencyFailureException"} + ], + "documentation":"

Returns metadata associated with creating a backup of a resource.

", + "idempotent":true + }, + "DescribeBackupVault":{ + "name":"DescribeBackupVault", + "http":{ + "method":"GET", + "requestUri":"/backup-vaults/{backupVaultName}" + }, + "input":{"shape":"DescribeBackupVaultInput"}, + "output":{"shape":"DescribeBackupVaultOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns metadata about a backup vault specified by its name.

", + "idempotent":true + }, + "DescribeProtectedResource":{ + "name":"DescribeProtectedResource", + "http":{ + "method":"GET", + "requestUri":"/resources/{resourceArn}" + }, + "input":{"shape":"DescribeProtectedResourceInput"}, + "output":{"shape":"DescribeProtectedResourceOutput"}, + "errors":[ + {"shape":"MissingParameterValueException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns information about a saved resource, including the last time it was backed up, its Amazon Resource Name (ARN), and the AWS service type of the saved resource.

", + "idempotent":true + }, + "DescribeRecoveryPoint":{ + "name":"DescribeRecoveryPoint", + "http":{ + "method":"GET", + "requestUri":"/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}" + }, + "input":{"shape":"DescribeRecoveryPointInput"}, + "output":{"shape":"DescribeRecoveryPointOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns metadata associated with a recovery point, including ID, status, encryption, and lifecycle.

", + "idempotent":true + }, + "DescribeRestoreJob":{ + "name":"DescribeRestoreJob", + "http":{ + "method":"GET", + "requestUri":"/restore-jobs/{restoreJobId}" + }, + "input":{"shape":"DescribeRestoreJobInput"}, + "output":{"shape":"DescribeRestoreJobOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DependencyFailureException"} + ], + "documentation":"

Returns metadata associated with a restore job that is specified by a job ID.

", + "idempotent":true + }, + "ExportBackupPlanTemplate":{ + "name":"ExportBackupPlanTemplate", + "http":{ + "method":"GET", + "requestUri":"/backup/plans/{backupPlanId}/toTemplate/" + }, + "input":{"shape":"ExportBackupPlanTemplateInput"}, + "output":{"shape":"ExportBackupPlanTemplateOutput"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the backup plan that is specified by the plan ID as a backup template.

" + }, + "GetBackupPlan":{ + "name":"GetBackupPlan", + "http":{ + "method":"GET", + "requestUri":"/backup/plans/{backupPlanId}/" + }, + "input":{"shape":"GetBackupPlanInput"}, + "output":{"shape":"GetBackupPlanOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns the body of a backup plan in JSON format, in addition to plan metadata.

", + "idempotent":true + }, + "GetBackupPlanFromJSON":{ + "name":"GetBackupPlanFromJSON", + "http":{ + "method":"POST", + "requestUri":"/backup/template/json/toPlan" + }, + "input":{"shape":"GetBackupPlanFromJSONInput"}, + "output":{"shape":"GetBackupPlanFromJSONOutput"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Returns a valid JSON document specifying a backup plan or an error.

" + }, + "GetBackupPlanFromTemplate":{ + "name":"GetBackupPlanFromTemplate", + "http":{ + "method":"GET", + "requestUri":"/backup/template/plans/{templateId}/toPlan" + }, + "input":{"shape":"GetBackupPlanFromTemplateInput"}, + "output":{"shape":"GetBackupPlanFromTemplateOutput"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the template specified by its templateId as a backup plan.

" + }, + "GetBackupSelection":{ + "name":"GetBackupSelection", + "http":{ + "method":"GET", + "requestUri":"/backup/plans/{backupPlanId}/selections/{selectionId}" + }, + "input":{"shape":"GetBackupSelectionInput"}, + "output":{"shape":"GetBackupSelectionOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns selection metadata and a document in JSON format that specifies a list of resources that are associated with a backup plan.

", + "idempotent":true + }, + "GetBackupVaultAccessPolicy":{ + "name":"GetBackupVaultAccessPolicy", + "http":{ + "method":"GET", + "requestUri":"/backup-vaults/{backupVaultName}/access-policy" + }, + "input":{"shape":"GetBackupVaultAccessPolicyInput"}, + "output":{"shape":"GetBackupVaultAccessPolicyOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns the access policy document that is associated with the named backup vault.

", + "idempotent":true + }, + "GetBackupVaultNotifications":{ + "name":"GetBackupVaultNotifications", + "http":{ + "method":"GET", + "requestUri":"/backup-vaults/{backupVaultName}/notification-configuration" + }, + "input":{"shape":"GetBackupVaultNotificationsInput"}, + "output":{"shape":"GetBackupVaultNotificationsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns event notifications for the specified backup vault.

", + "idempotent":true + }, + "GetRecoveryPointRestoreMetadata":{ + "name":"GetRecoveryPointRestoreMetadata", + "http":{ + "method":"GET", + "requestUri":"/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}/restore-metadata" + }, + "input":{"shape":"GetRecoveryPointRestoreMetadataInput"}, + "output":{"shape":"GetRecoveryPointRestoreMetadataOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns two sets of metadata key-value pairs. The first set lists the metadata that the recovery point was created with. The second set lists the metadata key-value pairs that are required to restore the recovery point.

These sets can be the same, or the restore metadata set can contain different values if the target service to be restored has changed since the recovery point was created and now requires additional or different information in order to be restored.

", + "idempotent":true + }, + "GetSupportedResourceTypes":{ + "name":"GetSupportedResourceTypes", + "http":{ + "method":"GET", + "requestUri":"/supported-resource-types" + }, + "output":{"shape":"GetSupportedResourceTypesOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns the AWS resource types supported by AWS Backup.

" + }, + "ListBackupJobs":{ + "name":"ListBackupJobs", + "http":{ + "method":"GET", + "requestUri":"/backup-jobs/" + }, + "input":{"shape":"ListBackupJobsInput"}, + "output":{"shape":"ListBackupJobsOutput"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns metadata about your backup jobs.

", + "idempotent":true + }, + "ListBackupPlanTemplates":{ + "name":"ListBackupPlanTemplates", + "http":{ + "method":"GET", + "requestUri":"/backup/template/plans" + }, + "input":{"shape":"ListBackupPlanTemplatesInput"}, + "output":{"shape":"ListBackupPlanTemplatesOutput"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns metadata of your saved backup plan templates, including the template ID, name, and the creation and deletion dates.

" + }, + "ListBackupPlanVersions":{ + "name":"ListBackupPlanVersions", + "http":{ + "method":"GET", + "requestUri":"/backup/plans/{backupPlanId}/versions/" + }, + "input":{"shape":"ListBackupPlanVersionsInput"}, + "output":{"shape":"ListBackupPlanVersionsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns version metadata of your backup plans, including Amazon Resource Names (ARNs), backup plan IDs, creation and deletion dates, plan names, and version IDs.

", + "idempotent":true + }, + "ListBackupPlans":{ + "name":"ListBackupPlans", + "http":{ + "method":"GET", + "requestUri":"/backup/plans/" + }, + "input":{"shape":"ListBackupPlansInput"}, + "output":{"shape":"ListBackupPlansOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns metadata of your saved backup plans, including Amazon Resource Names (ARNs), plan IDs, creation and deletion dates, version IDs, plan names, and creator request IDs.

", + "idempotent":true + }, + "ListBackupSelections":{ + "name":"ListBackupSelections", + "http":{ + "method":"GET", + "requestUri":"/backup/plans/{backupPlanId}/selections/" + }, + "input":{"shape":"ListBackupSelectionsInput"}, + "output":{"shape":"ListBackupSelectionsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns an array containing metadata of the resources associated with the target backup plan.

", + "idempotent":true + }, + "ListBackupVaults":{ + "name":"ListBackupVaults", + "http":{ + "method":"GET", + "requestUri":"/backup-vaults/" + }, + "input":{"shape":"ListBackupVaultsInput"}, + "output":{"shape":"ListBackupVaultsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns a list of recovery point storage containers along with information about them.

", + "idempotent":true + }, + "ListProtectedResources":{ + "name":"ListProtectedResources", + "http":{ + "method":"GET", + "requestUri":"/resources/" + }, + "input":{"shape":"ListProtectedResourcesInput"}, + "output":{"shape":"ListProtectedResourcesOutput"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns an array of resources successfully backed up by AWS Backup, including the time the resource was saved, an Amazon Resource Name (ARN) of the resource, and a resource type.

", + "idempotent":true + }, + "ListRecoveryPointsByBackupVault":{ + "name":"ListRecoveryPointsByBackupVault", + "http":{ + "method":"GET", + "requestUri":"/backup-vaults/{backupVaultName}/recovery-points/" + }, + "input":{"shape":"ListRecoveryPointsByBackupVaultInput"}, + "output":{"shape":"ListRecoveryPointsByBackupVaultOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns detailed information about the recovery points stored in a backup vault.

", + "idempotent":true + }, + "ListRecoveryPointsByResource":{ + "name":"ListRecoveryPointsByResource", + "http":{ + "method":"GET", + "requestUri":"/resources/{resourceArn}/recovery-points/" + }, + "input":{"shape":"ListRecoveryPointsByResourceInput"}, + "output":{"shape":"ListRecoveryPointsByResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns detailed information about recovery points of the type specified by a resource Amazon Resource Name (ARN).

", + "idempotent":true + }, + "ListRestoreJobs":{ + "name":"ListRestoreJobs", + "http":{ + "method":"GET", + "requestUri":"/restore-jobs/" + }, + "input":{"shape":"ListRestoreJobsInput"}, + "output":{"shape":"ListRestoreJobsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns a list of jobs that AWS Backup initiated to restore a saved resource, including metadata about the recovery process.

", + "idempotent":true + }, + "ListTags":{ + "name":"ListTags", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}/" + }, + "input":{"shape":"ListTagsInput"}, + "output":{"shape":"ListTagsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns a list of key-value pairs assigned to a target recovery point, backup plan, or backup vault.

", + "idempotent":true + }, + "PutBackupVaultAccessPolicy":{ + "name":"PutBackupVaultAccessPolicy", + "http":{ + "method":"PUT", + "requestUri":"/backup-vaults/{backupVaultName}/access-policy" + }, + "input":{"shape":"PutBackupVaultAccessPolicyInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Sets a resource-based policy that is used to manage access permissions on the target backup vault. Requires a backup vault name and an access policy document in JSON format.

", + "idempotent":true + }, + "PutBackupVaultNotifications":{ + "name":"PutBackupVaultNotifications", + "http":{ + "method":"PUT", + "requestUri":"/backup-vaults/{backupVaultName}/notification-configuration" + }, + "input":{"shape":"PutBackupVaultNotificationsInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Turns on notifications on a backup vault for the specified topic and events.

", + "idempotent":true + }, + "StartBackupJob":{ + "name":"StartBackupJob", + "http":{ + "method":"PUT", + "requestUri":"/backup-jobs" + }, + "input":{"shape":"StartBackupJobInput"}, + "output":{"shape":"StartBackupJobOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Starts a job to create a one-time backup of the specified resource.

", + "idempotent":true + }, + "StartRestoreJob":{ + "name":"StartRestoreJob", + "http":{ + "method":"PUT", + "requestUri":"/restore-jobs" + }, + "input":{"shape":"StartRestoreJobInput"}, + "output":{"shape":"StartRestoreJobOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Recovers the saved resource identified by an Amazon Resource Name (ARN).

If the resource ARN is included in the request, then the last complete backup of that resource is recovered. If the ARN of a recovery point is supplied, then that recovery point is restored.

", + "idempotent":true + }, + "StopBackupJob":{ + "name":"StopBackupJob", + "http":{ + "method":"POST", + "requestUri":"/backup-jobs/{backupJobId}" + }, + "input":{"shape":"StopBackupJobInput"}, + "errors":[ + {"shape":"MissingParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Attempts to cancel a job to create a one-time backup of a resource.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Assigns a set of key-value pairs to a recovery point, backup plan, or backup vault identified by an Amazon Resource Name (ARN).

", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/untag/{resourceArn}" + }, + "input":{"shape":"UntagResourceInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Removes a set of key-value pairs from a recovery point, backup plan, or backup vault identified by an Amazon Resource Name (ARN)

", + "idempotent":true + }, + "UpdateBackupPlan":{ + "name":"UpdateBackupPlan", + "http":{ + "method":"POST", + "requestUri":"/backup/plans/{backupPlanId}" + }, + "input":{"shape":"UpdateBackupPlanInput"}, + "output":{"shape":"UpdateBackupPlanOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Replaces the body of a saved backup plan identified by its backupPlanId with the input document in JSON format. The new version is uniquely identified by a VersionId.

", + "idempotent":true + }, + "UpdateRecoveryPointLifecycle":{ + "name":"UpdateRecoveryPointLifecycle", + "http":{ + "method":"POST", + "requestUri":"/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}" + }, + "input":{"shape":"UpdateRecoveryPointLifecycleInput"}, + "output":{"shape":"UpdateRecoveryPointLifecycleOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Sets the transition lifecycle of a recovery point.

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

", + "idempotent":true + } + }, + "shapes":{ + "ARN":{"type":"string"}, + "AlreadyExistsException":{ + "type":"structure", + "members":{ + "Code":{"shape":"string"}, + "Message":{"shape":"string"}, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

" + }, + "Arn":{ + "shape":"string", + "documentation":"

" + }, + "Type":{ + "shape":"string", + "documentation":"

" + }, + "Context":{ + "shape":"string", + "documentation":"

" + } + }, + "documentation":"

The required resource already exists.

", + "exception":true + }, + "BackupJob":{ + "type":"structure", + "members":{ + "BackupJobId":{ + "shape":"string", + "documentation":"

Uniquely identifies a request to AWS Backup to back up a resource.

" + }, + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a resource. The format of the ARN depends on the resource type.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a backup job is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CompletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time a job to create a backup job is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "State":{ + "shape":"BackupJobState", + "documentation":"

The current state of a resource recovery point.

" + }, + "StatusMessage":{ + "shape":"string", + "documentation":"

A detailed message explaining the status of the job to back up a resource.

" + }, + "PercentDone":{ + "shape":"string", + "documentation":"

Contains an estimated percentage complete of a job at the time the job status was queried.

" + }, + "BackupSizeInBytes":{ + "shape":"Long", + "documentation":"

The size, in bytes, of a backup.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "CreatedBy":{ + "shape":"RecoveryPointCreator", + "documentation":"

Contains identifying information about the creation of a backup job, including the BackupPlanArn, BackupPlanId, BackupPlanVersion, and BackupRuleId of the backup plan used to create it.

" + }, + "ExpectedCompletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time a job to back up resources is expected to be completed, in Unix format and Coordinated Universal Time (UTC). The value of ExpectedCompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "StartBy":{ + "shape":"timestamp", + "documentation":"

Specifies the time in Unix format and Coordinated Universal Time (UTC) when a backup job must be started before it is canceled. The value is calculated by adding the start window to the scheduled time. So if the scheduled time were 6:00 PM and the start window is 2 hours, the StartBy time would be 8:00 PM on the date specified. The value of StartBy is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource to be backed-up; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + }, + "BytesTransferred":{ + "shape":"Long", + "documentation":"

The size in bytes transferred to a backup vault at the time that the job status was queried.

" + } + }, + "documentation":"

Contains detailed information about a backup job.

" + }, + "BackupJobState":{ + "type":"string", + "enum":[ + "CREATED", + "PENDING", + "RUNNING", + "ABORTING", + "ABORTED", + "COMPLETED", + "FAILED", + "EXPIRED" + ] + }, + "BackupJobsList":{ + "type":"list", + "member":{"shape":"BackupJob"} + }, + "BackupPlan":{ + "type":"structure", + "required":[ + "BackupPlanName", + "Rules" + ], + "members":{ + "BackupPlanName":{ + "shape":"BackupPlanName", + "documentation":"

The display name of a backup plan.

" + }, + "Rules":{ + "shape":"BackupRules", + "documentation":"

An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources.

" + } + }, + "documentation":"

Contains an optional backup plan display name and an array of BackupRule objects, each of which specifies a backup rule. Each rule in a backup plan is a separate scheduled task and can back up a different selection of AWS resources.

" + }, + "BackupPlanInput":{ + "type":"structure", + "required":[ + "BackupPlanName", + "Rules" + ], + "members":{ + "BackupPlanName":{ + "shape":"BackupPlanName", + "documentation":"

The display name of a backup plan.

" + }, + "Rules":{ + "shape":"BackupRulesInput", + "documentation":"

An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources.

" + } + }, + "documentation":"

Contains an optional backup plan display name and an array of BackupRule objects, each of which specifies a backup rule. Each rule in a backup plan is a separate scheduled task and can back up a different selection of AWS resources.

" + }, + "BackupPlanName":{"type":"string"}, + "BackupPlanTemplatesList":{ + "type":"list", + "member":{"shape":"BackupPlanTemplatesListMember"} + }, + "BackupPlanTemplatesListMember":{ + "type":"structure", + "members":{ + "BackupPlanTemplateId":{ + "shape":"string", + "documentation":"

Uniquely identifies a stored backup plan template.

" + }, + "BackupPlanTemplateName":{ + "shape":"string", + "documentation":"

The optional display name of a backup plan template.

" + } + }, + "documentation":"

An object specifying metadata associated with a backup plan template.

" + }, + "BackupPlanVersionsList":{ + "type":"list", + "member":{"shape":"BackupPlansListMember"} + }, + "BackupPlansList":{ + "type":"list", + "member":{"shape":"BackupPlansListMember"} + }, + "BackupPlansListMember":{ + "type":"structure", + "members":{ + "BackupPlanArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.

" + }, + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a resource backup plan is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "DeletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time a backup plan is deleted, in Unix format and Coordinated Universal Time (UTC). The value of DeletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "VersionId":{ + "shape":"string", + "documentation":"

Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1,024 bytes long. Version IDs cannot be edited.

" + }, + "BackupPlanName":{ + "shape":"BackupPlanName", + "documentation":"

The display name of a saved backup plan.

" + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

A unique string that identifies the request and allows failed requests to be retried without the risk of executing the operation twice.

" + }, + "LastExecutionDate":{ + "shape":"timestamp", + "documentation":"

The last time a job to back up resources was executed with this rule. A date and time, in Unix format and Coordinated Universal Time (UTC). The value of LastExecutionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + }, + "documentation":"

Contains metadata about a backup plan.

" + }, + "BackupRule":{ + "type":"structure", + "required":[ + "RuleName", + "TargetBackupVaultName" + ], + "members":{ + "RuleName":{ + "shape":"BackupRuleName", + "documentation":"

An optional display name for a backup rule.

" + }, + "TargetBackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "ScheduleExpression":{ + "shape":"CronExpression", + "documentation":"

A CRON expression specifying when AWS Backup initiates a backup job.

" + }, + "StartWindowMinutes":{ + "shape":"WindowMinutes", + "documentation":"

An optional value that specifies a period of time in minutes after a backup is scheduled before a job is canceled if it doesn't start successfully.

" + }, + "CompletionWindowMinutes":{ + "shape":"WindowMinutes", + "documentation":"

A value in minutes after a backup job is successfully started before it must be completed or it is canceled by AWS Backup. This value is optional.

" + }, + "Lifecycle":{ + "shape":"Lifecycle", + "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + }, + "RecoveryPointTags":{ + "shape":"Tags", + "documentation":"

An array of key-value pair strings that are assigned to resources that are associated with this rule when restored from backup.

" + }, + "RuleId":{ + "shape":"string", + "documentation":"

Uniquely identifies a rule that is used to schedule the backup of a selection of resources.

" + } + }, + "documentation":"

Specifies a scheduled task used to back up a selection of resources.

" + }, + "BackupRuleInput":{ + "type":"structure", + "required":[ + "RuleName", + "TargetBackupVaultName" + ], + "members":{ + "RuleName":{ + "shape":"BackupRuleName", + "documentation":"

>An optional display name for a backup rule.

" + }, + "TargetBackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "ScheduleExpression":{ + "shape":"CronExpression", + "documentation":"

A CRON expression specifying when AWS Backup initiates a backup job.

" + }, + "StartWindowMinutes":{ + "shape":"WindowMinutes", + "documentation":"

The amount of time in minutes before beginning a backup.

" + }, + "CompletionWindowMinutes":{ + "shape":"WindowMinutes", + "documentation":"

The amount of time AWS Backup attempts a backup before canceling the job and returning an error.

" + }, + "Lifecycle":{ + "shape":"Lifecycle", + "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup will transition and expire backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days”. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + }, + "RecoveryPointTags":{ + "shape":"Tags", + "documentation":"

To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair.

" + } + }, + "documentation":"

Specifies a scheduled task used to back up a selection of resources.

" + }, + "BackupRuleName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9\\-\\_\\.]{1,50}$" + }, + "BackupRules":{ + "type":"list", + "member":{"shape":"BackupRule"} + }, + "BackupRulesInput":{ + "type":"list", + "member":{"shape":"BackupRuleInput"} + }, + "BackupSelection":{ + "type":"structure", + "required":[ + "SelectionName", + "IamRoleArn" + ], + "members":{ + "SelectionName":{ + "shape":"BackupSelectionName", + "documentation":"

The display name of a resource selection document.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

The ARN of the IAM role that AWS Backup uses to authenticate when restoring the target resource; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "Resources":{ + "shape":"ResourceArns", + "documentation":"

An array of strings that either contain Amazon Resource Names (ARNs) or match patterns such as \"arn:aws:ec2:us-east-1:123456789012:volume/*\" of resources to assign to a backup plan.

" + }, + "ListOfTags":{ + "shape":"ListOfTags", + "documentation":"

An array of conditions used to specify a set of resources to assign to a backup plan; for example, \"StringEquals\": {\"ec2:ResourceTag/Department\": \"accounting\".

" + } + }, + "documentation":"

Used to specify a set of resources to a backup plan.

" + }, + "BackupSelectionName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9\\-\\_\\.]{1,50}$" + }, + "BackupSelectionsList":{ + "type":"list", + "member":{"shape":"BackupSelectionsListMember"} + }, + "BackupSelectionsListMember":{ + "type":"structure", + "members":{ + "SelectionId":{ + "shape":"string", + "documentation":"

Uniquely identifies a request to assign a set of resources to a backup plan.

" + }, + "SelectionName":{ + "shape":"BackupSelectionName", + "documentation":"

The display name of a resource selection document.

" + }, + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a backup plan is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

A unique string that identifies the request and allows failed requests to be retried without the risk of executing the operation twice.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

Specifies the IAM role Amazon Resource Name (ARN) to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + } + }, + "documentation":"

Contains metadata about a BackupSelection object.

" + }, + "BackupVaultEvent":{ + "type":"string", + "enum":[ + "BACKUP_JOB_STARTED", + "BACKUP_JOB_COMPLETED", + "RESTORE_JOB_STARTED", + "RESTORE_JOB_COMPLETED", + "RECOVERY_POINT_MODIFIED", + "BACKUP_PLAN_CREATED", + "BACKUP_PLAN_MODIFIED" + ] + }, + "BackupVaultEvents":{ + "type":"list", + "member":{"shape":"BackupVaultEvent"} + }, + "BackupVaultList":{ + "type":"list", + "member":{"shape":"BackupVaultListMember"} + }, + "BackupVaultListMember":{ + "type":"structure", + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a resource backup is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "EncryptionKeyArn":{ + "shape":"ARN", + "documentation":"

The server-side encryption key that is used to protect your backups; for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.

" + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

A unique string that identifies the request and allows failed requests to be retried without the risk of executing the operation twice.

" + }, + "NumberOfRecoveryPoints":{ + "shape":"long", + "documentation":"

The number of recovery points that are stored in a backup vault.

" + } + }, + "documentation":"

Contains metadata about a backup vault.

" + }, + "BackupVaultName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9\\-\\_\\.]{1,50}$" + }, + "Boolean":{"type":"boolean"}, + "CalculatedLifecycle":{ + "type":"structure", + "members":{ + "MoveToColdStorageAt":{ + "shape":"timestamp", + "documentation":"

A timestamp that specifies when to transition a recovery point to cold storage.

" + }, + "DeleteAt":{ + "shape":"timestamp", + "documentation":"

A timestamp that specifies when to delete a recovery point.

" + } + }, + "documentation":"

Contains DeleteAt and MoveToColdStorageAt timestamps, which are used to specify a lifecycle for a recovery point.

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + }, + "Condition":{ + "type":"structure", + "required":[ + "ConditionType", + "ConditionKey", + "ConditionValue" + ], + "members":{ + "ConditionType":{ + "shape":"ConditionType", + "documentation":"

An operation, such as StringEquals, that is applied to a key-value pair used to filter resources in a selection.

" + }, + "ConditionKey":{ + "shape":"ConditionKey", + "documentation":"

The key in a key-value pair. For example, in \"ec2:ResourceTag/Department\": \"accounting\", \"ec2:ResourceTag/Department\" is the key.

" + }, + "ConditionValue":{ + "shape":"ConditionValue", + "documentation":"

The value in a key-value pair. For example, in \"ec2:ResourceTag/Department\": \"accounting\", \"accounting\" is the value.

" + } + }, + "documentation":"

Contains an array of triplets made up of a condition type (such as StringEquals), a key, and a value. Conditions are used to filter resources in a selection that is assigned to a backup plan.

" + }, + "ConditionKey":{"type":"string"}, + "ConditionType":{ + "type":"string", + "enum":["STRINGEQUALS"] + }, + "ConditionValue":{"type":"string"}, + "CreateBackupPlanInput":{ + "type":"structure", + "required":["BackupPlan"], + "members":{ + "BackupPlan":{ + "shape":"BackupPlanInput", + "documentation":"

Specifies the body of a backup plan. Includes a BackupPlanName and one or more sets of Rules.

" + }, + "BackupPlanTags":{ + "shape":"Tags", + "documentation":"

To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair. The specified tags are assigned to all backups created with this plan.

" + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

Identifies the request and allows failed requests to be retried without the risk of executing the operation twice. If the request includes a CreatorRequestId that matches an existing backup plan, that plan is returned. This parameter is optional.

" + } + } + }, + "CreateBackupPlanOutput":{ + "type":"structure", + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

" + }, + "BackupPlanArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup plan is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "VersionId":{ + "shape":"string", + "documentation":"

Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1024 bytes long. They cannot be edited.

" + } + } + }, + "CreateBackupSelectionInput":{ + "type":"structure", + "required":[ + "BackupPlanId", + "BackupSelection" + ], + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies the backup plan to be associated with the selection of resources.

", + "location":"uri", + "locationName":"backupPlanId" + }, + "BackupSelection":{ + "shape":"BackupSelection", + "documentation":"

Specifies the body of a request to assign a set of resources to a backup plan.

It includes an array of resources, an optional array of patterns to exclude resources, an optional role to provide access to the AWS service the resource belongs to, and an optional array of tags used to identify a set of resources.

" + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

A unique string that identifies the request and allows failed requests to be retried without the risk of executing the operation twice.

" + } + } + }, + "CreateBackupSelectionOutput":{ + "type":"structure", + "members":{ + "SelectionId":{ + "shape":"string", + "documentation":"

Uniquely identifies the body of a request to assign a set of resources to a backup plan.

" + }, + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a backup selection is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + } + }, + "CreateBackupVaultInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "BackupVaultTags":{ + "shape":"Tags", + "documentation":"

Metadata that you can assign to help organize the resources that you create. Each tag is a key-value pair.

" + }, + "EncryptionKeyArn":{ + "shape":"ARN", + "documentation":"

The server-side encryption key that is used to protect your backups; for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.

" + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

A unique string that identifies the request and allows failed requests to be retried without the risk of executing the operation twice.

" + } + } + }, + "CreateBackupVaultOutput":{ + "type":"structure", + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a backup vault is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + } + }, + "CronExpression":{"type":"string"}, + "DeleteBackupPlanInput":{ + "type":"structure", + "required":["BackupPlanId"], + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

", + "location":"uri", + "locationName":"backupPlanId" + } + } + }, + "DeleteBackupPlanOutput":{ + "type":"structure", + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

" + }, + "BackupPlanArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.

" + }, + "DeletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time a backup plan is deleted, in Unix format and Coordinated Universal Time (UTC). The value of DeletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "VersionId":{ + "shape":"string", + "documentation":"

Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1,024 bytes long. Version IDs cannot be edited.

" + } + } + }, + "DeleteBackupSelectionInput":{ + "type":"structure", + "required":[ + "BackupPlanId", + "SelectionId" + ], + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

", + "location":"uri", + "locationName":"backupPlanId" + }, + "SelectionId":{ + "shape":"string", + "documentation":"

Uniquely identifies the body of a request to assign a set of resources to a backup plan.

", + "location":"uri", + "locationName":"selectionId" + } + } + }, + "DeleteBackupVaultAccessPolicyInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + } + } + }, + "DeleteBackupVaultInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"string", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + } + } + }, + "DeleteBackupVaultNotificationsInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + } + } + }, + "DeleteRecoveryPointInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "RecoveryPointArn" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

", + "location":"uri", + "locationName":"recoveryPointArn" + } + } + }, + "DependencyFailureException":{ + "type":"structure", + "members":{ + "Code":{"shape":"string"}, + "Message":{"shape":"string"}, + "Type":{ + "shape":"string", + "documentation":"

" + }, + "Context":{ + "shape":"string", + "documentation":"

" + } + }, + "documentation":"

A dependent AWS service or resource returned an error to the AWS Backup service, and the action cannot be completed.

", + "exception":true, + "fault":true + }, + "DescribeBackupJobInput":{ + "type":"structure", + "required":["BackupJobId"], + "members":{ + "BackupJobId":{ + "shape":"string", + "documentation":"

Uniquely identifies a request to AWS Backup to back up a resource.

", + "location":"uri", + "locationName":"backupJobId" + } + } + }, + "DescribeBackupJobOutput":{ + "type":"structure", + "members":{ + "BackupJobId":{ + "shape":"string", + "documentation":"

Uniquely identifies a request to AWS Backup to back up a resource.

" + }, + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a saved resource. The format of the ARN depends on the resource type.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup job is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CompletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a job to create a backup job is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "State":{ + "shape":"BackupJobState", + "documentation":"

The current state of a resource recovery point.

" + }, + "StatusMessage":{ + "shape":"string", + "documentation":"

A detailed message explaining the status of the job to back up a resource.

" + }, + "PercentDone":{ + "shape":"string", + "documentation":"

Contains an estimated percentage that is complete of a job at the time the job status was queried.

" + }, + "BackupSizeInBytes":{ + "shape":"Long", + "documentation":"

The size, in bytes, of a backup.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "CreatedBy":{ + "shape":"RecoveryPointCreator", + "documentation":"

Contains identifying information about the creation of a backup job, including the BackupPlanArn, BackupPlanId, BackupPlanVersion, and BackupRuleId of the backup plan that is used to create it.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource to be backed-up; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + }, + "BytesTransferred":{ + "shape":"Long", + "documentation":"

The size in bytes transferred to a backup vault at the time that the job status was queried.

" + }, + "ExpectedCompletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a job to back up resources is expected to be completed, in Unix format and Coordinated Universal Time (UTC). The value of ExpectedCompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "StartBy":{ + "shape":"timestamp", + "documentation":"

Specifies the time in Unix format and Coordinated Universal Time (UTC) when a backup job must be started before it is canceled. The value is calculated by adding the start window to the scheduled time. So if the scheduled time were 6:00 PM and the start window is 2 hours, the StartBy time would be 8:00 PM on the date specified. The value of StartBy is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + } + }, + "DescribeBackupVaultInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"string", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + } + } + }, + "DescribeBackupVaultOutput":{ + "type":"structure", + "members":{ + "BackupVaultName":{ + "shape":"string", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "EncryptionKeyArn":{ + "shape":"ARN", + "documentation":"

The server-side encryption key that is used to protect your backups; for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup vault is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

A unique string that identifies the request and allows failed requests to be retried without the risk of executing the operation twice.

" + }, + "NumberOfRecoveryPoints":{ + "shape":"long", + "documentation":"

The number of recovery points that are stored in a backup vault.

" + } + } + }, + "DescribeProtectedResourceInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a resource. The format of the ARN depends on the resource type.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "DescribeProtectedResourceOutput":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a resource. The format of the ARN depends on the resource type.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource saved as a recovery point; for example, an EBS volume or an Amazon RDS database.

" + }, + "LastBackupTime":{ + "shape":"timestamp", + "documentation":"

The date and time that a resource was last backed up, in Unix format and Coordinated Universal Time (UTC). The value of LastBackupTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + } + }, + "DescribeRecoveryPointInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "RecoveryPointArn" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

", + "location":"uri", + "locationName":"recoveryPointArn" + } + } + }, + "DescribeRecoveryPointOutput":{ + "type":"structure", + "members":{ + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a saved resource. The format of the ARN depends on the resource type.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource to save as a recovery point; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + }, + "CreatedBy":{ + "shape":"RecoveryPointCreator", + "documentation":"

Contains identifying information about the creation of a recovery point, including the BackupPlanArn, BackupPlanId, BackupPlanVersion, and BackupRuleId of the backup plan used to create it.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "Status":{ + "shape":"RecoveryPointStatus", + "documentation":"

A status code specifying the state of the recovery point.

A partial status indicates that the recovery point was not successfully re-created and must be retried.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a recovery point is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CompletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a job to create a recovery point is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "BackupSizeInBytes":{ + "shape":"Long", + "documentation":"

The size, in bytes, of a backup.

" + }, + "CalculatedLifecycle":{ + "shape":"CalculatedLifecycle", + "documentation":"

A CalculatedLifecycle object containing DeleteAt and MoveToColdStorageAt timestamps.

" + }, + "Lifecycle":{ + "shape":"Lifecycle", + "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups that are transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + }, + "EncryptionKeyArn":{ + "shape":"ARN", + "documentation":"

The server-side encryption key used to protect your backups; for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.

" + }, + "IsEncrypted":{ + "shape":"boolean", + "documentation":"

A Boolean value that is returned as TRUE if the specified recovery point is encrypted, or FALSE if the recovery point is not encrypted.

" + }, + "StorageClass":{ + "shape":"StorageClass", + "documentation":"

Specifies the storage class of the recovery point. Valid values are WARM or COLD.

" + }, + "LastRestoreTime":{ + "shape":"timestamp", + "documentation":"

The date and time that a recovery point was last restored, in Unix format and Coordinated Universal Time (UTC). The value of LastRestoreTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + } + }, + "DescribeRestoreJobInput":{ + "type":"structure", + "required":["RestoreJobId"], + "members":{ + "RestoreJobId":{ + "shape":"RestoreJobId", + "documentation":"

Uniquely identifies the job that restores a recovery point.

", + "location":"uri", + "locationName":"restoreJobId" + } + } + }, + "DescribeRestoreJobOutput":{ + "type":"structure", + "members":{ + "RestoreJobId":{ + "shape":"string", + "documentation":"

Uniquely identifies the job that restores a recovery point.

" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a restore job is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CompletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a job to restore a recovery point is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "Status":{ + "shape":"RestoreJobStatus", + "documentation":"

Status code specifying the state of the job that is initiated by AWS Backup to restore a recovery point.

" + }, + "StatusMessage":{ + "shape":"string", + "documentation":"

A detailed message explaining the status of a job to restore a recovery point.

" + }, + "PercentDone":{ + "shape":"string", + "documentation":"

Contains an estimated percentage that is complete of a job at the time the job status was queried.

" + }, + "BackupSizeInBytes":{ + "shape":"Long", + "documentation":"

The size, in bytes, of the restored resource.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "ExpectedCompletionTimeMinutes":{ + "shape":"Long", + "documentation":"

The amount of time in minutes that a job restoring a recovery point is expected to take.

" + }, + "CreatedResourceArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a resource whose recovery point is being restored. The format of the ARN depends on the resource type of the backed-up resource.

" + } + } + }, + "ExportBackupPlanTemplateInput":{ + "type":"structure", + "required":["BackupPlanId"], + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

", + "location":"uri", + "locationName":"backupPlanId" + } + } + }, + "ExportBackupPlanTemplateOutput":{ + "type":"structure", + "members":{ + "BackupPlanTemplateJson":{ + "shape":"string", + "documentation":"

The body of a backup plan template in JSON format.

This is a signed JSON document that cannot be modified before being passed to GetBackupPlanFromJSON.

" + } + } + }, + "GetBackupPlanFromJSONInput":{ + "type":"structure", + "required":["BackupPlanTemplateJson"], + "members":{ + "BackupPlanTemplateJson":{ + "shape":"string", + "documentation":"

A customer-supplied backup plan document in JSON format.

" + } + } + }, + "GetBackupPlanFromJSONOutput":{ + "type":"structure", + "members":{ + "BackupPlan":{ + "shape":"BackupPlan", + "documentation":"

Specifies the body of a backup plan. Includes a BackupPlanName and one or more sets of Rules.

" + } + } + }, + "GetBackupPlanFromTemplateInput":{ + "type":"structure", + "required":["BackupPlanTemplateId"], + "members":{ + "BackupPlanTemplateId":{ + "shape":"string", + "documentation":"

Uniquely identifies a stored backup plan template.

", + "location":"uri", + "locationName":"templateId" + } + } + }, + "GetBackupPlanFromTemplateOutput":{ + "type":"structure", + "members":{ + "BackupPlanDocument":{ + "shape":"BackupPlan", + "documentation":"

Returns the body of a backup plan based on the target template, including the name, rules, and backup vault of the plan.

" + } + } + }, + "GetBackupPlanInput":{ + "type":"structure", + "required":["BackupPlanId"], + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

", + "location":"uri", + "locationName":"backupPlanId" + }, + "VersionId":{ + "shape":"string", + "documentation":"

Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1,024 bytes long. Version IDs cannot be edited.

", + "location":"querystring", + "locationName":"versionId" + } + } + }, + "GetBackupPlanOutput":{ + "type":"structure", + "members":{ + "BackupPlan":{ + "shape":"BackupPlan", + "documentation":"

Specifies the body of a backup plan. Includes a BackupPlanName and one or more sets of Rules.

" + }, + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

" + }, + "BackupPlanArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.

" + }, + "VersionId":{ + "shape":"string", + "documentation":"

Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1,024 bytes long. Version IDs cannot be edited.

" + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

A unique string that identifies the request and allows failed requests to be retried without the risk of executing the operation twice.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup plan is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "DeletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup plan is deleted, in Unix format and Coordinated Universal Time (UTC). The value of DeletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "LastExecutionDate":{ + "shape":"timestamp", + "documentation":"

The last time a job to back up resources was executed with this backup plan. A date and time, in Unix format and Coordinated Universal Time (UTC). The value of LastExecutionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + } + }, + "GetBackupSelectionInput":{ + "type":"structure", + "required":[ + "BackupPlanId", + "SelectionId" + ], + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

", + "location":"uri", + "locationName":"backupPlanId" + }, + "SelectionId":{ + "shape":"string", + "documentation":"

Uniquely identifies the body of a request to assign a set of resources to a backup plan.

", + "location":"uri", + "locationName":"selectionId" + } + } + }, + "GetBackupSelectionOutput":{ + "type":"structure", + "members":{ + "BackupSelection":{ + "shape":"BackupSelection", + "documentation":"

Specifies the body of a request to assign a set of resources to a backup plan.

It includes an array of resources, an optional array of patterns to exclude resources, an optional role to provide access to the AWS service that the resource belongs to, and an optional array of tags used to identify a set of resources.

" + }, + "SelectionId":{ + "shape":"string", + "documentation":"

Uniquely identifies the body of a request to assign a set of resources to a backup plan.

" + }, + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a backup selection is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

A unique string that identifies the request and allows failed requests to be retried without the risk of executing the operation twice.

" + } + } + }, + "GetBackupVaultAccessPolicyInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + } + } + }, + "GetBackupVaultAccessPolicyOutput":{ + "type":"structure", + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "Policy":{ + "shape":"IAMPolicy", + "documentation":"

The backup vault access policy document in JSON format.

" + } + } + }, + "GetBackupVaultNotificationsInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + } + } + }, + "GetBackupVaultNotificationsOutput":{ + "type":"structure", + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "SNSTopicArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies an Amazon Simple Notification Service (Amazon SNS) topic; for example, arn:aws:sns:us-west-2:111122223333:MyTopic.

" + }, + "BackupVaultEvents":{ + "shape":"BackupVaultEvents", + "documentation":"

An array of events that indicate the status of jobs to back up resources to the backup vault.

" + } + } + }, + "GetRecoveryPointRestoreMetadataInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "RecoveryPointArn" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

", + "location":"uri", + "locationName":"recoveryPointArn" + } + } + }, + "GetRecoveryPointRestoreMetadataOutput":{ + "type":"structure", + "members":{ + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "RestoreMetadata":{ + "shape":"Metadata", + "documentation":"

A set of metadata key-value pairs that lists the metadata key-value pairs that are required to restore the recovery point.

" + } + } + }, + "GetSupportedResourceTypesOutput":{ + "type":"structure", + "members":{ + "ResourceTypes":{ + "shape":"ResourceTypes", + "documentation":"

Contains a string with the supported AWS resource types:

" + } + } + }, + "IAMPolicy":{"type":"string"}, + "IAMRoleArn":{"type":"string"}, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "Code":{"shape":"string"}, + "Message":{"shape":"string"}, + "Type":{ + "shape":"string", + "documentation":"

" + }, + "Context":{ + "shape":"string", + "documentation":"

" + } + }, + "documentation":"

Indicates that something is wrong with a parameter's value. For example, the value is out of range.

", + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Code":{"shape":"string"}, + "Message":{"shape":"string"}, + "Type":{ + "shape":"string", + "documentation":"

" + }, + "Context":{ + "shape":"string", + "documentation":"

" + } + }, + "documentation":"

Indicates that something is wrong with the input to the request. For example, a parameter is of the wrong type.

", + "exception":true + }, + "Lifecycle":{ + "type":"structure", + "members":{ + "MoveToColdStorageAfterDays":{ + "shape":"Long", + "documentation":"

Specifies the number of days after creation that a recovery point is moved to cold storage.

" + }, + "DeleteAfterDays":{ + "shape":"Long", + "documentation":"

Specifies the number of days after creation that a recovery point is deleted. Must be greater than MoveToColdStorageAfterDays.

" + } + }, + "documentation":"

Contains an array of Transition objects specifying how long in days before a recovery point transitions to cold storage or is deleted.

" + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Code":{"shape":"string"}, + "Message":{"shape":"string"}, + "Type":{ + "shape":"string", + "documentation":"

" + }, + "Context":{ + "shape":"string", + "documentation":"

" + } + }, + "documentation":"

A limit in the request has been exceeded; for example, a maximum number of items allowed in a request.

", + "exception":true + }, + "ListBackupJobsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "ByResourceArn":{ + "shape":"ARN", + "documentation":"

Returns only backup jobs that match the specified resource Amazon Resource Name (ARN).

", + "location":"querystring", + "locationName":"resourceArn" + }, + "ByState":{ + "shape":"BackupJobState", + "documentation":"

Returns only backup jobs that are in the specified state.

", + "location":"querystring", + "locationName":"state" + }, + "ByBackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

Returns only backup jobs that will be stored in the specified backup vault. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"querystring", + "locationName":"backupVaultName" + }, + "ByCreatedBefore":{ + "shape":"timestamp", + "documentation":"

Returns only backup jobs that were created before the specified date.

", + "location":"querystring", + "locationName":"createdBefore" + }, + "ByCreatedAfter":{ + "shape":"timestamp", + "documentation":"

Returns only backup jobs that were created after the specified date.

", + "location":"querystring", + "locationName":"createdAfter" + }, + "ByResourceType":{ + "shape":"ResourceType", + "documentation":"

Returns only backup jobs for the specified resources:

", + "location":"querystring", + "locationName":"resourceType" + } + } + }, + "ListBackupJobsOutput":{ + "type":"structure", + "members":{ + "BackupJobs":{ + "shape":"BackupJobsList", + "documentation":"

An array of structures containing metadata about your backup jobs returned in JSON format.

" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + } + } + }, + "ListBackupPlanTemplatesInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListBackupPlanTemplatesOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + }, + "BackupPlanTemplatesList":{ + "shape":"BackupPlanTemplatesList", + "documentation":"

An array of template list items containing metadata about your saved templates.

" + } + } + }, + "ListBackupPlanVersionsInput":{ + "type":"structure", + "required":["BackupPlanId"], + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

", + "location":"uri", + "locationName":"backupPlanId" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListBackupPlanVersionsOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + }, + "BackupPlanVersionsList":{ + "shape":"BackupPlanVersionsList", + "documentation":"

An array of version list items containing metadata about your backup plans.

" + } + } + }, + "ListBackupPlansInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "IncludeDeleted":{ + "shape":"Boolean", + "documentation":"

A Boolean value with a default value of FALSE that returns deleted backup plans when set to TRUE.

", + "location":"querystring", + "locationName":"includeDeleted" + } + } + }, + "ListBackupPlansOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + }, + "BackupPlansList":{ + "shape":"BackupPlansList", + "documentation":"

An array of backup plan list items containing metadata about your saved backup plans.

" + } + } + }, + "ListBackupSelectionsInput":{ + "type":"structure", + "required":["BackupPlanId"], + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

", + "location":"uri", + "locationName":"backupPlanId" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListBackupSelectionsOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + }, + "BackupSelectionsList":{ + "shape":"BackupSelectionsList", + "documentation":"

An array of backup selection list items containing metadata about each resource in the list.

" + } + } + }, + "ListBackupVaultsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListBackupVaultsOutput":{ + "type":"structure", + "members":{ + "BackupVaultList":{ + "shape":"BackupVaultList", + "documentation":"

An array of backup vault list members containing vault metadata, including Amazon Resource Name (ARN), display name, creation date, number of saved recovery points, and encryption information if the resources saved in the backup vault are encrypted.

" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + } + } + }, + "ListOfTags":{ + "type":"list", + "member":{"shape":"Condition"} + }, + "ListProtectedResourcesInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProtectedResourcesOutput":{ + "type":"structure", + "members":{ + "Results":{ + "shape":"ProtectedResourcesList", + "documentation":"

An array of resources successfully backed up by AWS Backup including the time the resource was saved, an Amazon Resource Name (ARN) of the resource, and a resource type.

" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + } + } + }, + "ListRecoveryPointsByBackupVaultInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "ByResourceArn":{ + "shape":"ARN", + "documentation":"

Returns only recovery points that match the specified resource Amazon Resource Name (ARN).

", + "location":"querystring", + "locationName":"resourceArn" + }, + "ByResourceType":{ + "shape":"ResourceType", + "documentation":"

Returns only recovery points that match the specified resource type.

", + "location":"querystring", + "locationName":"resourceType" + }, + "ByBackupPlanId":{ + "shape":"string", + "documentation":"

Returns only recovery points that match the specified backup plan ID.

", + "location":"querystring", + "locationName":"backupPlanId" + }, + "ByCreatedBefore":{ + "shape":"timestamp", + "documentation":"

Returns only recovery points that were created before the specified timestamp.

", + "location":"querystring", + "locationName":"createdBefore" + }, + "ByCreatedAfter":{ + "shape":"timestamp", + "documentation":"

Returns only recovery points that were created after the specified timestamp.

", + "location":"querystring", + "locationName":"createdAfter" + } + } + }, + "ListRecoveryPointsByBackupVaultOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + }, + "RecoveryPoints":{ + "shape":"RecoveryPointByBackupVaultList", + "documentation":"

An array of objects that contain detailed information about recovery points saved in a backup vault.

" + } + } + }, + "ListRecoveryPointsByResourceInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a resource. The format of the ARN depends on the resource type.

", + "location":"uri", + "locationName":"resourceArn" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListRecoveryPointsByResourceOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + }, + "RecoveryPoints":{ + "shape":"RecoveryPointByResourceList", + "documentation":"

An array of objects that contain detailed information about recovery points of the specified resource type.

" + } + } + }, + "ListRestoreJobsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListRestoreJobsOutput":{ + "type":"structure", + "members":{ + "RestoreJobs":{ + "shape":"RestoreJobsList", + "documentation":"

An array of objects that contain detailed information about jobs to restore saved resources.

" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + } + } + }, + "ListTagsInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a resource. The format of the ARN depends on the type of resource. Valid targets for ListTags are recovery points, backup plans, and backup vaults.

", + "location":"uri", + "locationName":"resourceArn" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListTagsOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

To help organize your resources, you can assign your own metadata to the resources you create. Each tag is a key-value pair.

" + } + } + }, + "Long":{"type":"long"}, + "MaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "Metadata":{ + "type":"map", + "key":{"shape":"MetadataKey"}, + "value":{"shape":"MetadataValue"} + }, + "MetadataKey":{"type":"string"}, + "MetadataValue":{"type":"string"}, + "MissingParameterValueException":{ + "type":"structure", + "members":{ + "Code":{"shape":"string"}, + "Message":{"shape":"string"}, + "Type":{ + "shape":"string", + "documentation":"

" + }, + "Context":{ + "shape":"string", + "documentation":"

" + } + }, + "documentation":"

Indicates that a required parameter is missing.

", + "exception":true + }, + "ProtectedResource":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a resource. The format of the ARN depends on the resource type.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + }, + "LastBackupTime":{ + "shape":"timestamp", + "documentation":"

The date and time a resource was last backed up, in Unix format and Coordinated Universal Time (UTC). The value of LastBackupTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + }, + "documentation":"

A structure that contains information about a backed-up resource.

" + }, + "ProtectedResourcesList":{ + "type":"list", + "member":{"shape":"ProtectedResource"} + }, + "PutBackupVaultAccessPolicyInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "Policy":{ + "shape":"IAMPolicy", + "documentation":"

The backup vault access policy document in JSON format.

" + } + } + }, + "PutBackupVaultNotificationsInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "SNSTopicArn", + "BackupVaultEvents" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "SNSTopicArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) that specifies the topic for a backup vault’s events; for example, arn:aws:sns:us-west-2:111122223333:MyVaultTopic.

" + }, + "BackupVaultEvents":{ + "shape":"BackupVaultEvents", + "documentation":"

An array of events that indicate the status of jobs to back up resources to the backup vault.

" + } + } + }, + "RecoveryPointByBackupVault":{ + "type":"structure", + "members":{ + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a resource. The format of the ARN depends on the resource type.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource saved as a recovery point; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + }, + "CreatedBy":{ + "shape":"RecoveryPointCreator", + "documentation":"

Contains identifying information about the creation of a recovery point, including the BackupPlanArn, BackupPlanId, BackupPlanVersion, and BackupRuleId of the backup plan that is used to create it.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "Status":{ + "shape":"RecoveryPointStatus", + "documentation":"

A status code specifying the state of the recovery point.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a recovery point is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CompletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time a job to restore a recovery point is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "BackupSizeInBytes":{ + "shape":"Long", + "documentation":"

The size, in bytes, of a backup.

" + }, + "CalculatedLifecycle":{ + "shape":"CalculatedLifecycle", + "documentation":"

A CalculatedLifecycle object containing DeleteAt and MoveToColdStorageAt timestamps.

" + }, + "Lifecycle":{ + "shape":"Lifecycle", + "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + }, + "EncryptionKeyArn":{ + "shape":"ARN", + "documentation":"

The server-side encryption key that is used to protect your backups; for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.

" + }, + "IsEncrypted":{ + "shape":"boolean", + "documentation":"

A Boolean value that is returned as TRUE if the specified recovery point is encrypted, or FALSE if the recovery point is not encrypted.

" + }, + "LastRestoreTime":{ + "shape":"timestamp", + "documentation":"

The date and time a recovery point was last restored, in Unix format and Coordinated Universal Time (UTC). The value of LastRestoreTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + }, + "documentation":"

Contains detailed information about the recovery points stored in a backup vault.

" + }, + "RecoveryPointByBackupVaultList":{ + "type":"list", + "member":{"shape":"RecoveryPointByBackupVault"} + }, + "RecoveryPointByResource":{ + "type":"structure", + "members":{ + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a recovery point is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "Status":{ + "shape":"RecoveryPointStatus", + "documentation":"

A status code specifying the state of the recovery point.

" + }, + "EncryptionKeyArn":{ + "shape":"ARN", + "documentation":"

The server-side encryption key that is used to protect your backups; for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.

" + }, + "BackupSizeBytes":{ + "shape":"Long", + "documentation":"

The size, in bytes, of a backup.

" + }, + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + } + }, + "documentation":"

Contains detailed information about a saved recovery point.

" + }, + "RecoveryPointByResourceList":{ + "type":"list", + "member":{"shape":"RecoveryPointByResource"} + }, + "RecoveryPointCreator":{ + "type":"structure", + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

" + }, + "BackupPlanArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.

" + }, + "BackupPlanVersion":{ + "shape":"string", + "documentation":"

Version IDs are unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1,024 bytes long. They cannot be edited.

" + }, + "BackupRuleId":{ + "shape":"string", + "documentation":"

Uniquely identifies a rule used to schedule the backup of a selection of resources.

" + } + }, + "documentation":"

Contains information about the backup plan and rule that AWS Backup used to initiate the recovery point backup.

" + }, + "RecoveryPointStatus":{ + "type":"string", + "enum":[ + "COMPLETED", + "PARTIAL", + "DELETING", + "EXPIRED" + ] + }, + "ResourceArns":{ + "type":"list", + "member":{"shape":"ARN"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Code":{"shape":"string"}, + "Message":{"shape":"string"}, + "Type":{ + "shape":"string", + "documentation":"

" + }, + "Context":{ + "shape":"string", + "documentation":"

" + } + }, + "documentation":"

A resource that is required for the action doesn't exist.

", + "exception":true + }, + "ResourceType":{ + "type":"string", + "pattern":"^[a-zA-Z0-9\\-\\_\\.]{1,50}$" + }, + "ResourceTypes":{ + "type":"list", + "member":{"shape":"ResourceType"} + }, + "RestoreJobId":{"type":"string"}, + "RestoreJobStatus":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "COMPLETED", + "ABORTED", + "FAILED" + ] + }, + "RestoreJobsList":{ + "type":"list", + "member":{"shape":"RestoreJobsListMember"} + }, + "RestoreJobsListMember":{ + "type":"structure", + "members":{ + "RestoreJobId":{ + "shape":"string", + "documentation":"

Uniquely identifies the job that restores a recovery point.

" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a restore job is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CompletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time a job to restore a recovery point is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "Status":{ + "shape":"RestoreJobStatus", + "documentation":"

A status code specifying the state of the job initiated by AWS Backup to restore a recovery point.

" + }, + "StatusMessage":{ + "shape":"string", + "documentation":"

A detailed message explaining the status of the job to restore a recovery point.

" + }, + "PercentDone":{ + "shape":"string", + "documentation":"

Contains an estimated percentage complete of a job at the time the job status was queried.

" + }, + "BackupSizeInBytes":{ + "shape":"Long", + "documentation":"

The size, in bytes, of the restored resource.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "ExpectedCompletionTimeMinutes":{ + "shape":"Long", + "documentation":"

The amount of time in minutes that a job restoring a recovery point is expected to take.

" + }, + "CreatedResourceArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a resource. The format of the ARN depends on the resource type.

" + } + }, + "documentation":"

Contains metadata about a restore job.

" + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "Code":{"shape":"string"}, + "Message":{"shape":"string"}, + "Type":{ + "shape":"string", + "documentation":"

" + }, + "Context":{ + "shape":"string", + "documentation":"

" + } + }, + "documentation":"

The request failed due to a temporary failure of the server.

", + "exception":true, + "fault":true + }, + "StartBackupJobInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "ResourceArn", + "IamRoleArn" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a resource. The format of the ARN depends on the resource type.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "IdempotencyToken":{ + "shape":"string", + "documentation":"

A customer chosen string that can be used to distinguish between calls to StartBackupJob. Idempotency tokens time out after one hour. Therefore, if you call StartBackupJob multiple times with the same idempotency token within one hour, AWS Backup recognizes that you are requesting only one backup job and initiates only one. If you change the idempotency token for each call, AWS Backup recognizes that you are requesting to start multiple backups.

" + }, + "StartWindowMinutes":{ + "shape":"WindowMinutes", + "documentation":"

The amount of time in minutes before beginning a backup.

" + }, + "CompleteWindowMinutes":{ + "shape":"WindowMinutes", + "documentation":"

The amount of time AWS Backup attempts a backup before canceling the job and returning an error.

" + }, + "Lifecycle":{ + "shape":"Lifecycle", + "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup will transition and expire backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + }, + "RecoveryPointTags":{ + "shape":"Tags", + "documentation":"

To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair.

" + } + } + }, + "StartBackupJobOutput":{ + "type":"structure", + "members":{ + "BackupJobId":{ + "shape":"string", + "documentation":"

Uniquely identifies a request to AWS Backup to back up a resource.

" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup job is started, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + } + }, + "StartRestoreJobInput":{ + "type":"structure", + "required":[ + "RecoveryPointArn", + "Metadata", + "IamRoleArn" + ], + "members":{ + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

A set of metadata key-value pairs. Lists the metadata that the recovery point was created with.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that AWS Backup uses to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "IdempotencyToken":{ + "shape":"string", + "documentation":"

A customer chosen string that can be used to distinguish between calls to StartRestoreJob. Idempotency tokens time out after one hour. Therefore, if you call StartRestoreJob multiple times with the same idempotency token within one hour, AWS Backup recognizes that you are requesting only one restore job and initiates only one. If you change the idempotency token for each call, AWS Backup recognizes that you are requesting to start multiple restores.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

Starts a job to restore a recovery point for one of the following resources:

" + } + } + }, + "StartRestoreJobOutput":{ + "type":"structure", + "members":{ + "RestoreJobId":{ + "shape":"RestoreJobId", + "documentation":"

Uniquely identifies the job that restores a recovery point.

" + } + } + }, + "StopBackupJobInput":{ + "type":"structure", + "required":["BackupJobId"], + "members":{ + "BackupJobId":{ + "shape":"string", + "documentation":"

Uniquely identifies a request to AWS Backup to back up a resource.

", + "location":"uri", + "locationName":"backupJobId" + } + } + }, + "StorageClass":{ + "type":"string", + "enum":[ + "WARM", + "COLD", + "DELETED" + ] + }, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"string"}, + "sensitive":true + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a resource. The format of the ARN depends on the type of the tagged resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

Key-value pairs that are used to help organize your resources. You can assign your own metadata to the resources you create.

" + } + } + }, + "TagValue":{"type":"string"}, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "sensitive":true + }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeyList" + ], + "members":{ + "ResourceArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a resource. The format of the ARN depends on the type of the tagged resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeyList":{ + "shape":"TagKeyList", + "documentation":"

A list of keys to identify which key-value tags to remove from a resource.

" + } + } + }, + "UpdateBackupPlanInput":{ + "type":"structure", + "required":[ + "BackupPlanId", + "BackupPlan" + ], + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

", + "location":"uri", + "locationName":"backupPlanId" + }, + "BackupPlan":{ + "shape":"BackupPlanInput", + "documentation":"

Specifies the body of a backup plan. Includes a BackupPlanName and one or more sets of Rules.

" + } + } + }, + "UpdateBackupPlanOutput":{ + "type":"structure", + "members":{ + "BackupPlanId":{ + "shape":"string", + "documentation":"

Uniquely identifies a backup plan.

" + }, + "BackupPlanArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a backup plan is updated, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "VersionId":{ + "shape":"string", + "documentation":"

Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1,024 bytes long. Version Ids cannot be edited.

" + } + } + }, + "UpdateRecoveryPointLifecycleInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "RecoveryPointArn" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

", + "location":"uri", + "locationName":"recoveryPointArn" + }, + "Lifecycle":{ + "shape":"Lifecycle", + "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + } + } + }, + "UpdateRecoveryPointLifecycleOutput":{ + "type":"structure", + "members":{ + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "Lifecycle":{ + "shape":"Lifecycle", + "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + }, + "CalculatedLifecycle":{ + "shape":"CalculatedLifecycle", + "documentation":"

A CalculatedLifecycle object containing DeleteAt and MoveToColdStorageAt timestamps.

" + } + } + }, + "WindowMinutes":{"type":"long"}, + "boolean":{"type":"boolean"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + }, + "documentation":"AWS Backup

AWS Backup is a unified backup service designed to protect AWS services and their associated data. AWS Backup simplifies the creation, migration, restoration, and deletion of backups, while also providing reporting and auditing.

" +} diff --git a/botocore/data/batch/2016-08-10/paginators-1.json b/botocore/data/batch/2016-08-10/paginators-1.json index ea142457..ca39096b 100644 --- a/botocore/data/batch/2016-08-10/paginators-1.json +++ b/botocore/data/batch/2016-08-10/paginators-1.json @@ -1,3 +1,28 @@ { - "pagination": {} + "pagination": { + "DescribeComputeEnvironments": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "computeEnvironments" + }, + "DescribeJobDefinitions": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "jobDefinitions" + }, + "DescribeJobQueues": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "jobQueues" + }, + "ListJobs": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "jobSummaryList" + } + } } diff --git a/botocore/data/budgets/2016-10-20/paginators-1.json b/botocore/data/budgets/2016-10-20/paginators-1.json index ea142457..d66d30b3 100644 --- a/botocore/data/budgets/2016-10-20/paginators-1.json +++ b/botocore/data/budgets/2016-10-20/paginators-1.json @@ -1,3 +1,22 @@ { - "pagination": {} + "pagination": { + "DescribeBudgets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Budgets" + }, + "DescribeNotificationsForBudget": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Notifications" + }, + "DescribeSubscribersForNotification": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Subscribers" + } + } } diff --git a/botocore/data/ce/2017-10-25/service-2.json b/botocore/data/ce/2017-10-25/service-2.json index b9044eec..4336b0cc 100644 --- a/botocore/data/ce/2017-10-25/service-2.json +++ b/botocore/data/ce/2017-10-25/service-2.json @@ -657,7 +657,7 @@ }, 
"GroupBy":{ "shape":"GroupDefinitions", - "documentation":"

You can group the data by the following attributes:

" + "documentation":"

You can group the data by the following attributes:

" }, "Granularity":{ "shape":"Granularity", @@ -667,7 +667,10 @@ "shape":"Expression", "documentation":"

Filters utilization data by dimensions. You can filter by the following dimensions:

GetReservationCoverage uses the same Expression object as the other operations, but only AND is supported among each dimension. You can nest only one level deep. If there are multiple values for a dimension, they are OR'd together.

If you don't provide a SERVICE filter, Cost Explorer defaults to EC2.

" }, - "Metrics":{"shape":"MetricNames"}, + "Metrics":{ + "shape":"MetricNames", + "documentation":"

The measurement that you want your reservation coverage reported in.

Valid values are Hour, Unit, and Cost. You can use multiple values in a request.

" + }, "NextPageToken":{ "shape":"NextPageToken", "documentation":"

The token to retrieve the next set of results. AWS provides the token when the response from a previous call has more results than the maximum page size.

" @@ -1220,7 +1223,10 @@ "ReservationPurchaseRecommendationDetail":{ "type":"structure", "members":{ - "AccountId":{"shape":"GenericString"}, + "AccountId":{ + "shape":"GenericString", + "documentation":"

The account that this RI recommendation is for.

" + }, "InstanceDetails":{ "shape":"InstanceDetails", "documentation":"

Details about the instances that AWS recommends that you purchase.

" diff --git a/botocore/data/chime/2018-05-01/paginators-1.json b/botocore/data/chime/2018-05-01/paginators-1.json index ea142457..617b1149 100644 --- a/botocore/data/chime/2018-05-01/paginators-1.json +++ b/botocore/data/chime/2018-05-01/paginators-1.json @@ -1,3 +1,16 @@ { - "pagination": {} + "pagination": { + "ListAccounts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Accounts" + }, + "ListUsers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Users" + } + } } diff --git a/botocore/data/chime/2018-05-01/service-2.json b/botocore/data/chime/2018-05-01/service-2.json index bbb92ec2..0e958229 100644 --- a/botocore/data/chime/2018-05-01/service-2.json +++ b/botocore/data/chime/2018-05-01/service-2.json @@ -28,7 +28,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Suspends up to 50 users from a Team or EnterpriseLWA Amazon Chime account. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

Users suspended from a Team account are dissociated from the account, but they can continue to use Amazon Chime as free users. To remove the suspension from suspended Team account users, invite them to the Team account again. You can use the InviteUsers action to do so.

Users suspended from an EnterpriseLWA account are immediately signed out of Amazon Chime and are no longer able to sign in. To remove the suspension from suspended EnterpriseLWA account users, use the BatchUnsuspendUser action.

To sign out users without suspending them, use the LogoutUser action.

" + "documentation":"

Suspends up to 50 users from a Team or EnterpriseLWA Amazon Chime account. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

Users suspended from a Team account are dissociated from the account, but they can continue to use Amazon Chime as free users. To remove the suspension from suspended Team account users, invite them to the Team account again. You can use the InviteUsers action to do so.

Users suspended from an EnterpriseLWA account are immediately signed out of Amazon Chime and are no longer able to sign in. To remove the suspension from suspended EnterpriseLWA account users, use the BatchUnsuspendUser action.

To sign out users without suspending them, use the LogoutUser action.

" }, "BatchUnsuspendUser":{ "name":"BatchUnsuspendUser", @@ -48,7 +48,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the suspension from up to 50 previously suspended users for the specified Amazon Chime EnterpriseLWA account. Only users on EnterpriseLWA accounts can be unsuspended using this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

Previously suspended users who are unsuspended using this action are returned to Registered status. Users who are not previously suspended are ignored.

" + "documentation":"

Removes the suspension from up to 50 previously suspended users for the specified Amazon Chime EnterpriseLWA account. Only users on EnterpriseLWA accounts can be unsuspended using this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

Previously suspended users who are unsuspended using this action are returned to Registered status. Users who are not previously suspended are ignored.

" }, "BatchUpdateUser":{ "name":"BatchUpdateUser", @@ -88,7 +88,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates an Amazon Chime account under the administrator's AWS account. Only Team account types are currently supported for this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

" + "documentation":"

Creates an Amazon Chime account under the administrator's AWS account. Only Team account types are currently supported for this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

" }, "DeleteAccount":{ "name":"DeleteAccount", @@ -147,7 +147,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves account settings for the specified Amazon Chime account ID, such as remote control and dial out settings. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" + "documentation":"

Retrieves account settings for the specified Amazon Chime account ID, such as remote control and dial out settings. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" }, "GetUser":{ "name":"GetUser", @@ -307,7 +307,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the settings for the specified Amazon Chime account. You can update settings for remote control of shared screens, or for the dial-out option. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" + "documentation":"

Updates the settings for the specified Amazon Chime account. You can update settings for remote control of shared screens, or for the dial-out option. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" }, "UpdateUser":{ "name":"UpdateUser", @@ -353,7 +353,7 @@ }, "AccountType":{ "shape":"AccountType", - "documentation":"

The Amazon Chime account type. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

" + "documentation":"

The Amazon Chime account type. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

" }, "CreatedTimestamp":{ "shape":"Iso8601Timestamp", @@ -389,10 +389,10 @@ }, "EnableDialOut":{ "shape":"Boolean", - "documentation":"

Setting that allows meeting participants to choose the Call me at a phone number option. For more information, see Join a Meeting without the Amazon Chime App.

" + "documentation":"

Setting that allows meeting participants to choose the Call me at a phone number option. For more information, see Join a Meeting without the Amazon Chime App.

" } }, - "documentation":"

Settings related to the Amazon Chime account. This includes settings that start or stop remote control of shared screens, or start or stop the dial-out option in the Amazon Chime web application. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" + "documentation":"

Settings related to the Amazon Chime account. This includes settings that start or stop remote control of shared screens, or start or stop the dial-out option in the Amazon Chime web application. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" }, "AccountType":{ "type":"string", @@ -1145,5 +1145,5 @@ "member":{"shape":"User"} } }, - "documentation":"

The Amazon Chime API (application programming interface) is designed for administrators to use to perform key tasks, such as creating and managing Amazon Chime accounts and users. This guide provides detailed information about the Amazon Chime API, including operations, types, inputs and outputs, and error codes.

You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. Each API operation includes links to information about using it with a language-specific AWS SDK or the AWS CLI.

Using an AWS SDK

You don't need to write code to calculate a signature for request authentication. The SDK clients authenticate your requests by using access keys that you provide. For more information about AWS SDKs, see the AWS Developer Center.

Using the AWS CLI

Use your access keys with the AWS CLI to make API calls. For information about setting up the AWS CLI, see Installing the AWS Command Line Interface in the AWS Command Line Interface User Guide. For a list of available Amazon Chime commands, see the Amazon Chime commands in the AWS CLI Command Reference.

Using REST API

If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Chime supports signature version 4. For more information, see Signature Version 4 Signing Process in the Amazon Web Services General Reference.

When making REST API calls, use the service name chime and REST endpoint https://service.chime.aws.amazon.com.

Administrative permissions are controlled using AWS Identity and Access Management (IAM). For more information, see Control Access to the Amazon Chime Console in the Amazon Chime Administration Guide.

" + "documentation":"

The Amazon Chime API (application programming interface) is designed for administrators to use to perform key tasks, such as creating and managing Amazon Chime accounts and users. This guide provides detailed information about the Amazon Chime API, including operations, types, inputs and outputs, and error codes.

You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. Each API operation includes links to information about using it with a language-specific AWS SDK or the AWS CLI.

Using an AWS SDK

You don't need to write code to calculate a signature for request authentication. The SDK clients authenticate your requests by using access keys that you provide. For more information about AWS SDKs, see the AWS Developer Center.

Using the AWS CLI

Use your access keys with the AWS CLI to make API calls. For information about setting up the AWS CLI, see Installing the AWS Command Line Interface in the AWS Command Line Interface User Guide. For a list of available Amazon Chime commands, see the Amazon Chime commands in the AWS CLI Command Reference.

Using REST API

If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Chime supports signature version 4. For more information, see Signature Version 4 Signing Process in the Amazon Web Services General Reference.

When making REST API calls, use the service name chime and REST endpoint https://service.chime.aws.amazon.com.

Administrative permissions are controlled using AWS Identity and Access Management (IAM). For more information, see Control Access to the Amazon Chime Console in the Amazon Chime Administration Guide.

" } diff --git a/botocore/data/cloud9/2017-09-23/service-2.json b/botocore/data/cloud9/2017-09-23/service-2.json index 46ef6d69..607d192a 100644 --- a/botocore/data/cloud9/2017-09-23/service-2.json +++ b/botocore/data/cloud9/2017-09-23/service-2.json @@ -459,13 +459,18 @@ "ownerArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the environment owner.

" + }, + "lifecycle":{ + "shape":"EnvironmentLifecycle", + "documentation":"

The state of the environment in its creation or deletion lifecycle.

" } }, "documentation":"

Information about an AWS Cloud9 development environment.

" }, "EnvironmentDescription":{ "type":"string", - "max":200 + "max":200, + "sensitive":true }, "EnvironmentId":{ "type":"string", @@ -475,6 +480,32 @@ "type":"list", "member":{"shape":"EnvironmentId"} }, + "EnvironmentLifecycle":{ + "type":"structure", + "members":{ + "status":{ + "shape":"EnvironmentLifecycleStatus", + "documentation":"

The current creation or deletion lifecycle state of the environment.

" + }, + "reason":{ + "shape":"String", + "documentation":"

Any informational message about the lifecycle state of the environment.

" + }, + "failureResource":{ + "shape":"String", + "documentation":"

If the environment failed to delete, the Amazon Resource Name (ARN) of the related AWS resource.

" + } + }, + "documentation":"

Information about the current creation or deletion lifecycle state of an AWS Cloud9 development environment.

" + }, + "EnvironmentLifecycleStatus":{ + "type":"string", + "enum":[ + "CREATED", + "DELETING", + "DELETE_FAILED" + ] + }, "EnvironmentList":{ "type":"list", "member":{"shape":"Environment"} @@ -689,7 +720,7 @@ }, "UserArn":{ "type":"string", - "pattern":"arn:aws:(iam|sts)::\\d+:\\S+" + "pattern":"arn:aws:(iam|sts)::\\d+:(root|user|federated-user|assumed-role)\\/?\\S*" } }, "documentation":"AWS Cloud9

AWS Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and release software in the cloud.

For more information about AWS Cloud9, see the AWS Cloud9 User Guide.

AWS Cloud9 supports these operations:

" diff --git a/botocore/data/clouddirectory/2017-01-11/paginators-1.json b/botocore/data/clouddirectory/2017-01-11/paginators-1.json index 22cc439e..5a06fb0b 100644 --- a/botocore/data/clouddirectory/2017-01-11/paginators-1.json +++ b/botocore/data/clouddirectory/2017-01-11/paginators-1.json @@ -95,6 +95,24 @@ "output_token": "NextToken", "input_token": "NextToken", "limit_key": "MaxResults" + }, + "ListIncomingTypedLinks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LinkSpecifiers" + }, + "ListManagedSchemaArns": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SchemaArns" + }, + "ListOutgoingTypedLinks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TypedLinkSpecifiers" } } } diff --git a/botocore/data/cloudformation/2010-05-15/paginators-1.json b/botocore/data/cloudformation/2010-05-15/paginators-1.json index 8485fd56..90f678f8 100644 --- a/botocore/data/cloudformation/2010-05-15/paginators-1.json +++ b/botocore/data/cloudformation/2010-05-15/paginators-1.json @@ -1,5 +1,31 @@ { "pagination": { + "DescribeAccountLimits": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "AccountLimits" + }, + "DescribeChangeSet": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Changes", + "non_aggregate_keys": [ + "ChangeSetName", + "ChangeSetId", + "StackId", + "StackName", + "Description", + "Parameters", + "CreationTime", + "ExecutionStatus", + "Status", + "StatusReason", + "NotificationARNs", + "RollbackConfiguration", + "Capabilities", + "Tags" + ] + }, "DescribeStackEvents": { "input_token": "NextToken", "output_token": "NextToken", @@ -10,6 +36,17 @@ "output_token": "NextToken", "result_key": "Stacks" }, + "ListChangeSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Summaries" + }, + 
"ListStackInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Summaries" + }, "ListStackResources": { "input_token": "NextToken", "output_token": "NextToken", @@ -20,6 +57,24 @@ "output_token": "NextToken", "result_key": "StackSummaries" }, + "ListStackSetOperationResults": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Summaries" + }, + "ListStackSetOperations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Summaries" + }, + "ListStackSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Summaries" + }, "ListExports": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/botocore/data/cloudhsm/2014-05-30/paginators-1.json b/botocore/data/cloudhsm/2014-05-30/paginators-1.json index ea142457..3dedddf1 100644 --- a/botocore/data/cloudhsm/2014-05-30/paginators-1.json +++ b/botocore/data/cloudhsm/2014-05-30/paginators-1.json @@ -1,3 +1,19 @@ { - "pagination": {} + "pagination": { + "ListHapgs": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "HapgList" + }, + "ListHsms": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "HsmList" + }, + "ListLunaClients": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "ClientList" + } + } } diff --git a/botocore/data/cloudtrail/2013-11-01/paginators-1.json b/botocore/data/cloudtrail/2013-11-01/paginators-1.json index b550340c..896776b5 100644 --- a/botocore/data/cloudtrail/2013-11-01/paginators-1.json +++ b/botocore/data/cloudtrail/2013-11-01/paginators-1.json @@ -5,6 +5,16 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Events" + }, + "ListPublicKeys": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "PublicKeyList" + }, 
+ "ListTags": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "ResourceTagList" } } } diff --git a/botocore/data/cloudwatch/2010-08-01/paginators-1.json b/botocore/data/cloudwatch/2010-08-01/paginators-1.json index b0bf527f..350b6368 100644 --- a/botocore/data/cloudwatch/2010-08-01/paginators-1.json +++ b/botocore/data/cloudwatch/2010-08-01/paginators-1.json @@ -21,6 +21,12 @@ "input_token": "NextToken", "output_token": "NextToken", "result_key": "Metrics" + }, + "GetMetricData": { + "input_token": "NextToken", + "limit_key": "MaxDatapoints", + "output_token": "NextToken", + "result_key": "MetricDataResults" } } } diff --git a/botocore/data/cloudwatch/2010-08-01/service-2.json b/botocore/data/cloudwatch/2010-08-01/service-2.json index 08811096..2377e401 100644 --- a/botocore/data/cloudwatch/2010-08-01/service-2.json +++ b/botocore/data/cloudwatch/2010-08-01/service-2.json @@ -137,7 +137,7 @@ "errors":[ {"shape":"InvalidNextToken"} ], - "documentation":"

You can use the GetMetricData API to retrieve as many as 100 different metrics in a single request, with a total of as many as 100,800 datapoints. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

" + "documentation":"

You can use the GetMetricData API to retrieve as many as 100 different metrics in a single request, with a total of as many as 100,800 datapoints. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

" }, "GetMetricStatistics":{ "name":"GetMetricStatistics", @@ -156,7 +156,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Gets statistics for the specified metric.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

Percentile statistics are not available for metrics when any of the metric values are negative numbers.

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

For information about metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

" + "documentation":"

Gets statistics for the specified metric.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

Percentile statistics are not available for metrics when any of the metric values are negative numbers.

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

For information about metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

" }, "GetMetricWidgetImage":{ "name":"GetMetricWidgetImage", @@ -232,7 +232,7 @@ "errors":[ {"shape":"LimitExceededFault"} ], - "documentation":"

Creates or updates an alarm and associates it with the specified metric or metric math expression.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.

If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.

The first time you create an alarm in the AWS Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked role is called AWSServiceRoleForCloudWatchEvents. For more information, see AWS service-linked role.

" + "documentation":"

Creates or updates an alarm and associates it with the specified metric or metric math expression.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.

If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.

The first time you create an alarm in the AWS Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked role is called AWSServiceRoleForCloudWatchEvents. For more information, see AWS service-linked role.

" }, "PutMetricData":{ "name":"PutMetricData", @@ -247,7 +247,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

Each PutMetricData request is limited to 40 KB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 20 different metrics.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 10 dimensions per metric to further clarify what data the metric collects. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

" + "documentation":"

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

Each PutMetricData request is limited to 40 KB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 20 different metrics.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 10 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

" }, "SetAlarmState":{ "name":"SetAlarmState", @@ -784,11 +784,11 @@ }, "StartTime":{ "shape":"Timestamp", - "documentation":"

The time stamp indicating the earliest data to be returned.

For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get a faster response from CloudWatch then setting 12:07 or 12:29 as the StartTime.

" + "documentation":"

The time stamp indicating the earliest data to be returned.

For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get a faster response from CloudWatch than setting 12:07 or 12:29 as the StartTime.

" }, "EndTime":{ "shape":"Timestamp", - "documentation":"

The time stamp indicating the latest data to be returned.

For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as EndTime can get a faster response from CloudWatch then setting 12:07 or 12:29 as the EndTime.

" + "documentation":"

The time stamp indicating the latest data to be returned.

For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as EndTime can get a faster response from CloudWatch than setting 12:07 or 12:29 as the EndTime.

" }, "NextToken":{ "shape":"NextToken", @@ -838,7 +838,7 @@ }, "Dimensions":{ "shape":"Dimensions", - "documentation":"

The dimensions. If the metric contains multiple dimensions, you must include a value for each dimension. CloudWatch treats each unique combination of dimensions as a separate metric. If a specific combination of dimensions was not published, you can't retrieve statistics for it. You must specify the same dimensions that were used when the metrics were created. For an example, see Dimension Combinations in the Amazon CloudWatch User Guide. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

" + "documentation":"

The dimensions. If the metric contains multiple dimensions, you must include a value for each dimension. CloudWatch treats each unique combination of dimensions as a separate metric. If a specific combination of dimensions was not published, you can't retrieve statistics for it. You must specify the same dimensions that were used when the metrics were created. For an example, see Dimension Combinations in the Amazon CloudWatch User Guide. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

" }, "StartTime":{ "shape":"Timestamp", @@ -889,7 +889,7 @@ }, "OutputFormat":{ "shape":"OutputFormat", - "documentation":"

The format of the resulting image. Only PNG images are supported.

The default is png. If you specify png, the API returns an HTTP response with the content-type set to text/xml. The image data is in a MetricWidgetImage field. For example:

<GetMetricWidgetImageResponse xmlns=\"http://monitoring.amazonaws.com/doc/2010-08-01/\">

<GetMetricWidgetImageResult>

<MetricWidgetImage>

iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip...

</MetricWidgetImage>

</GetMetricWidgetImageResult>

<ResponseMetadata>

<RequestId>6f0d4192-4d42-11e8-82c1-f539a07e0e3b</RequestId>

</ResponseMetadata>

</GetMetricWidgetImageResponse>

The image/png setting is intended only for custom HTTP requests. For most use cases, and all actions using an AWS SDK, you should use png. If you specify image/png, the HTTP response has a content-type set to image/png, and the body of the response is a PNG image.

" + "documentation":"

The format of the resulting image. Only PNG images are supported.

The default is png. If you specify png, the API returns an HTTP response with the content-type set to text/xml. The image data is in a MetricWidgetImage field. For example:

<GetMetricWidgetImageResponse xmlns=<URLstring>>

<GetMetricWidgetImageResult>

<MetricWidgetImage>

iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip...

</MetricWidgetImage>

</GetMetricWidgetImageResult>

<ResponseMetadata>

<RequestId>6f0d4192-4d42-11e8-82c1-f539a07e0e3b</RequestId>

</ResponseMetadata>

</GetMetricWidgetImageResponse>

The image/png setting is intended only for custom HTTP requests. For most use cases, and all actions using an AWS SDK, you should use png. If you specify image/png, the HTTP response has a content-type set to image/png, and the body of the response is a PNG image.

" } } }, @@ -1292,7 +1292,7 @@ }, "Expression":{ "shape":"MetricExpression", - "documentation":"

The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the Id of the other metrics to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both.

" + "documentation":"

The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the Id of the other metrics to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both.

" }, "Label":{ "shape":"MetricLabel", @@ -1303,7 +1303,7 @@ "documentation":"

When used in GetMetricData, this option indicates whether to return the timestamps and raw data values of this metric. If you are performing this call just to do math expressions and do not also need the raw data returned, you can specify False. If you omit this, the default of True is used.

When used in PutMetricAlarm, specify True for the one expression result to use as the alarm. For all other metrics and expressions in the same PutMetricAlarm operation, specify ReturnData as False.

" } }, - "documentation":"

This structure is used in both GetMetricData and PutMetricAlarm. The supported use of this structure is different for those two operations.

When used in GetMetricData, it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a math expression on metric data. A single GetMetricData call can include up to 100 MetricDataQuery structures.

When used in PutMetricAlarm, it enables you to create an alarm based on a metric math expression. Each MetricDataQuery in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm call can include up to 20 MetricDataQuery structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that contain the Expression parameter to perform a math expression. Any expression used in a PutMetricAlarm operation must return a single time series. For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData operation or a PutMetricAlarm operation. These differences are explained in the following parameter list.

" + "documentation":"

This structure is used in both GetMetricData and PutMetricAlarm. The supported use of this structure is different for those two operations.

When used in GetMetricData, it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a math expression on metric data. A single GetMetricData call can include up to 100 MetricDataQuery structures.

When used in PutMetricAlarm, it enables you to create an alarm based on a metric math expression. Each MetricDataQuery in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm call can include up to 20 MetricDataQuery structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that contain the Expression parameter to perform a math expression. Of those Expression structures, one must have True as the value for ReturnData. The result of this expression is the value the alarm watches.

Any expression used in a PutMetricAlarm operation must return a single time series. For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData operation or a PutMetricAlarm operation. These differences are explained in the following parameter list.

" }, "MetricDataResult":{ "type":"structure", @@ -1381,7 +1381,7 @@ }, "StorageResolution":{ "shape":"StorageResolution", - "documentation":"

Valid values are 1 and 60. Setting this to 1 specifies this metric as a high-resolution metric, so that CloudWatch stores the metric with sub-minute resolution down to one second. Setting this to 60 specifies this metric as a regular-resolution metric, which CloudWatch stores at 1-minute resolution. Currently, high resolution is available only for custom metrics. For more information about high-resolution metrics, see High-Resolution Metrics in the Amazon CloudWatch User Guide.

This field is optional, if you do not specify it the default of 60 is used.

" + "documentation":"

Valid values are 1 and 60. Setting this to 1 specifies this metric as a high-resolution metric, so that CloudWatch stores the metric with sub-minute resolution down to one second. Setting this to 60 specifies this metric as a regular-resolution metric, which CloudWatch stores at 1-minute resolution. Currently, high resolution is available only for custom metrics. For more information about high-resolution metrics, see High-Resolution Metrics in the Amazon CloudWatch User Guide.

This field is optional. If you do not specify it, the default of 60 is used.

" } }, "documentation":"

Encapsulates the information sent to either create a metric or add new values to be aggregated into an existing metric.

" @@ -1521,11 +1521,11 @@ }, "AlarmActions":{ "shape":"ResourceList", - "documentation":"

The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-idautoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" + "documentation":"

The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:automate:region:ec2:reboot | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" }, "InsufficientDataActions":{ "shape":"ResourceList", - "documentation":"

The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-idautoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): >arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" + "documentation":"

The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:automate:region:ec2:reboot | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" }, "MetricName":{ "shape":"MetricName", @@ -1561,7 +1561,7 @@ }, "DatapointsToAlarm":{ "shape":"DatapointsToAlarm", - "documentation":"

The number of datapoints that must be breaching to trigger the alarm. This is used only if you are setting an \"M out of N\" alarm. In that case, this value is the M. For more information, see Evaluating an Alarm in the Amazon CloudWatch User Guide.

" + "documentation":"

The number of datapoints that must be breaching to trigger the alarm. This is used only if you are setting an \"M out of N\" alarm. In that case, this value is the M. For more information, see Evaluating an Alarm in the Amazon CloudWatch User Guide.

" }, "Threshold":{ "shape":"Threshold", @@ -1573,15 +1573,15 @@ }, "TreatMissingData":{ "shape":"TreatMissingData", - "documentation":"

Sets how this alarm is to handle missing data points. If TreatMissingData is omitted, the default behavior of missing is used. For more information, see Configuring How CloudWatch Alarms Treats Missing Data.

Valid Values: breaching | notBreaching | ignore | missing

" + "documentation":"

Sets how this alarm is to handle missing data points. If TreatMissingData is omitted, the default behavior of missing is used. For more information, see Configuring How CloudWatch Alarms Treats Missing Data.

Valid Values: breaching | notBreaching | ignore | missing

" }, "EvaluateLowSampleCountPercentile":{ "shape":"EvaluateLowSampleCountPercentile", - "documentation":"

Used only for alarms based on percentiles. If you specify ignore, the alarm state does not change during periods with too few data points to be statistically significant. If you specify evaluate or omit this parameter, the alarm is always evaluated and possibly changes state no matter how many data points are available. For more information, see Percentile-Based CloudWatch Alarms and Low Data Samples.

Valid Values: evaluate | ignore

" + "documentation":"

Used only for alarms based on percentiles. If you specify ignore, the alarm state does not change during periods with too few data points to be statistically significant. If you specify evaluate or omit this parameter, the alarm is always evaluated and possibly changes state no matter how many data points are available. For more information, see Percentile-Based CloudWatch Alarms and Low Data Samples.

Valid Values: evaluate | ignore

" }, "Metrics":{ "shape":"MetricDataQueries", - "documentation":"

An array of MetricDataQuery structures that enable you to create an alarm based on the result of a metric math expression. Each item in the Metrics array either retrieves a metric or performs a math expression.

If you use the Metrics parameter, you cannot include the MetricName, Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters of PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are using in your math expression as part of the Metrics array.

" + "documentation":"

An array of MetricDataQuery structures that enable you to create an alarm based on the result of a metric math expression. Each item in the Metrics array either retrieves a metric or performs a math expression.

One item in the Metrics array is the expression that the alarm watches. You designate this expression by setting ReturnData to true for this object in the array. For more information, see MetricDataQuery.

If you use the Metrics parameter, you cannot include the MetricName, Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters of PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are using in your math expression as part of the Metrics array.

" } } }, diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index 896d5bba..2e90b387 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -80,7 +80,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds are created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you are billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in AWS CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.

" + "documentation":"

For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds are created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you are billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in AWS CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.

" }, "DeleteProject":{ "name":"DeleteProject", @@ -603,11 +603,20 @@ "type":"list", "member":{"shape":"BuildNotDeleted"} }, + "CacheMode":{ + "type":"string", + "enum":[ + "LOCAL_DOCKER_LAYER_CACHE", + "LOCAL_SOURCE_CACHE", + "LOCAL_CUSTOM_CACHE" + ] + }, "CacheType":{ "type":"string", "enum":[ "NO_CACHE", - "S3" + "S3", + "LOCAL" ] }, "CloudWatchLogsConfig":{ @@ -620,11 +629,11 @@ }, "groupName":{ "shape":"String", - "documentation":"

The group name of the logs in Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

" + "documentation":"

The group name of the logs in Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

" }, "streamName":{ "shape":"String", - "documentation":"

The prefix of the stream name of the Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

" + "documentation":"

The prefix of the stream name of the Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

" } }, "documentation":"

Information about Amazon CloudWatch Logs for a build project.

" @@ -732,7 +741,11 @@ }, "branchFilter":{ "shape":"String", - "documentation":"

A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.

" + "documentation":"

A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.

It is recommended that you use filterGroups instead of branchFilter.

" + }, + "filterGroups":{ + "shape":"FilterGroups", + "documentation":"

An array of arrays of WebhookFilter objects used to determine which webhooks are triggered. At least one WebhookFilter in the array must specify EVENT as its type.

For a build to be triggered, at least one filter group in the filterGroups array must pass. For a filter group to pass, each of its filters must pass.

" } } }, @@ -745,6 +758,10 @@ } } }, + "CredentialProviderType":{ + "type":"string", + "enum":["SECRETS_MANAGER"] + }, "DeleteProjectInput":{ "type":"structure", "required":["name"], @@ -892,10 +909,25 @@ "type":"list", "member":{"shape":"EnvironmentVariable"} }, + "FilterGroup":{ + "type":"list", + "member":{"shape":"WebhookFilter"} + }, + "FilterGroups":{ + "type":"list", + "member":{"shape":"FilterGroup"} + }, "GitCloneDepth":{ "type":"integer", "min":0 }, + "ImagePullCredentialsType":{ + "type":"string", + "enum":[ + "CODEBUILD", + "SERVICE_ROLE" + ] + }, "ImageVersions":{ "type":"list", "member":{"shape":"String"} @@ -1351,15 +1383,23 @@ "members":{ "type":{ "shape":"CacheType", - "documentation":"

The type of cache used by the build project. Valid values include:

" + "documentation":"

The type of cache used by the build project. Valid values include:

" }, "location":{ "shape":"String", - "documentation":"

Information about the cache location:

" + "documentation":"

Information about the cache location:

" + }, + "modes":{ + "shape":"ProjectCacheModes", + "documentation":"

If you use a LOCAL cache, the local cache mode. You can use one or more local cache modes at the same time.

" } }, "documentation":"

Information about the cache for the build project.

" }, + "ProjectCacheModes":{ + "type":"list", + "member":{"shape":"CacheMode"} + }, "ProjectDescription":{ "type":"string", "max":255, @@ -1379,7 +1419,7 @@ }, "image":{ "shape":"NonEmptyString", - "documentation":"

The ID of the Docker image to use for this build project.

" + "documentation":"

The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:

" }, "computeType":{ "shape":"ComputeType", @@ -1396,6 +1436,14 @@ "certificate":{ "shape":"String", "documentation":"

The certificate to use with this build project.

" + }, + "registryCredential":{ + "shape":"RegistryCredential", + "documentation":"

The credentials for access to a private registry.

" + }, + "imagePullCredentialsType":{ + "shape":"ImagePullCredentialsType", + "documentation":"

The type of credentials AWS CodeBuild uses to pull images in your build. There are two valid values:

When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD credentials.

" } }, "documentation":"

Information about the build environment of the build project.

" @@ -1493,6 +1541,24 @@ "type":"list", "member":{"shape":"Project"} }, + "RegistryCredential":{ + "type":"structure", + "required":[ + "credential", + "credentialProvider" + ], + "members":{ + "credential":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets Manager.

The credential can use the name of the credentials only if they exist in your current region.

" + }, + "credentialProvider":{ + "shape":"CredentialProviderType", + "documentation":"

The service that created the credentials to access a private Docker registry. The valid value, SECRETS_MANAGER, is for AWS Secrets Manager.

" + } + }, + "documentation":"

Information about credentials that provide access to a private Docker registry. When this is set:

For more information, see Private Registry with AWS Secrets Manager Sample for AWS CodeBuild.

" + }, "ResourceAlreadyExistsException":{ "type":"structure", "members":{ @@ -1703,6 +1769,14 @@ "logsConfigOverride":{ "shape":"LogsConfig", "documentation":"

Log settings for this build that override the log settings defined in the build project.

" + }, + "registryCredentialOverride":{ + "shape":"RegistryCredential", + "documentation":"

The credentials for access to a private registry.

" + }, + "imagePullCredentialsTypeOverride":{ + "shape":"ImagePullCredentialsType", + "documentation":"

The type of credentials AWS CodeBuild uses to pull images in your build. There are two valid values:

When using a cross-account or private registry image, you must use SERVICE_ROLE credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD credentials.

" } } }, @@ -1866,11 +1940,15 @@ }, "branchFilter":{ "shape":"String", - "documentation":"

A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.

" + "documentation":"

A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.

It is recommended that you use filterGroups instead of branchFilter.

" }, "rotateSecret":{ "shape":"Boolean", "documentation":"

A boolean value that specifies whether the associated GitHub repository's secret token should be updated. If you use Bitbucket for your repository, rotateSecret is ignored.

" + }, + "filterGroups":{ + "shape":"FilterGroups", + "documentation":"

An array of arrays of WebhookFilter objects used to determine if a webhook event can trigger a build. A filter group must contain at least one EVENT WebhookFilter.

" } } }, @@ -1924,7 +2002,11 @@ }, "branchFilter":{ "shape":"String", - "documentation":"

A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.

" + "documentation":"

A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.

It is recommended that you use filterGroups instead of branchFilter.

" + }, + "filterGroups":{ + "shape":"FilterGroups", + "documentation":"

An array of arrays of WebhookFilter objects used to determine which webhooks are triggered. At least one WebhookFilter in the array must specify EVENT as its type.

For a build to be triggered, at least one filter group in the filterGroups array must pass. For a filter group to pass, each of its filters must pass.

" }, "lastModifiedSecret":{ "shape":"Timestamp", @@ -1933,6 +2015,38 @@ }, "documentation":"

Information about a webhook that connects repository events to a build project in AWS CodeBuild.

" }, + "WebhookFilter":{ + "type":"structure", + "required":[ + "type", + "pattern" + ], + "members":{ + "type":{ + "shape":"WebhookFilterType", + "documentation":"

The type of webhook filter. There are five webhook filter types: EVENT, ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF, and FILE_PATH.

EVENT

A webhook event triggers a build when the provided pattern matches one of four event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, and PULL_REQUEST_REOPENED. The EVENT patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request created, and pull request updated events.

The PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only.

ACTOR_ACCOUNT_ID

A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern.

HEAD_REF

A webhook event triggers a build when the head reference matches the regular expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name.

Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.

BASE_REF

A webhook event triggers a build when the base reference matches the regular expression pattern. For example, refs/heads/branch-name.

Works with pull request events only.

FILE_PATH

A webhook triggers a build when the path of a changed file matches the regular expression pattern.

Works with GitHub and GitHub Enterprise push events only.

" + }, + "pattern":{ + "shape":"String", + "documentation":"

For a WebHookFilter that uses EVENT type, a comma-separated string that specifies one or more events. For example, the webhook filter PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED allows all push, pull request created, and pull request updated events to trigger a build.

For a WebHookFilter that uses any of the other filter types, a regular expression pattern. For example, a WebHookFilter that uses HEAD_REF for its type and the pattern ^refs/heads/ triggers a build when the head reference is a branch with a reference name refs/heads/branch-name.

" + }, + "excludeMatchedPattern":{ + "shape":"WrapperBoolean", + "documentation":"

Used to indicate that the pattern determines which webhook events do not trigger a build. If true, then a webhook event that does not match the pattern triggers a build. If false, then a webhook event that matches the pattern triggers a build.

" + } + }, + "documentation":"

A filter used to determine which webhooks trigger a build.

" + }, + "WebhookFilterType":{ + "type":"string", + "enum":[ + "EVENT", + "BASE_REF", + "HEAD_REF", + "ACTOR_ACCOUNT_ID", + "FILE_PATH" + ] + }, "WrapperBoolean":{"type":"boolean"}, "WrapperInt":{"type":"integer"}, "WrapperLong":{"type":"long"} diff --git a/botocore/data/codecommit/2015-04-13/service-2.json b/botocore/data/codecommit/2015-04-13/service-2.json index c6968c3f..b6d032f5 100644 --- a/botocore/data/codecommit/2015-04-13/service-2.json +++ b/botocore/data/codecommit/2015-04-13/service-2.json @@ -58,6 +58,56 @@ ], "documentation":"

Creates a new branch in a repository and points the branch to a commit.

Calling the create branch operation does not set a repository's default branch. To do this, call the update default branch operation.

" }, + "CreateCommit":{ + "name":"CreateCommit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCommitInput"}, + "output":{"shape":"CreateCommitOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"ParentCommitIdRequiredException"}, + {"shape":"InvalidParentCommitIdException"}, + {"shape":"ParentCommitDoesNotExistException"}, + {"shape":"ParentCommitIdOutdatedException"}, + {"shape":"BranchNameRequiredException"}, + {"shape":"InvalidBranchNameException"}, + {"shape":"BranchDoesNotExistException"}, + {"shape":"BranchNameIsTagNameException"}, + {"shape":"FileEntryRequiredException"}, + {"shape":"MaximumFileEntriesExceededException"}, + {"shape":"PutFileEntryConflictException"}, + {"shape":"SourceFileOrContentRequiredException"}, + {"shape":"FileContentAndSourceFileSpecifiedException"}, + {"shape":"PathRequiredException"}, + {"shape":"InvalidPathException"}, + {"shape":"SamePathRequestException"}, + {"shape":"FileDoesNotExistException"}, + {"shape":"FileContentSizeLimitExceededException"}, + {"shape":"FolderContentSizeLimitExceededException"}, + {"shape":"InvalidDeletionParameterException"}, + {"shape":"RestrictedSourceFileException"}, + {"shape":"FileModeRequiredException"}, + {"shape":"InvalidFileModeException"}, + {"shape":"NameLengthExceededException"}, + {"shape":"InvalidEmailException"}, + {"shape":"CommitMessageLengthExceededException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"}, + {"shape":"NoChangeException"}, + {"shape":"FileNameConflictsWithDirectoryNameException"}, + {"shape":"DirectoryNameConflictsWithFileNameException"}, + {"shape":"FilePathConflictsWithSubmodulePathException"} + ], + "documentation":"

Creates a commit for a repository on the tip of a specified branch.

" + }, "CreatePullRequest":{ "name":"CreatePullRequest", "http":{ @@ -746,6 +796,7 @@ {"shape":"ParentCommitIdOutdatedException"}, {"shape":"FileContentRequiredException"}, {"shape":"FileContentSizeLimitExceededException"}, + {"shape":"FolderContentSizeLimitExceededException"}, {"shape":"PathRequiredException"}, {"shape":"InvalidPathException"}, {"shape":"BranchNameRequiredException"}, @@ -764,7 +815,8 @@ {"shape":"EncryptionKeyUnavailableException"}, {"shape":"SameFileContentException"}, {"shape":"FileNameConflictsWithDirectoryNameException"}, - {"shape":"DirectoryNameConflictsWithFileNameException"} + {"shape":"DirectoryNameConflictsWithFileNameException"}, + {"shape":"FilePathConflictsWithSubmodulePathException"} ], "documentation":"

Adds or updates a file in a branch in an AWS CodeCommit repository, and generates a commit for the addition in the specified branch.

" }, @@ -1380,6 +1432,80 @@ }, "documentation":"

Represents the input of a create branch operation.

" }, + "CreateCommitInput":{ + "type":"structure", + "required":[ + "repositoryName", + "branchName" + ], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository where you will create the commit.

" + }, + "branchName":{ + "shape":"BranchName", + "documentation":"

The name of the branch where you will create the commit.

" + }, + "parentCommitId":{ + "shape":"CommitId", + "documentation":"

The ID of the commit that is the parent of the commit you will create. If this is an empty repository, this is not required.

" + }, + "authorName":{ + "shape":"Name", + "documentation":"

The name of the author who created the commit. This information will be used as both the author and committer for the commit.

" + }, + "email":{ + "shape":"Email", + "documentation":"

The email address of the person who created the commit.

" + }, + "commitMessage":{ + "shape":"Message", + "documentation":"

The commit message you want to include as part of creating the commit. Commit messages are limited to 256 KB. If no message is specified, a default message will be used.

" + }, + "keepEmptyFolders":{ + "shape":"KeepEmptyFolders", + "documentation":"

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders.

" + }, + "putFiles":{ + "shape":"PutFileEntries", + "documentation":"

The files to add or update in this commit.

" + }, + "deleteFiles":{ + "shape":"DeleteFileEntries", + "documentation":"

The files to delete in this commit. These files will still exist in prior commits.

" + }, + "setFileModes":{ + "shape":"SetFileModeEntries", + "documentation":"

The file modes to update for files in this commit.

" + } + } + }, + "CreateCommitOutput":{ + "type":"structure", + "members":{ + "commitId":{ + "shape":"ObjectId", + "documentation":"

The full commit ID of the commit that contains your committed file changes.

" + }, + "treeId":{ + "shape":"ObjectId", + "documentation":"

The full SHA-1 pointer of the tree information for the commit that contains the committed file changes.

" + }, + "filesAdded":{ + "shape":"FilesMetadata", + "documentation":"

The files added as part of the committed file changes.

" + }, + "filesUpdated":{ + "shape":"FilesMetadata", + "documentation":"

The files updated as part of the committed file changes.

" + }, + "filesDeleted":{ + "shape":"FilesMetadata", + "documentation":"

The files deleted as part of the committed file changes.

" + } + } + }, "CreatePullRequestInput":{ "type":"structure", "required":[ @@ -1497,6 +1623,21 @@ } } }, + "DeleteFileEntries":{ + "type":"list", + "member":{"shape":"DeleteFileEntry"} + }, + "DeleteFileEntry":{ + "type":"structure", + "required":["filePath"], + "members":{ + "filePath":{ + "shape":"Path", + "documentation":"

The full path of the file that will be deleted, including the name of the file.

" + } + }, + "documentation":"

A file that will be deleted as part of a commit.

" + }, "DeleteFileInput":{ "type":"structure", "required":[ @@ -1725,6 +1866,13 @@ "type":"blob", "max":6291456 }, + "FileContentAndSourceFileSpecifiedException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The commit cannot be created because both a source file and file content have been specified for the same file. You cannot provide both. Either specify a source file, or provide the file content directly.

", + "exception":true + }, "FileContentRequiredException":{ "type":"structure", "members":{ @@ -1736,7 +1884,7 @@ "type":"structure", "members":{ }, - "documentation":"

The file cannot be added because it is too large. The maximum file size that can be added using PutFile is 6 MB. For files larger than 6 MB but smaller than 2 GB, add them using a Git client.

", + "documentation":"

The file cannot be added because it is too large. The maximum file size that can be added using PutFile is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.

", "exception":true }, "FileDoesNotExistException":{ @@ -1746,10 +1894,42 @@ "documentation":"

The specified file does not exist. Verify that you have provided the correct name of the file, including its full path and extension.

", "exception":true }, + "FileEntryRequiredException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The commit cannot be created because no files have been specified as added, updated, or changed (PutFile or DeleteFile) for the commit.

", + "exception":true + }, "FileList":{ "type":"list", "member":{"shape":"File"} }, + "FileMetadata":{ + "type":"structure", + "members":{ + "absolutePath":{ + "shape":"Path", + "documentation":"

The full path to the file that will be added or updated, including the name of the file.

" + }, + "blobId":{ + "shape":"ObjectId", + "documentation":"

The blob ID that contains the file information.

" + }, + "fileMode":{ + "shape":"FileModeTypeEnum", + "documentation":"

The extrapolated file mode permissions for the file. Valid values include EXECUTABLE and NORMAL.

" + } + }, + "documentation":"

A file that will be added, updated, or deleted as part of a commit.

" + }, + "FileModeRequiredException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The commit cannot be created because a file mode is required to update mode permissions for an existing file, but no file mode has been specified.

", + "exception":true + }, "FileModeTypeEnum":{ "type":"string", "enum":[ @@ -1765,6 +1945,13 @@ "documentation":"

A file cannot be added to the repository because the specified file name has the same name as a directory in this repository. Either provide another name for the file, or add the file in a directory that does not match the file name.

", "exception":true }, + "FilePathConflictsWithSubmodulePathException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The commit cannot be created because a specified file path points to a submodule. Verify that the destination files have valid file paths that do not point to a submodule.

", + "exception":true + }, "FileTooLargeException":{ "type":"structure", "members":{ @@ -1772,6 +1959,10 @@ "documentation":"

The specified file exceeds the file size limit for AWS CodeCommit. For more information about limits in AWS CodeCommit, see AWS CodeCommit User Guide.

", "exception":true }, + "FilesMetadata":{ + "type":"list", + "member":{"shape":"FileMetadata"} + }, "Folder":{ "type":"structure", "members":{ @@ -1790,6 +1981,13 @@ }, "documentation":"

Returns information about a folder in a repository.

" }, + "FolderContentSizeLimitExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The commit cannot be created because at least one of the overall changes in the commit result in a folder contents exceeding the limit of 6 MB. Either reduce the number and size of your changes, or split the changes across multiple folders.

", + "exception":true + }, "FolderDoesNotExistException":{ "type":"structure", "members":{ @@ -2558,6 +2756,7 @@ "IsCommentDeleted":{"type":"boolean"}, "IsMergeable":{"type":"boolean"}, "IsMerged":{"type":"boolean"}, + "IsMove":{"type":"boolean"}, "KeepEmptyFolders":{"type":"boolean"}, "LastModifiedDate":{"type":"timestamp"}, "Limit":{ @@ -2698,6 +2897,13 @@ "documentation":"

The number of branches for the trigger was exceeded.

", "exception":true }, + "MaximumFileEntriesExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The number of specified files to change as part of this commit exceeds the maximum number of files that can be changed in a single commit. Consider using a Git client for these changes.

", + "exception":true + }, "MaximumOpenPullRequestsExceededException":{ "type":"structure", "members":{ @@ -2792,6 +2998,13 @@ "exception":true }, "NextToken":{"type":"string"}, + "NoChangeException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The commit cannot be created because no changes will be made to the repository as a result of this commit. A commit must contain at least one change.

", + "exception":true + }, "ObjectId":{"type":"string"}, "ObjectSize":{"type":"long"}, "OrderEnum":{ @@ -3262,6 +3475,40 @@ "type":"list", "member":{"shape":"PullRequestTarget"} }, + "PutFileEntries":{ + "type":"list", + "member":{"shape":"PutFileEntry"} + }, + "PutFileEntry":{ + "type":"structure", + "required":["filePath"], + "members":{ + "filePath":{ + "shape":"Path", + "documentation":"

The full path to the file in the repository, including the name of the file.

" + }, + "fileMode":{ + "shape":"FileModeTypeEnum", + "documentation":"

The extrapolated file mode permissions for the file. Valid values include EXECUTABLE and NORMAL.

" + }, + "fileContent":{ + "shape":"FileContent", + "documentation":"

The content of the file, if a source file is not specified.

" + }, + "sourceFile":{ + "shape":"SourceFileSpecifier", + "documentation":"

The name and full path of the file that contains the changes you want to make as part of the commit, if you are not providing the file content directly.

" + } + }, + "documentation":"

Information about a file that will be added or updated as part of a commit.

" + }, + "PutFileEntryConflictException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The commit cannot be created because one or more files specified in the commit reference both a file and a folder.

", + "exception":true + }, "PutFileInput":{ "type":"structure", "required":[ @@ -3510,7 +3757,7 @@ "type":"structure", "members":{ }, - "documentation":"

The repository does not contain any pull requests with that pull request ID. Check to make sure you have provided the correct repository name for the pull request.

", + "documentation":"

The repository does not contain any pull requests with that pull request ID. Use GetPullRequest to verify the correct repository name for the pull request ID.

", "exception":true }, "RepositoryNotFoundList":{ @@ -3626,6 +3873,13 @@ "documentation":"

The list of triggers for the repository is required but was not specified.

", "exception":true }, + "RestrictedSourceFileException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The commit cannot be created because one of the changes specifies copying or moving a .gitkeep file.

", + "exception":true + }, "SameFileContentException":{ "type":"structure", "members":{ @@ -3633,6 +3887,35 @@ "documentation":"

The file was not added or updated because the content of the file is exactly the same as the content of that file in the repository and branch that you specified.

", "exception":true }, + "SamePathRequestException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The commit cannot be created because one or more changes in this commit duplicate actions in the same file path. For example, you cannot make the same delete request to the same file in the same file path twice, or make a delete request and a move request to the same file as part of the same commit.

", + "exception":true + }, + "SetFileModeEntries":{ + "type":"list", + "member":{"shape":"SetFileModeEntry"} + }, + "SetFileModeEntry":{ + "type":"structure", + "required":[ + "filePath", + "fileMode" + ], + "members":{ + "filePath":{ + "shape":"Path", + "documentation":"

The full path to the file, including the name of the file.

" + }, + "fileMode":{ + "shape":"FileModeTypeEnum", + "documentation":"

The file mode for the file.

" + } + }, + "documentation":"

Information about the file mode changes.

" + }, "SortByEnum":{ "type":"string", "enum":[ @@ -3647,6 +3930,28 @@ "documentation":"

The source branch and the destination branch for the pull request are the same. You must specify different branches for the source and destination.

", "exception":true }, + "SourceFileOrContentRequiredException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The commit cannot be created because no source files or file content have been specified for the commit.

", + "exception":true + }, + "SourceFileSpecifier":{ + "type":"structure", + "required":["filePath"], + "members":{ + "filePath":{ + "shape":"Path", + "documentation":"

The full path to the file, including the name of the file.

" + }, + "isMove":{ + "shape":"IsMove", + "documentation":"

Whether to remove the source file from the parent commit.

" + } + }, + "documentation":"

Information about a source file that is part of changes made in a commit.

" + }, "SubModule":{ "type":"structure", "members":{ @@ -3970,5 +4275,5 @@ }, "blob":{"type":"blob"} }, - "documentation":"AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

Branches, by calling the following:

Files, by calling the following:

Information about committed code in a repository, by calling the following:

Pull requests, by calling the following:

Information about comments in a repository, by calling the following:

Triggers, by calling the following:

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

" + "documentation":"AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

Branches, by calling the following:

Files, by calling the following:

Information about committed code in a repository, by calling the following:

Pull requests, by calling the following:

Information about comments in a repository, by calling the following:

Triggers, by calling the following:

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

" } diff --git a/botocore/data/codedeploy/2014-10-06/paginators-1.json b/botocore/data/codedeploy/2014-10-06/paginators-1.json index cea07e68..aae3fad3 100644 --- a/botocore/data/codedeploy/2014-10-06/paginators-1.json +++ b/botocore/data/codedeploy/2014-10-06/paginators-1.json @@ -29,6 +29,21 @@ "input_token": "nextToken", "output_token": "nextToken", "result_key": "deployments" + }, + "ListDeploymentTargets": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "targetIds" + }, + "ListGitHubAccountTokenNames": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "tokenNameList" + }, + "ListOnPremisesInstances": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "instanceNames" } } } diff --git a/botocore/data/codedeploy/2014-10-06/service-2.json b/botocore/data/codedeploy/2014-10-06/service-2.json index eeafd4e7..9159b4ca 100644 --- a/botocore/data/codedeploy/2014-10-06/service-2.json +++ b/botocore/data/codedeploy/2014-10-06/service-2.json @@ -101,7 +101,7 @@ {"shape":"BatchLimitExceededException"}, {"shape":"InvalidComputePlatformException"} ], - "documentation":"

This method works, but is considered deprecated. Use BatchGetDeploymentTargets instead.

Returns an array of instances associated with a deployment. This method works with EC2/On-premises and AWS Lambda compute platforms. The newer BatchGetDeploymentTargets works with all compute platforms.

", + "documentation":"

This method works, but is deprecated. Use BatchGetDeploymentTargets instead.

Returns an array of instances associated with a deployment. This method works with EC2/On-premises and AWS Lambda compute platforms. The newer BatchGetDeploymentTargets works with all compute platforms.

", "deprecated":true, "deprecatedMessage":"This operation is deprecated, use BatchGetDeploymentTargets instead." }, @@ -122,7 +122,7 @@ {"shape":"DeploymentTargetDoesNotExistException"}, {"shape":"DeploymentTargetListSizeExceededException"} ], - "documentation":"

Returns an array of targets associated with a deployment. This method works with all compute types and should be used instead of the deprecated BatchGetDeploymentInstances.

The type of targets returned depends on the deployment's compute platform:

" + "documentation":"

Returns an array of targets associated with a deployment. This method works with all compute types and should be used instead of the deprecated BatchGetDeploymentInstances.

The type of targets returned depends on the deployment's compute platform:

" }, "BatchGetDeployments":{ "name":"BatchGetDeployments", @@ -285,7 +285,7 @@ {"shape":"InvalidTargetGroupPairException"}, {"shape":"ECSServiceMappingLimitExceededException"} ], - "documentation":"

Creates a deployment group to which application revisions will be deployed.

" + "documentation":"

Creates a deployment group to which application revisions are deployed.

" }, "DeleteApplication":{ "name":"DeleteApplication", @@ -533,7 +533,7 @@ "errors":[ {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists the applications registered with the applicable IAM user or AWS account.

" + "documentation":"

Lists the applications registered with the IAM user or AWS account.

" }, "ListDeploymentConfigs":{ "name":"ListDeploymentConfigs", @@ -546,7 +546,7 @@ "errors":[ {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists the deployment configurations with the applicable IAM user or AWS account.

" + "documentation":"

Lists the deployment configurations with the IAM user or AWS account.

" }, "ListDeploymentGroups":{ "name":"ListDeploymentGroups", @@ -562,7 +562,7 @@ {"shape":"ApplicationDoesNotExistException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists the deployment groups for an application registered with the applicable IAM user or AWS account.

" + "documentation":"

Lists the deployment groups for an application registered with the IAM user or AWS account.

" }, "ListDeploymentInstances":{ "name":"ListDeploymentInstances", @@ -584,7 +584,7 @@ {"shape":"InvalidTargetFilterNameException"}, {"shape":"InvalidComputePlatformException"} ], - "documentation":"

The newer BatchGetDeploymentTargets should be used instead because it works with all compute types. ListDeploymentInstances throws an exception if it is used with a compute platform other than EC2/On-premises or AWS Lambda.

Lists the instance for a deployment associated with the applicable IAM user or AWS account.

", + "documentation":"

The newer BatchGetDeploymentTargets should be used instead because it works with all compute types. ListDeploymentInstances throws an exception if it is used with a compute platform other than EC2/On-premises or AWS Lambda.

Lists the instance for a deployment associated with the IAM user or AWS account.

", "deprecated":true, "deprecatedMessage":"This operation is deprecated, use ListDeploymentTargets instead." }, @@ -627,7 +627,7 @@ {"shape":"InvalidDeploymentStatusException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists the deployments in a deployment group for an application registered with the applicable IAM user or AWS account.

" + "documentation":"

Lists the deployments in a deployment group for an application registered with the IAM user or AWS account.

" }, "ListGitHubAccountTokenNames":{ "name":"ListGitHubAccountTokenNames", @@ -657,7 +657,7 @@ {"shape":"InvalidTagFilterException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Gets a list of names for one or more on-premises instances.

Unless otherwise specified, both registered and deregistered on-premises instance names will be listed. To list only registered or deregistered on-premises instance names, use the registration status parameter.

" + "documentation":"

Gets a list of names for one or more on-premises instances.

Unless otherwise specified, both registered and deregistered on-premises instance names are listed. To list only registered or deregistered on-premises instance names, use the registration status parameter.

" }, "PutLifecycleEventHookExecutionStatus":{ "name":"PutLifecycleEventHookExecutionStatus", @@ -749,7 +749,7 @@ {"shape":"DeploymentNotStartedException"}, {"shape":"UnsupportedActionForDeploymentTypeException"} ], - "documentation":"

In a blue/green deployment, overrides any specified wait time and starts terminating instances immediately after the traffic routing is completed.

", + "documentation":"

In a blue/green deployment, overrides any specified wait time and starts terminating instances immediately after the traffic routing is complete.

", "deprecated":true, "deprecatedMessage":"This operation is deprecated, use ContinueDeployment with DeploymentWaitType instead." }, @@ -871,7 +871,7 @@ }, "ignorePollAlarmFailure":{ "shape":"Boolean", - "documentation":"

Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from Amazon CloudWatch. The default value is false.

" + "documentation":"

Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from Amazon CloudWatch. The default value is false.

" }, "alarms":{ "shape":"AlarmList", @@ -897,7 +897,7 @@ "members":{ "content":{ "shape":"RawStringContent", - "documentation":"

The YAML-formatted or JSON-formatted revision string.

For an AWS Lambda deployment the content includes a Lambda function name, the alias for its original version, and the alias for its replacement version. The deployment shifts traffic from the original version of the Lambda function to the replacement version.

For an Amazon ECS deployment the content includes the task name, information about the load balancer that serves traffic to the container, and more.

For both types of deployments, the content can specify Lambda functions that run at specified hooks, such as BeforeInstall, during a deployment.

" + "documentation":"

The YAML-formatted or JSON-formatted revision string.

For an AWS Lambda deployment, the content includes a Lambda function name, the alias for its original version, and the alias for its replacement version. The deployment shifts traffic from the original version of the Lambda function to the replacement version.

For an Amazon ECS deployment, the content includes the task name, information about the load balancer that serves traffic to the container, and more.

For both types of deployments, the content can specify Lambda functions that run at specified hooks, such as BeforeInstall, during a deployment.

" }, "sha256":{ "shape":"RawStringSha256", @@ -910,14 +910,14 @@ "type":"structure", "members":{ }, - "documentation":"

An application with the specified name already exists with the applicable IAM user or AWS account.

", + "documentation":"

An application with the specified name with the IAM user or AWS account already exists.

", "exception":true }, "ApplicationDoesNotExistException":{ "type":"structure", "members":{ }, - "documentation":"

The application does not exist with the applicable IAM user or AWS account.

", + "documentation":"

The application does not exist with the IAM user or AWS account.

", "exception":true }, "ApplicationId":{"type":"string"}, @@ -938,7 +938,7 @@ }, "linkedToGitHub":{ "shape":"Boolean", - "documentation":"

True if the user has authenticated with GitHub for the specified application; otherwise, false.

" + "documentation":"

True if the user has authenticated with GitHub for the specified application. Otherwise, false.

" }, "gitHubAccountName":{ "shape":"GitHubAccountTokenName", @@ -998,7 +998,7 @@ "documentation":"

The event type or types that trigger a rollback.

" } }, - "documentation":"

Information about a configuration for automatically rolling back to a previous version of an application revision when a deployment doesn't complete successfully.

" + "documentation":"

Information about a configuration for automatically rolling back to a previous version of an application revision when a deployment is not completed successfully.

" }, "AutoRollbackEvent":{ "type":"string", @@ -1063,7 +1063,7 @@ }, "errorMessage":{ "shape":"ErrorMessage", - "documentation":"

Information about errors that may have occurred during the API call.

" + "documentation":"

Information about errors that might have occurred during the API call.

" }, "revisions":{ "shape":"RevisionInfoList", @@ -1106,7 +1106,7 @@ }, "deploymentGroupNames":{ "shape":"DeploymentGroupsList", - "documentation":"

The deployment groups' names.

" + "documentation":"

The names of the deployment groups.

" } }, "documentation":"

Represents the input of a BatchGetDeploymentGroups operation.

" @@ -1120,7 +1120,7 @@ }, "errorMessage":{ "shape":"ErrorMessage", - "documentation":"

Information about errors that may have occurred during the API call.

" + "documentation":"

Information about errors that might have occurred during the API call.

" } }, "documentation":"

Represents the output of a BatchGetDeploymentGroups operation.

" @@ -1138,7 +1138,7 @@ }, "instanceIds":{ "shape":"InstancesList", - "documentation":"

The unique IDs of instances of the deployment.

" + "documentation":"

The unique IDs of instances used in the deployment.

" } }, "documentation":"

Represents the input of a BatchGetDeploymentInstances operation.

" @@ -1152,7 +1152,7 @@ }, "errorMessage":{ "shape":"ErrorMessage", - "documentation":"

Information about errors that may have occurred during the API call.

" + "documentation":"

Information about errors that might have occurred during the API call.

" } }, "documentation":"

Represents the output of a BatchGetDeploymentInstances operation.

" @@ -1166,7 +1166,7 @@ }, "targetIds":{ "shape":"TargetIdList", - "documentation":"

The unique IDs of the deployment targets. The compute platform of the deployment determines the type of the targets and their formats.

" + "documentation":"

The unique IDs of the deployment targets. The compute platform of the deployment determines the type of the targets and their formats.

" } } }, @@ -1175,7 +1175,7 @@ "members":{ "deploymentTargets":{ "shape":"DeploymentTargetList", - "documentation":"

A list of target objects for a deployment. Each target object contains details about the target, such as its status and lifecycle events. The type of the target objects depends on the deployment' compute platform.

" + "documentation":"

A list of target objects for a deployment. Each target object contains details about the target, such as its status and lifecycle events. The type of the target objects depends on the deployment' compute platform.

" } } }, @@ -1339,7 +1339,7 @@ }, "trafficRoutingConfig":{ "shape":"TrafficRoutingConfig", - "documentation":"

The configuration that specifies how the deployment traffic will be routed.

" + "documentation":"

The configuration that specifies how the deployment traffic is routed.

" }, "computePlatform":{ "shape":"ComputePlatform", @@ -1368,7 +1368,7 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" }, "deploymentGroupName":{ "shape":"DeploymentGroupName", @@ -1376,19 +1376,19 @@ }, "deploymentConfigName":{ "shape":"DeploymentConfigName", - "documentation":"

If specified, the deployment configuration name can be either one of the predefined configurations provided with AWS CodeDeploy or a custom deployment configuration that you create by calling the create deployment configuration operation.

CodeDeployDefault.OneAtATime is the default deployment configuration. It is used if a configuration isn't specified for the deployment or the deployment group.

For more information about the predefined deployment configurations in AWS CodeDeploy, see Working with Deployment Groups in AWS CodeDeploy in the AWS CodeDeploy User Guide.

" + "documentation":"

If specified, the deployment configuration name can be either one of the predefined configurations provided with AWS CodeDeploy or a custom deployment configuration that you create by calling the create deployment configuration operation.

CodeDeployDefault.OneAtATime is the default deployment configuration. It is used if a configuration isn't specified for the deployment or deployment group.

For more information about the predefined deployment configurations in AWS CodeDeploy, see Working with Deployment Groups in AWS CodeDeploy in the AWS CodeDeploy User Guide.

" }, "ec2TagFilters":{ "shape":"EC2TagFilterList", - "documentation":"

The Amazon EC2 tags on which to filter. The deployment group will include EC2 instances with any of the specified tags. Cannot be used in the same call as ec2TagSet.

" + "documentation":"

The Amazon EC2 tags on which to filter. The deployment group includes EC2 instances with any of the specified tags. Cannot be used in the same call as ec2TagSet.

" }, "onPremisesInstanceTagFilters":{ "shape":"TagFilterList", - "documentation":"

The on-premises instance tags on which to filter. The deployment group will include on-premises instances with any of the specified tags. Cannot be used in the same call as OnPremisesTagSet.

" + "documentation":"

The on-premises instance tags on which to filter. The deployment group includes on-premises instances with any of the specified tags. Cannot be used in the same call as OnPremisesTagSet.

" }, "autoScalingGroups":{ "shape":"AutoScalingGroupNameList", - "documentation":"

A list of associated Auto Scaling groups.

" + "documentation":"

A list of associated Amazon EC2 Auto Scaling groups.

" }, "serviceRoleArn":{ "shape":"Role", @@ -1420,15 +1420,15 @@ }, "ec2TagSet":{ "shape":"EC2TagSet", - "documentation":"

Information about groups of tags applied to EC2 instances. The deployment group will include only EC2 instances identified by all the tag groups. Cannot be used in the same call as ec2TagFilters.

" + "documentation":"

Information about groups of tags applied to EC2 instances. The deployment group includes only EC2 instances identified by all the tag groups. Cannot be used in the same call as ec2TagFilters.

" }, "ecsServices":{ "shape":"ECSServiceList", - "documentation":"

The target ECS services in the deployment group. This only applies to deployment groups that use the Amazon ECS compute platform. A target ECS service is specified as an Amazon ECS cluster and service name pair using the format <clustername>:<servicename>.

" + "documentation":"

The target Amazon ECS services in the deployment group. This applies only to deployment groups that use the Amazon ECS compute platform. A target Amazon ECS service is specified as an Amazon ECS cluster and service name pair using the format <clustername>:<servicename>.

" }, "onPremisesTagSet":{ "shape":"OnPremisesTagSet", - "documentation":"

Information about groups of tags applied to on-premises instances. The deployment group will include only on-premises instances identified by all the tag groups. Cannot be used in the same call as onPremisesInstanceTagFilters.

" + "documentation":"

Information about groups of tags applied to on-premises instances. The deployment group includes only on-premises instances identified by all of the tag groups. Cannot be used in the same call as onPremisesInstanceTagFilters.

" } }, "documentation":"

Represents the input of a CreateDeploymentGroup operation.

" @@ -1449,7 +1449,7 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" }, "deploymentGroupName":{ "shape":"DeploymentGroupName", @@ -1461,7 +1461,7 @@ }, "deploymentConfigName":{ "shape":"DeploymentConfigName", - "documentation":"

The name of a deployment configuration associated with the applicable IAM user or AWS account.

If not specified, the value configured in the deployment group will be used as the default. If the deployment group does not have a deployment configuration associated with it, then CodeDeployDefault.OneAtATime will be used by default.

" + "documentation":"

The name of a deployment configuration associated with the IAM user or AWS account.

If not specified, the value configured in the deployment group is used as the default. If the deployment group does not have a deployment configuration associated with it, CodeDeployDefault.OneAtATime is used by default.

" }, "description":{ "shape":"Description", @@ -1469,11 +1469,11 @@ }, "ignoreApplicationStopFailures":{ "shape":"Boolean", - "documentation":"

If set to true, then if the deployment causes the ApplicationStop deployment lifecycle event to an instance to fail, the deployment to that instance will not be considered to have failed at that point and will continue on to the BeforeInstall deployment lifecycle event.

If set to false or not specified, then if the deployment causes the ApplicationStop deployment lifecycle event to fail to an instance, the deployment to that instance will stop, and the deployment to that instance will be considered to have failed.

" + "documentation":"

If set to true, then if the deployment causes the ApplicationStop deployment lifecycle event to an instance to fail, the deployment to that instance is considered to have failed at that point and continues on to the BeforeInstall deployment lifecycle event.

If set to false or not specified, then if the deployment causes the ApplicationStop deployment lifecycle event to fail to an instance, the deployment to that instance stops, and the deployment to that instance is considered to have failed.

" }, "targetInstances":{ "shape":"TargetInstances", - "documentation":"

Information about the instances that will belong to the replacement environment in a blue/green deployment.

" + "documentation":"

Information about the instances that belong to the replacement environment in a blue/green deployment.

" }, "autoRollbackConfiguration":{ "shape":"AutoRollbackConfiguration", @@ -1506,7 +1506,7 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" } }, "documentation":"

Represents the input of a DeleteApplication operation.

" @@ -1517,7 +1517,7 @@ "members":{ "deploymentConfigName":{ "shape":"DeploymentConfigName", - "documentation":"

The name of a deployment configuration associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of a deployment configuration associated with the IAM user or AWS account.

" } }, "documentation":"

Represents the input of a DeleteDeploymentConfig operation.

" @@ -1531,11 +1531,11 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" }, "deploymentGroupName":{ "shape":"DeploymentGroupName", - "documentation":"

The name of an existing deployment group for the specified application.

" + "documentation":"

The name of a deployment group for the specified application.

" } }, "documentation":"

Represents the input of a DeleteDeploymentGroup operation.

" @@ -1581,21 +1581,21 @@ "type":"structure", "members":{ }, - "documentation":"

A deployment to a target was attempted while another deployment was in process.

", + "documentation":"

A deployment to a target was attempted while another deployment was in progress.

", "exception":true }, "DeploymentConfigAlreadyExistsException":{ "type":"structure", "members":{ }, - "documentation":"

A deployment configuration with the specified name already exists with the applicable IAM user or AWS account.

", + "documentation":"

A deployment configuration with the specified name with the IAM user or AWS account already exists .

", "exception":true }, "DeploymentConfigDoesNotExistException":{ "type":"structure", "members":{ }, - "documentation":"

The deployment configuration does not exist with the applicable IAM user or AWS account.

", + "documentation":"

The deployment configuration does not exist with the IAM user or AWS account.

", "exception":true }, "DeploymentConfigId":{"type":"string"}, @@ -1631,7 +1631,7 @@ }, "trafficRoutingConfig":{ "shape":"TrafficRoutingConfig", - "documentation":"

The configuration specifying how the deployment traffic will be routed. Only deployments with a Lambda compute platform can specify this.

" + "documentation":"

The configuration that specifies how the deployment traffic is routed. Only deployments with a Lambda compute platform can specify this.

" } }, "documentation":"

Information about a deployment configuration.

" @@ -1671,21 +1671,21 @@ "type":"structure", "members":{ }, - "documentation":"

The deployment does not exist with the applicable IAM user or AWS account.

", + "documentation":"

The deployment with the IAM user or AWS account does not exist.

", "exception":true }, "DeploymentGroupAlreadyExistsException":{ "type":"structure", "members":{ }, - "documentation":"

A deployment group with the specified name already exists with the applicable IAM user or AWS account.

", + "documentation":"

A deployment group with the specified name with the IAM user or AWS account already exists.

", "exception":true }, "DeploymentGroupDoesNotExistException":{ "type":"structure", "members":{ }, - "documentation":"

The named deployment group does not exist with the applicable IAM user or AWS account.

", + "documentation":"

The named deployment group with the IAM user or AWS account does not exist.

", "exception":true }, "DeploymentGroupId":{"type":"string"}, @@ -1762,7 +1762,7 @@ }, "ec2TagSet":{ "shape":"EC2TagSet", - "documentation":"

Information about groups of tags applied to an EC2 instance. The deployment group includes only EC2 instances identified by all the tag groups. Cannot be used in the same call as ec2TagFilters.

" + "documentation":"

Information about groups of tags applied to an EC2 instance. The deployment group includes only EC2 instances identified by all of the tag groups. Cannot be used in the same call as ec2TagFilters.

" }, "onPremisesTagSet":{ "shape":"OnPremisesTagSet", @@ -1774,7 +1774,7 @@ }, "ecsServices":{ "shape":"ECSServiceList", - "documentation":"

The target ECS services in the deployment group. This only applies to deployment groups that use the Amazon ECS compute platform. A target ECS service is specified as an Amazon ECS cluster and service name pair using the format <clustername>:<servicename>.

" + "documentation":"

The target Amazon ECS services in the deployment group. This applies only to deployment groups that use the Amazon ECS compute platform. A target Amazon ECS service is specified as an Amazon ECS cluster and service name pair using the format <clustername>:<servicename>.

" } }, "documentation":"

Information about a deployment group.

" @@ -1851,15 +1851,15 @@ }, "createTime":{ "shape":"Timestamp", - "documentation":"

A timestamp indicating when the deployment was created.

" + "documentation":"

A timestamp that indicates when the deployment was created.

" }, "startTime":{ "shape":"Timestamp", - "documentation":"

A timestamp indicating when the deployment was deployed to the deployment group.

In some cases, the reported value of the start time may be later than the complete time. This is due to differences in the clock settings of back-end servers that participate in the deployment process.

" + "documentation":"

A timestamp that indicates when the deployment was deployed to the deployment group.

In some cases, the reported value of the start time might be later than the complete time. This is due to differences in the clock settings of backend servers that participate in the deployment process.

" }, "completeTime":{ "shape":"Timestamp", - "documentation":"

A timestamp indicating when the deployment was complete.

" + "documentation":"

A timestamp that indicates when the deployment was complete.

" }, "deploymentOverview":{ "shape":"DeploymentOverview", @@ -1871,11 +1871,11 @@ }, "creator":{ "shape":"DeploymentCreator", - "documentation":"

The means by which the deployment was created:

" + "documentation":"

The means by which the deployment was created:

" }, "ignoreApplicationStopFailures":{ "shape":"Boolean", - "documentation":"

If true, then if the deployment causes the ApplicationStop deployment lifecycle event to an instance to fail, the deployment to that instance will not be considered to have failed at that point and will continue on to the BeforeInstall deployment lifecycle event.

If false or not specified, then if the deployment causes the ApplicationStop deployment lifecycle event to an instance to fail, the deployment to that instance will stop, and the deployment to that instance will be considered to have failed.

" + "documentation":"

If true, then if the deployment causes the ApplicationStop deployment lifecycle event to an instance to fail, the deployment to that instance is not considered to have failed at that point and continues on to the BeforeInstall deployment lifecycle event.

If false or not specified, then if the deployment causes the ApplicationStop deployment lifecycle event to an instance to fail, the deployment to that instance stops, and the deployment to that instance is considered to have failed.

" }, "autoRollbackConfiguration":{ "shape":"AutoRollbackConfiguration", @@ -1899,7 +1899,7 @@ }, "instanceTerminationWaitTimeStarted":{ "shape":"Boolean", - "documentation":"

Indicates whether the wait period set for the termination of instances in the original environment has started. Status is 'false' if the KEEP_ALIVE option is specified; otherwise, 'true' as soon as the termination wait period starts.

" + "documentation":"

Indicates whether the wait period set for the termination of instances in the original environment has started. Status is 'false' if the KEEP_ALIVE option is specified. Otherwise, 'true' as soon as the termination wait period starts.

" }, "blueGreenDeploymentConfiguration":{ "shape":"BlueGreenDeploymentConfiguration", @@ -2002,7 +2002,7 @@ }, "waitTimeInMinutes":{ "shape":"Duration", - "documentation":"

The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for actionOnTimeout

" + "documentation":"

The number of minutes to wait before the status of a blue/green deployment is changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for actionOnTimeout

" } }, "documentation":"

Information about how traffic is rerouted to instances in a replacement environment in a blue/green deployment.

" @@ -2046,7 +2046,7 @@ "members":{ "deploymentTargetType":{ "shape":"DeploymentTargetType", - "documentation":"

The deployment type which is specific to the deployment's compute platform.

" + "documentation":"

The deployment type that is specific to the deployment's compute platform.

" }, "instanceTarget":{ "shape":"InstanceTarget", @@ -2195,7 +2195,7 @@ "members":{ "ec2TagSetList":{ "shape":"EC2TagSetList", - "documentation":"

A list containing other lists of EC2 instance tag groups. In order for an instance to be included in the deployment group, it must be identified by all the tag groups in the list.

" + "documentation":"

A list that contains other lists of EC2 instance tag groups. For an instance to be included in the deployment group, it must be identified by all of the tag groups in the list.

" } }, "documentation":"

Information about groups of EC2 instance tags.

" @@ -2210,11 +2210,11 @@ "members":{ "serviceName":{ "shape":"ECSServiceName", - "documentation":"

The name of the target ECS service.

" + "documentation":"

The name of the target Amazon ECS service.

" }, "clusterName":{ "shape":"ECSClusterName", - "documentation":"

The name of the cluster that the ECS service is associated with.

" + "documentation":"

The name of the cluster that the Amazon ECS service is associated with.

" } }, "documentation":"

Contains the service and cluster names used to identify an Amazon ECS deployment's target.

" @@ -2227,7 +2227,7 @@ "type":"structure", "members":{ }, - "documentation":"

The Amazon ECS service is associated with more than one deployment groups. An ECS service can only be associated with one deployment group.

", + "documentation":"

The Amazon ECS service is associated with more than one deployment group. An Amazon ECS service can be associated with only one deployment group.

", "exception":true }, "ECSServiceName":{"type":"string"}, @@ -2274,7 +2274,7 @@ }, "desiredCount":{ "shape":"ECSTaskSetCount", - "documentation":"

The number of tasks in a task set. During a deployment that uses the Amazon ECS compute type, CodeDeploy asks Amazon ECS to create a new task set and uses this value to determine how many tasks to create. After the updated task set is created, CodeDeploy shifts traffic to the new task set.

" + "documentation":"

The number of tasks in a task set. During a deployment that uses the Amazon ECS compute type, CodeDeploy instructs Amazon ECS to create a new task set and uses this value to determine how many tasks to create. After the updated task set is created, CodeDeploy shifts traffic to the new task set.

" }, "pendingCount":{ "shape":"ECSTaskSetCount", @@ -2286,7 +2286,7 @@ }, "status":{ "shape":"ECSTaskSetStatus", - "documentation":"

The status of the task set. There are three valid task set statuses:

" + "documentation":"

The status of the task set. There are three valid task set statuses:

" }, "trafficWeight":{ "shape":"TrafficWeight", @@ -2301,7 +2301,7 @@ "documentation":"

A label that identifies whether the ECS task set is an original target (BLUE) or a replacement target (GREEN).

" } }, - "documentation":"

A set of Amazon ECS tasks. A task set runs a specified number of instances of a task definition simultaneously inside an Amazon ECS service. Information about a set of Amazon ECS tasks in an AWS CodeDeploy deployment. An Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic or not.

" + "documentation":"

Information about a set of Amazon ECS tasks in an AWS CodeDeploy deployment. An Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic. An AWS CodeDeploy application that uses the Amazon ECS compute platform deploys a containerized application in an Amazon ECS service as a task set.

" }, "ECSTaskSetCount":{"type":"long"}, "ECSTaskSetIdentifier":{"type":"string"}, @@ -2315,7 +2315,7 @@ "members":{ "name":{ "shape":"ELBName", - "documentation":"

For blue/green deployments, the name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.

" + "documentation":"

For blue/green deployments, the name of the load balancer that is used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment is complete.

" } }, "documentation":"

Information about a load balancer in Elastic Load Balancing to use in a deployment. Instances are registered directly with a load balancer, and traffic is routed to the load balancer.

" @@ -2366,7 +2366,7 @@ "members":{ "code":{ "shape":"ErrorCode", - "documentation":"

For information about additional error codes, see Error Codes for AWS CodeDeploy in the AWS CodeDeploy User Guide.

The error code:

" + "documentation":"

For more information, see Error Codes for AWS CodeDeploy in the AWS CodeDeploy User Guide.

The error code:

" }, "message":{ "shape":"ErrorMessage", @@ -2421,7 +2421,7 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" } }, "documentation":"

Represents the input of a GetApplication operation.

" @@ -2478,7 +2478,7 @@ "members":{ "deploymentConfigName":{ "shape":"DeploymentConfigName", - "documentation":"

The name of a deployment configuration associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of a deployment configuration associated with the IAM user or AWS account.

" } }, "documentation":"

Represents the input of a GetDeploymentConfig operation.

" @@ -2502,11 +2502,11 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" }, "deploymentGroupName":{ "shape":"DeploymentGroupName", - "documentation":"

The name of an existing deployment group for the specified application.

" + "documentation":"

The name of a deployment group for the specified application.

" } }, "documentation":"

Represents the input of a GetDeploymentGroup operation.

" @@ -2527,7 +2527,7 @@ "members":{ "deploymentId":{ "shape":"DeploymentId", - "documentation":"

The unique ID of a deployment associated with the applicable IAM user or AWS account.

" + "documentation":"

The unique ID of a deployment associated with the IAM user or AWS account.

" } }, "documentation":"

Represents the input of a GetDeployment operation.

" @@ -2588,7 +2588,7 @@ "members":{ "deploymentTarget":{ "shape":"DeploymentTarget", - "documentation":"

A deployment target that contains information about a deployment such as its status, lifecyle events, and when it was updated last. It also contains metadata about the deployment target. The deployment target metadata depends on the deployment target's type (instanceTarget, lambdaTarget, or ecsTarget).

" + "documentation":"

A deployment target that contains information about a deployment such as its status, lifecycle events, and when it was last updated. It also contains metadata about the deployment target. The deployment target metadata depends on the deployment target's type (instanceTarget, lambdaTarget, or ecsTarget).

" } } }, @@ -2827,7 +2827,7 @@ }, "lastUpdatedAt":{ "shape":"Timestamp", - "documentation":"

A timestamp indicating when the instance information was last updated.

" + "documentation":"

A timestamp that indicates when the instance information was last updated.

" }, "lifecycleEvents":{ "shape":"LifecycleEventList", @@ -2899,7 +2899,7 @@ "type":"structure", "members":{ }, - "documentation":"

The format of the alarm configuration is invalid. Possible causes include:

", + "documentation":"

The format of the alarm configuration is invalid. Possible causes include:

", "exception":true }, "InvalidApplicationNameException":{ @@ -2913,7 +2913,7 @@ "type":"structure", "members":{ }, - "documentation":"

The automatic rollback configuration was specified in an invalid format. For example, automatic rollback is enabled but an invalid triggering event type or no event types were listed.

", + "documentation":"

The automatic rollback configuration was specified in an invalid format. For example, automatic rollback is enabled, but an invalid triggering event type or no event types were listed.

", "exception":true }, "InvalidAutoScalingGroupException":{ @@ -2990,14 +2990,14 @@ "type":"structure", "members":{ }, - "documentation":"

An invalid deployment style was specified. Valid deployment types include \"IN_PLACE\" and \"BLUE_GREEN\". Valid deployment options include \"WITH_TRAFFIC_CONTROL\" and \"WITHOUT_TRAFFIC_CONTROL\".

", + "documentation":"

An invalid deployment style was specified. Valid deployment types include \"IN_PLACE\" and \"BLUE_GREEN.\" Valid deployment options include \"WITH_TRAFFIC_CONTROL\" and \"WITHOUT_TRAFFIC_CONTROL.\"

", "exception":true }, "InvalidDeploymentTargetIdException":{ "type":"structure", "members":{ }, - "documentation":"

The target ID provide was not valid.

", + "documentation":"

The target ID provided was not valid.

", "exception":true }, "InvalidDeploymentWaitTypeException":{ @@ -3032,7 +3032,7 @@ "type":"structure", "members":{ }, - "documentation":"

An invalid fileExistsBehavior option was specified to determine how AWS CodeDeploy handles files or directories that already exist in a deployment target location but weren't part of the previous successful deployment. Valid values include \"DISALLOW\", \"OVERWRITE\", and \"RETAIN\".

", + "documentation":"

An invalid fileExistsBehavior option was specified to determine how AWS CodeDeploy handles files or directories that already exist in a deployment target location, but weren't part of the previous successful deployment. Valid values include \"DISALLOW,\" \"OVERWRITE,\" and \"RETAIN.\"

", "exception":true }, "InvalidGitHubAccountTokenException":{ @@ -3074,7 +3074,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified input was specified in an invalid format.

", + "documentation":"

The input was specified in an invalid format.

", "exception":true }, "InvalidInstanceIdException":{ @@ -3088,7 +3088,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified on-premises instance name was specified in an invalid format.

", + "documentation":"

The on-premises instance name was specified in an invalid format.

", "exception":true }, "InvalidInstanceStatusException":{ @@ -3179,7 +3179,7 @@ "type":"structure", "members":{ }, - "documentation":"

The service role ARN was specified in an invalid format. Or, if an Auto Scaling group was specified, the specified service role does not grant the appropriate permissions to Auto Scaling.

", + "documentation":"

The service role ARN was specified in an invalid format. Or, if an Auto Scaling group was specified, the specified service role does not grant the appropriate permissions to Amazon EC2 Auto Scaling.

", "exception":true }, "InvalidSortByException":{ @@ -3200,14 +3200,14 @@ "type":"structure", "members":{ }, - "documentation":"

The specified tag was specified in an invalid format.

", + "documentation":"

The tag was specified in an invalid format.

", "exception":true }, "InvalidTagFilterException":{ "type":"structure", "members":{ }, - "documentation":"

The specified tag filter was specified in an invalid format.

", + "documentation":"

The tag filter was specified in an invalid format.

", "exception":true }, "InvalidTargetException":{ @@ -3310,11 +3310,11 @@ }, "endTime":{ "shape":"Timestamp", - "documentation":"

A timestamp indicating when the most recent deployment to the deployment group completed.

" + "documentation":"

A timestamp that indicates when the most recent deployment to the deployment group was complete.

" }, "createTime":{ "shape":"Timestamp", - "documentation":"

A timestamp indicating when the most recent deployment to the deployment group started.

" + "documentation":"

A timestamp that indicates when the most recent deployment to the deployment group started.

" } }, "documentation":"

Information about the most recent attempted or successful deployment to a deployment group.

" @@ -3343,11 +3343,11 @@ }, "startTime":{ "shape":"Timestamp", - "documentation":"

A timestamp indicating when the deployment lifecycle event started.

" + "documentation":"

A timestamp that indicates when the deployment lifecycle event started.

" }, "endTime":{ "shape":"Timestamp", - "documentation":"

A timestamp indicating when the deployment lifecycle event ended.

" + "documentation":"

A timestamp that indicates when the deployment lifecycle event ended.

" }, "status":{ "shape":"LifecycleEventStatus", @@ -3394,15 +3394,15 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" }, "sortBy":{ "shape":"ApplicationRevisionSortBy", - "documentation":"

The column name to use to sort the list results:

If not specified or set to null, the results will be returned in an arbitrary order.

" + "documentation":"

The column name to use to sort the list results:

If not specified or set to null, the results are returned in an arbitrary order.

" }, "sortOrder":{ "shape":"SortOrder", - "documentation":"

The order in which to sort the list results:

If not specified, the results will be sorted in ascending order.

If set to null, the results will be sorted in an arbitrary order.

" + "documentation":"

The order in which to sort the list results:

If not specified, the results are sorted in ascending order.

If set to null, the results are sorted in an arbitrary order.

" }, "s3Bucket":{ "shape":"S3Bucket", @@ -3432,7 +3432,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

If a large amount of information is returned, an identifier will also be returned. It can be used in a subsequent list application revisions call to return the next set of application revisions in the list.

" + "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list application revisions call to return the next set of application revisions in the list.

" } }, "documentation":"

Represents the output of a ListApplicationRevisions operation.

" @@ -3456,7 +3456,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list applications call to return the next set of applications, will also be returned. in the list.

" + "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list applications call to return the next set of applications in the list.

" } }, "documentation":"

Represents the output of a ListApplications operation.

" @@ -3491,7 +3491,7 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" }, "nextToken":{ "shape":"NextToken", @@ -3509,7 +3509,7 @@ }, "deploymentGroups":{ "shape":"DeploymentGroupsList", - "documentation":"

A list of corresponding deployment group names.

" + "documentation":"

A list of deployment group names.

" }, "nextToken":{ "shape":"NextToken", @@ -3532,7 +3532,7 @@ }, "instanceStatusFilter":{ "shape":"InstanceStatusList", - "documentation":"

A subset of instances to list by status:

" + "documentation":"

A subset of instances to list by status:

" }, "instanceTypeFilter":{ "shape":"InstanceTypeList", @@ -3581,7 +3581,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

If a large amount of information is returned, a token identifier will also be returned. It can be used in a subsequent ListDeploymentTargets call to return the next set of deployment targets in the list.

" + "documentation":"

If a large amount of information is returned, a token identifier is also returned. It can be used in a subsequent ListDeploymentTargets call to return the next set of deployment targets in the list.

" } } }, @@ -3590,11 +3590,11 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" }, "deploymentGroupName":{ "shape":"DeploymentGroupName", - "documentation":"

The name of an existing deployment group for the specified application.

" + "documentation":"

The name of a deployment group for the specified application.

" }, "includeOnlyStatuses":{ "shape":"DeploymentStatusList", @@ -3658,7 +3658,7 @@ }, "tagFilters":{ "shape":"TagFilterList", - "documentation":"

The on-premises instance tags that will be used to restrict the corresponding on-premises instance names returned.

" + "documentation":"

The on-premises instance tags that are used to restrict the on-premises instance names returned.

" }, "nextToken":{ "shape":"NextToken", @@ -3679,7 +3679,7 @@ "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list on-premises instances call to return the next set of on-premises instances in the list.

" } }, - "documentation":"

Represents the output of list on-premises instances operation.

" + "documentation":"

Represents the output of the list on-premises instances operation.

" }, "ListStateFilterAction":{ "type":"string", @@ -3699,11 +3699,11 @@ "members":{ "elbInfoList":{ "shape":"ELBInfoList", - "documentation":"

An array containing information about the load balancer to use for load balancing in a deployment. In Elastic Load Balancing, load balancers are used with Classic Load Balancers.

Adding more than one load balancer to the array is not supported.

" + "documentation":"

An array that contains information about the load balancer to use for load balancing in a deployment. In Elastic Load Balancing, load balancers are used with Classic Load Balancers.

Adding more than one load balancer to the array is not supported.

" }, "targetGroupInfoList":{ "shape":"TargetGroupInfoList", - "documentation":"

An array containing information about the target group to use for load balancing in a deployment. In Elastic Load Balancing, target groups are used with Application Load Balancers.

Adding more than one target group to the array is not supported.

" + "documentation":"

An array that contains information about the target group to use for load balancing in a deployment. In Elastic Load Balancing, target groups are used with Application Load Balancers.

Adding more than one target group to the array is not supported.

" }, "targetGroupPairInfoList":{ "shape":"TargetGroupPairInfoList", @@ -3723,7 +3723,7 @@ }, "type":{ "shape":"MinimumHealthyHostsType", - "documentation":"

The minimum healthy instance type:

In an example of nine instance, if a HOST_COUNT of six is specified, deploy to up to three instances at a time. The deployment will be successful if six or more instances are deployed to successfully; otherwise, the deployment fails. If a FLEET_PERCENT of 40 is specified, deploy to up to five instance at a time. The deployment will be successful if four or more instance are deployed to successfully; otherwise, the deployment fails.

In a call to the get deployment configuration operation, CodeDeployDefault.OneAtATime will return a minimum healthy instance type of MOST_CONCURRENCY and a value of 1. This means a deployment to only one instance at a time. (You cannot set the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.) In addition, with CodeDeployDefault.OneAtATime, AWS CodeDeploy will try to ensure that all instances but one are kept in a healthy state during the deployment. Although this allows one instance at a time to be taken offline for a new deployment, it also means that if the deployment to the last instance fails, the overall deployment still succeeds.

For more information, see AWS CodeDeploy Instance Health in the AWS CodeDeploy User Guide.

" + "documentation":"

The minimum healthy instance type:

In an example of nine instances, if a HOST_COUNT of six is specified, deploy to up to three instances at a time. The deployment is successful if six or more instances are deployed to successfully. Otherwise, the deployment fails. If a FLEET_PERCENT of 40 is specified, deploy to up to five instances at a time. The deployment is successful if four or more instances are deployed to successfully. Otherwise, the deployment fails.

In a call to the get deployment configuration operation, CodeDeployDefault.OneAtATime returns a minimum healthy instance type of MOST_CONCURRENCY and a value of 1. This means a deployment to only one instance at a time. (You cannot set the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.) In addition, with CodeDeployDefault.OneAtATime, AWS CodeDeploy attempts to ensure that all instances but one are kept in a healthy state during the deployment. Although this allows one instance at a time to be taken offline for a new deployment, it also means that if the deployment to the last instance fails, the overall deployment is still successful.

For more information, see AWS CodeDeploy Instance Health in the AWS CodeDeploy User Guide.

" } }, "documentation":"

Information about the minimum number of healthy instances.

" @@ -3750,7 +3750,7 @@ "members":{ "onPremisesTagSetList":{ "shape":"OnPremisesTagSetList", - "documentation":"

A list containing other lists of on-premises instance tag groups. In order for an instance to be included in the deployment group, it must be identified by all the tag groups in the list.

" + "documentation":"

A list that contains other lists of on-premises instance tag groups. For an instance to be included in the deployment group, it must be identified by all of the tag groups in the list.

" } }, "documentation":"

Information about groups of on-premises instance tags.

" @@ -3820,7 +3820,7 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

" + "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" }, "description":{ "shape":"Description", @@ -3889,7 +3889,7 @@ "type":"structure", "members":{ }, - "documentation":"

The named revision does not exist with the applicable IAM user or AWS account.

", + "documentation":"

The named revision does not exist with the IAM user or AWS account.

", "exception":true }, "RevisionInfo":{ @@ -3915,7 +3915,7 @@ "members":{ "revisionType":{ "shape":"RevisionLocationType", - "documentation":"

The type of application revision:

" + "documentation":"

The type of application revision:

" }, "s3Location":{ "shape":"S3Location", @@ -3977,7 +3977,7 @@ }, "rollbackMessage":{ "shape":"Description", - "documentation":"

Information describing the status of a deployment rollback; for example, whether the deployment can't be rolled back, is in progress, failed, or succeeded.

" + "documentation":"

Information that describes the status of a deployment rollback (for example, whether the deployment can't be rolled back, is in progress, failed, or succeeded).

" } }, "documentation":"

Information about a deployment rollback.

" @@ -4001,11 +4001,11 @@ }, "version":{ "shape":"VersionId", - "documentation":"

A specific version of the Amazon S3 object that represents the bundled artifacts for the application revision.

If the version is not specified, the system will use the most recent version by default.

" + "documentation":"

A specific version of the Amazon S3 object that represents the bundled artifacts for the application revision.

If the version is not specified, the system uses the most recent version by default.

" }, "eTag":{ "shape":"ETag", - "documentation":"

The ETag of the Amazon S3 object that represents the bundled artifacts for the application revision.

If the ETag is not specified as an input parameter, ETag validation of the object will be skipped.

" + "documentation":"

The ETag of the Amazon S3 object that represents the bundled artifacts for the application revision.

If the ETag is not specified as an input parameter, ETag validation of the object is skipped.

" } }, "documentation":"

Information about the location of application artifacts stored in Amazon S3.

" @@ -4150,7 +4150,7 @@ "members":{ "name":{ "shape":"TargetGroupName", - "documentation":"

For blue/green deployments, the name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.

" + "documentation":"

For blue/green deployments, the name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment are registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment is complete.

" } }, "documentation":"

Information about a target group in Elastic Load Balancing to use in a deployment. Instances are registered as targets in a target group, and traffic is routed to the target group.

" @@ -4165,7 +4165,7 @@ "members":{ "targetGroups":{ "shape":"TargetGroupInfoList", - "documentation":"

One pair of target groups. One is associated with the original task set. The second target is associated with the task set that serves traffic after the deployment completes.

" + "documentation":"

One pair of target groups. One is associated with the original task set. The second is associated with the task set that serves traffic after the deployment is complete.

" }, "prodTrafficRoute":{ "shape":"TrafficRoute", @@ -4173,10 +4173,10 @@ }, "testTrafficRoute":{ "shape":"TrafficRoute", - "documentation":"

An optional path used by a load balancer to route test traffic after an Amazon ECS deployment. Validation can happen while test traffic is served during a deployment.

" + "documentation":"

An optional path used by a load balancer to route test traffic after an Amazon ECS deployment. Validation can occur while test traffic is served during a deployment.

" } }, - "documentation":"

Information about two target groups and how traffic routes during an Amazon ECS deployment. An optional test traffic route can be specified.

" + "documentation":"

Information about two target groups and how traffic is routed during an Amazon ECS deployment. An optional test traffic route can be specified.

" }, "TargetGroupPairInfoList":{ "type":"list", @@ -4392,7 +4392,7 @@ "members":{ "applicationName":{ "shape":"ApplicationName", - "documentation":"

The application name corresponding to the deployment group to update.

" + "documentation":"

The application name that corresponds to the deployment group to update.

" }, "currentDeploymentGroupName":{ "shape":"DeploymentGroupName", @@ -4448,15 +4448,15 @@ }, "ec2TagSet":{ "shape":"EC2TagSet", - "documentation":"

Information about groups of tags applied to on-premises instances. The deployment group will include only EC2 instances identified by all the tag groups.

" + "documentation":"

Information about groups of tags applied to on-premises instances. The deployment group includes only EC2 instances identified by all the tag groups.

" }, "ecsServices":{ "shape":"ECSServiceList", - "documentation":"

The target ECS services in the deployment group. This only applies to deployment groups that use the Amazon ECS compute platform. A target ECS service is specified as an Amazon ECS cluster and service name pair using the format <clustername>:<servicename>.

" + "documentation":"

The target Amazon ECS services in the deployment group. This applies only to deployment groups that use the Amazon ECS compute platform. A target Amazon ECS service is specified as an Amazon ECS cluster and service name pair using the format <clustername>:<servicename>.

" }, "onPremisesTagSet":{ "shape":"OnPremisesTagSet", - "documentation":"

Information about an on-premises instance tag set. The deployment group will include only on-premises instances identified by all the tag groups.

" + "documentation":"

Information about an on-premises instance tag set. The deployment group includes only on-premises instances identified by all the tag groups.

" } }, "documentation":"

Represents the input of an UpdateDeploymentGroup operation.

" @@ -4475,5 +4475,5 @@ "VersionId":{"type":"string"}, "WaitTimeInMins":{"type":"integer"} }, - "documentation":"AWS CodeDeploy

AWS CodeDeploy is a deployment service that automates application deployments to Amazon EC2 instances, on-premises instances running in your own facility, or serverless AWS Lambda functions.

You can deploy a nearly unlimited variety of application content, such as an updated Lambda function, code, web and configuration files, executables, packages, scripts, multimedia files, and so on. AWS CodeDeploy can deploy application content stored in Amazon S3 buckets, GitHub repositories, or Bitbucket repositories. You do not need to make changes to your existing code before you can use AWS CodeDeploy.

AWS CodeDeploy makes it easier for you to rapidly release new features, helps you avoid downtime during application deployment, and handles the complexity of updating your applications, without many of the risks associated with error-prone manual deployments.

AWS CodeDeploy Components

Use the information in this guide to help you work with the following AWS CodeDeploy components:

This guide also contains information to help you get details about the instances in your deployments, to make on-premises instances available for AWS CodeDeploy deployments, and to get details about a Lambda function deployment.

AWS CodeDeploy Information Resources

" + "documentation":"AWS CodeDeploy

AWS CodeDeploy is a deployment service that automates application deployments to Amazon EC2 instances, on-premises instances running in your own facility, serverless AWS Lambda functions, or applications in an Amazon ECS service.

You can deploy a nearly unlimited variety of application content, such as an updated Lambda function, updated applications in an Amazon ECS service, code, web and configuration files, executables, packages, scripts, multimedia files, and so on. AWS CodeDeploy can deploy application content stored in Amazon S3 buckets, GitHub repositories, or Bitbucket repositories. You do not need to make changes to your existing code before you can use AWS CodeDeploy.

AWS CodeDeploy makes it easier for you to rapidly release new features, helps you avoid downtime during application deployment, and handles the complexity of updating your applications, without many of the risks associated with error-prone manual deployments.

AWS CodeDeploy Components

Use the information in this guide to help you work with the following AWS CodeDeploy components:

This guide also contains information to help you get details about the instances in your deployments, to make on-premises instances available for AWS CodeDeploy deployments, to get details about a Lambda function deployment, and to get details about Amazon ECS service deployments.

AWS CodeDeploy Information Resources

" } diff --git a/botocore/data/codepipeline/2015-07-09/paginators-1.json b/botocore/data/codepipeline/2015-07-09/paginators-1.json index ea142457..bb73240e 100644 --- a/botocore/data/codepipeline/2015-07-09/paginators-1.json +++ b/botocore/data/codepipeline/2015-07-09/paginators-1.json @@ -1,3 +1,26 @@ { - "pagination": {} + "pagination": { + "ListActionTypes": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "actionTypes" + }, + "ListPipelineExecutions": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "pipelineExecutionSummaries" + }, + "ListPipelines": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "pipelines" + }, + "ListWebhooks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "webhooks" + } + } } diff --git a/botocore/data/codestar/2017-04-19/paginators-1.json b/botocore/data/codestar/2017-04-19/paginators-1.json index ea142457..d0c91820 100644 --- a/botocore/data/codestar/2017-04-19/paginators-1.json +++ b/botocore/data/codestar/2017-04-19/paginators-1.json @@ -1,3 +1,28 @@ { - "pagination": {} + "pagination": { + "ListProjects": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "projects" + }, + "ListResources": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "resources" + }, + "ListTeamMembers": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "teamMembers" + }, + "ListUserProfiles": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "userProfiles" + } + } } diff --git a/botocore/data/cognito-identity/2014-06-30/paginators-1.json b/botocore/data/cognito-identity/2014-06-30/paginators-1.json index ea142457..2af6e40c 100644 --- 
a/botocore/data/cognito-identity/2014-06-30/paginators-1.json +++ b/botocore/data/cognito-identity/2014-06-30/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListIdentityPools": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "IdentityPools" + } + } } diff --git a/botocore/data/cognito-idp/2016-04-18/paginators-1.json b/botocore/data/cognito-idp/2016-04-18/paginators-1.json index ea142457..04574669 100644 --- a/botocore/data/cognito-idp/2016-04-18/paginators-1.json +++ b/botocore/data/cognito-idp/2016-04-18/paginators-1.json @@ -1,3 +1,52 @@ { - "pagination": {} + "pagination": { + "AdminListGroupsForUser": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Groups" + }, + "AdminListUserAuthEvents": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AuthEvents" + }, + "ListGroups": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Groups" + }, + "ListIdentityProviders": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Providers" + }, + "ListResourceServers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ResourceServers" + }, + "ListUserPoolClients": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "UserPoolClients" + }, + "ListUserPools": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "UserPools" + }, + "ListUsersInGroup": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Users" + } + } } diff --git a/botocore/data/comprehend/2017-11-27/paginators-1.json b/botocore/data/comprehend/2017-11-27/paginators-1.json index 
2d021747..eb84c64c 100644 --- a/botocore/data/comprehend/2017-11-27/paginators-1.json +++ b/botocore/data/comprehend/2017-11-27/paginators-1.json @@ -5,6 +5,48 @@ "output_token": "NextToken", "input_token": "NextToken", "limit_key": "MaxResults" + }, + "ListDocumentClassificationJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DocumentClassificationJobPropertiesList" + }, + "ListDocumentClassifiers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DocumentClassifierPropertiesList" + }, + "ListDominantLanguageDetectionJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DominantLanguageDetectionJobPropertiesList" + }, + "ListEntitiesDetectionJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "EntitiesDetectionJobPropertiesList" + }, + "ListEntityRecognizers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "EntityRecognizerPropertiesList" + }, + "ListKeyPhrasesDetectionJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "KeyPhrasesDetectionJobPropertiesList" + }, + "ListSentimentDetectionJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SentimentDetectionJobPropertiesList" } } } diff --git a/botocore/data/config/2014-11-12/paginators-1.json b/botocore/data/config/2014-11-12/paginators-1.json index 7b1f79b1..1f449912 100644 --- a/botocore/data/config/2014-11-12/paginators-1.json +++ b/botocore/data/config/2014-11-12/paginators-1.json @@ -35,6 +35,59 @@ "input_token": "nextToken", "output_token": "nextToken", "result_key": "resourceIdentifiers" + }, + "DescribeAggregateComplianceByConfigRules": { + "input_token": "NextToken", + 
"limit_key": "Limit", + "output_token": "NextToken", + "result_key": "AggregateComplianceByConfigRules" + }, + "DescribeAggregationAuthorizations": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "AggregationAuthorizations" + }, + "DescribeConfigRuleEvaluationStatus": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "ConfigRulesEvaluationStatus" + }, + "DescribeConfigurationAggregatorSourcesStatus": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "AggregatedSourceStatusList" + }, + "DescribeConfigurationAggregators": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "ConfigurationAggregators" + }, + "DescribePendingAggregationRequests": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "PendingAggregationRequests" + }, + "DescribeRetentionConfigurations": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "RetentionConfigurations" + }, + "GetAggregateComplianceDetailsByConfigRule": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "AggregateEvaluationResults" + }, + "ListAggregateDiscoveredResources": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "ResourceIdentifiers" } } } diff --git a/botocore/data/connect/2017-08-08/paginators-1.json b/botocore/data/connect/2017-08-08/paginators-1.json index ea142457..dd013eed 100644 --- a/botocore/data/connect/2017-08-08/paginators-1.json +++ b/botocore/data/connect/2017-08-08/paginators-1.json @@ -1,3 +1,34 @@ { - "pagination": {} + "pagination": { + "GetMetricData": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "MetricResults" + }, + "ListRoutingProfiles": { + 
"input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "RoutingProfileSummaryList" + }, + "ListSecurityProfiles": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SecurityProfileSummaryList" + }, + "ListUserHierarchyGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "UserHierarchyGroupSummaryList" + }, + "ListUsers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "UserSummaryList" + } + } } diff --git a/botocore/data/cur/2017-01-06/service-2.json b/botocore/data/cur/2017-01-06/service-2.json index 4ed705a8..b93be585 100644 --- a/botocore/data/cur/2017-01-06/service-2.json +++ b/botocore/data/cur/2017-01-06/service-2.json @@ -25,7 +25,7 @@ {"shape":"InternalErrorException"}, {"shape":"ValidationException"} ], - "documentation":"Delete a specified report definition" + "documentation":"

Deletes the specified report.

" }, "DescribeReportDefinitions":{ "name":"DescribeReportDefinitions", @@ -38,7 +38,7 @@ "errors":[ {"shape":"InternalErrorException"} ], - "documentation":"Describe a list of report definitions owned by the account" + "documentation":"

Lists the AWS Cost and Usage reports available to this account.

" }, "PutReportDefinition":{ "name":"PutReportDefinition", @@ -54,13 +54,13 @@ {"shape":"InternalErrorException"}, {"shape":"ValidationException"} ], - "documentation":"Create a new report definition" + "documentation":"

Creates a new report using the description that you provide.

" } }, "shapes":{ "AWSRegion":{ "type":"string", - "documentation":"Region of customer S3 bucket.", + "documentation":"

The region of the S3 bucket that AWS delivers the report into.

", "enum":[ "us-east-1", "us-west-1", @@ -69,28 +69,32 @@ "eu-west-1", "ap-southeast-1", "ap-southeast-2", - "ap-northeast-1" + "ap-northeast-1", + "eu-north-1", + "ap-northeast-3" ] }, "AdditionalArtifact":{ "type":"string", - "documentation":"Enable support for Redshift and/or QuickSight.", + "documentation":"

The types of manifest that you want AWS to create for this report.

", "enum":[ "REDSHIFT", - "QUICKSIGHT" + "QUICKSIGHT", + "ATHENA" ] }, "AdditionalArtifactList":{ "type":"list", "member":{"shape":"AdditionalArtifact"}, - "documentation":"A list of additional artifacts." + "documentation":"

A list of additional artifacts.

" }, "CompressionFormat":{ "type":"string", - "documentation":"Preferred compression format for report.", + "documentation":"

The compression format that AWS uses for the report.

", "enum":[ "ZIP", - "GZIP" + "GZIP", + "Parquet" ] }, "DeleteReportDefinitionRequest":{ @@ -98,18 +102,18 @@ "members":{ "ReportName":{"shape":"ReportName"} }, - "documentation":"Request of DeleteReportDefinition" + "documentation":"

Deletes the specified report.

" }, "DeleteReportDefinitionResponse":{ "type":"structure", "members":{ "ResponseMessage":{"shape":"DeleteResponseMessage"} }, - "documentation":"Response of DeleteReportDefinition" + "documentation":"

If the action is successful, the service sends back an HTTP 200 response.

" }, "DeleteResponseMessage":{ "type":"string", - "documentation":"A message indicates if the deletion is successful." + "documentation":"

Whether the deletion was successful or not.

" }, "DescribeReportDefinitionsRequest":{ "type":"structure", @@ -117,44 +121,47 @@ "MaxResults":{"shape":"MaxResults"}, "NextToken":{"shape":"GenericString"} }, - "documentation":"Request of DescribeReportDefinitions" + "documentation":"

Requests a list of AWS Cost and Usage reports owned by the account.

" }, "DescribeReportDefinitionsResponse":{ "type":"structure", "members":{ - "ReportDefinitions":{"shape":"ReportDefinitionList"}, + "ReportDefinitions":{ + "shape":"ReportDefinitionList", + "documentation":"

A list of AWS Cost and Usage reports owned by the account.

" + }, "NextToken":{"shape":"GenericString"} }, - "documentation":"Response of DescribeReportDefinitions" + "documentation":"

If the action is successful, the service sends back an HTTP 200 response.

" }, "DuplicateReportNameException":{ "type":"structure", "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"This exception is thrown when putting a report preference with a name that already exists.", + "documentation":"

A report with the specified name already exists in the account. Specify a different report name.

", "exception":true }, "ErrorMessage":{ "type":"string", - "documentation":"A message to show the detail of the exception." + "documentation":"

A message to show the detail of the exception.

" }, "GenericString":{ "type":"string", - "documentation":"A generic string." + "documentation":"

A generic string.

" }, "InternalErrorException":{ "type":"structure", "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"This exception is thrown on a known dependency failure.", + "documentation":"

An error on the server occurred during the processing of your request. Try again later.

", "exception":true, "fault":true }, "MaxResults":{ "type":"integer", - "documentation":"The max number of results returned by the operation.", + "documentation":"

The maximum number of results that AWS returns for the operation.

", "box":true, "max":5, "min":5 @@ -163,15 +170,22 @@ "type":"structure", "required":["ReportDefinition"], "members":{ - "ReportDefinition":{"shape":"ReportDefinition"} + "ReportDefinition":{ + "shape":"ReportDefinition", + "documentation":"

Represents the output of the PutReportDefinition operation. The content consists of the detailed metadata and data file information.

" + } }, - "documentation":"Request of PutReportDefinition" + "documentation":"

Creates a Cost and Usage Report.

" }, "PutReportDefinitionResponse":{ "type":"structure", "members":{ }, - "documentation":"Response of PutReportDefinition" + "documentation":"

If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

" + }, + "RefreshClosedReports":{ + "type":"boolean", + "box":true }, "ReportDefinition":{ "type":"structure", @@ -190,62 +204,86 @@ "TimeUnit":{"shape":"TimeUnit"}, "Format":{"shape":"ReportFormat"}, "Compression":{"shape":"CompressionFormat"}, - "AdditionalSchemaElements":{"shape":"SchemaElementList"}, + "AdditionalSchemaElements":{ + "shape":"SchemaElementList", + "documentation":"

A list of strings that indicate additional content that Amazon Web Services includes in the report, such as individual resource IDs.

" + }, "S3Bucket":{"shape":"S3Bucket"}, "S3Prefix":{"shape":"S3Prefix"}, "S3Region":{"shape":"AWSRegion"}, - "AdditionalArtifacts":{"shape":"AdditionalArtifactList"} + "AdditionalArtifacts":{ + "shape":"AdditionalArtifactList", + "documentation":"

A list of manifests that you want Amazon Web Services to create for this report.

" + }, + "RefreshClosedReports":{ + "shape":"RefreshClosedReports", + "documentation":"

Whether you want Amazon Web Services to update your reports after they have been finalized if Amazon Web Services detects charges related to previous months. These charges can include refunds, credits, or support fees.

" + }, + "ReportVersioning":{ + "shape":"ReportVersioning", + "documentation":"

Whether you want Amazon Web Services to overwrite the previous version of each report or to deliver the report in addition to the previous versions.

" + } }, - "documentation":"The definition of AWS Cost and Usage Report. Customer can specify the report name, time unit, report format, compression format, S3 bucket and additional artifacts and schema elements in the definition." + "documentation":"

The definition of AWS Cost and Usage Report. You can specify the report name, time unit, report format, compression format, S3 bucket, additional artifacts, and schema elements in the definition.

" }, "ReportDefinitionList":{ "type":"list", "member":{"shape":"ReportDefinition"}, - "documentation":"A list of report definitions." + "documentation":"

A list of report definitions.

" }, "ReportFormat":{ "type":"string", - "documentation":"Preferred format for report.", - "enum":["textORcsv"] + "documentation":"

The format that AWS saves the report in.

", + "enum":[ + "textORcsv", + "Parquet" + ] }, "ReportLimitReachedException":{ "type":"structure", "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"This exception is thrown when the number of report preference reaches max limit. The max number is 5.", + "documentation":"

This account already has five reports defined. To define a new report, you must delete an existing report.

", "exception":true }, "ReportName":{ "type":"string", - "documentation":"Preferred name for a report, it has to be unique. Must starts with a number/letter, case sensitive. Limited to 256 characters.", + "documentation":"

The name of the report that you want to create. The name must be unique, is case sensitive, and can't include spaces.

", "max":256, "pattern":"[0-9A-Za-z!\\-_.*\\'()]+" }, + "ReportVersioning":{ + "type":"string", + "enum":[ + "CREATE_NEW_REPORT", + "OVERWRITE_REPORT" + ] + }, "S3Bucket":{ "type":"string", - "documentation":"Name of customer S3 bucket.", + "documentation":"

The S3 bucket where AWS delivers the report.

", "max":256 }, "S3Prefix":{ "type":"string", - "documentation":"Preferred report path prefix. Limited to 256 characters.", + "documentation":"

The prefix that AWS adds to the report name when AWS delivers the report. Your prefix can't include spaces.

", "max":256, "pattern":"[0-9A-Za-z!\\-_.*\\'()/]*" }, "SchemaElement":{ "type":"string", - "documentation":"Preference of including Resource IDs. You can include additional details about individual resource IDs in your report.", + "documentation":"

Whether or not AWS includes resource IDs in the report.

", "enum":["RESOURCES"] }, "SchemaElementList":{ "type":"list", "member":{"shape":"SchemaElement"}, - "documentation":"A list of schema elements." + "documentation":"

A list of strings that indicate the content that is included in the report, such as service or usage type.

" }, "TimeUnit":{ "type":"string", - "documentation":"The frequency on which report data are measured and displayed.", + "documentation":"

The length of time covered by the report.

", "enum":[ "HOURLY", "DAILY" @@ -256,9 +294,9 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"This exception is thrown when providing an invalid input. eg. Put a report preference with an invalid report name, or Delete a report preference with an empty report name.", + "documentation":"

The input fails to satisfy the constraints specified by an AWS service.

", "exception":true } }, - "documentation":"All public APIs for AWS Cost and Usage Report service" + "documentation":"

The AWS Cost and Usage Report API enables you to programmatically create, query, and delete AWS Cost and Usage report definitions.

AWS Cost and Usage reports track the monthly AWS costs and usage associated with your AWS account. The report contains line items for each unique combination of AWS product, usage type, and operation that your AWS account uses. You can configure the AWS Cost and Usage report to show only the data that you want, using the AWS Cost and Usage API.

Service Endpoint

The AWS Cost and Usage Report API provides the following endpoint:

" } diff --git a/botocore/data/datasync/2018-11-09/paginators-1.json b/botocore/data/datasync/2018-11-09/paginators-1.json index ea142457..c6892f78 100644 --- a/botocore/data/datasync/2018-11-09/paginators-1.json +++ b/botocore/data/datasync/2018-11-09/paginators-1.json @@ -1,3 +1,34 @@ { - "pagination": {} + "pagination": { + "ListAgents": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Agents" + }, + "ListLocations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Locations" + }, + "ListTagsForResource": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Tags" + }, + "ListTaskExecutions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TaskExecutions" + }, + "ListTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Tasks" + } + } } diff --git a/botocore/data/dax/2017-04-19/paginators-1.json b/botocore/data/dax/2017-04-19/paginators-1.json index ea142457..c13b2df9 100644 --- a/botocore/data/dax/2017-04-19/paginators-1.json +++ b/botocore/data/dax/2017-04-19/paginators-1.json @@ -1,3 +1,45 @@ { - "pagination": {} + "pagination": { + "DescribeClusters": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Clusters" + }, + "DescribeDefaultParameters": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Parameters" + }, + "DescribeEvents": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Events" + }, + "DescribeParameterGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ParameterGroups" + }, + "DescribeParameters": { + "input_token": 
"NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Parameters" + }, + "DescribeSubnetGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SubnetGroups" + }, + "ListTags": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Tags" + } + } } diff --git a/botocore/data/devicefarm/2015-06-23/paginators-1.json b/botocore/data/devicefarm/2015-06-23/paginators-1.json index 55ee7871..982e07f9 100644 --- a/botocore/data/devicefarm/2015-06-23/paginators-1.json +++ b/botocore/data/devicefarm/2015-06-23/paginators-1.json @@ -72,6 +72,39 @@ "input_token": "nextToken", "output_token": "nextToken", "result_key": "offerings" + }, + "ListDeviceInstances": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "deviceInstances" + }, + "ListInstanceProfiles": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "instanceProfiles" + }, + "ListNetworkProfiles": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "networkProfiles" + }, + "ListOfferingPromotions": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "offeringPromotions" + }, + "ListRemoteAccessSessions": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "remoteAccessSessions" + }, + "ListVPCEConfigurations": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "vpceConfigurations" } } } diff --git a/botocore/data/devicefarm/2015-06-23/service-2.json b/botocore/data/devicefarm/2015-06-23/service-2.json index 7f45e898..6d6e4719 100644 --- a/botocore/data/devicefarm/2015-06-23/service-2.json +++ b/botocore/data/devicefarm/2015-06-23/service-2.json @@ -1122,7 +1122,7 @@ }, "type":{ "shape":"ArtifactType", - "documentation":"

The artifact's type.

Allowed values include the following:

" + "documentation":"

The artifact's type.

Allowed values include the following:

" }, "extension":{ "shape":"String", @@ -1273,6 +1273,10 @@ "rules":{ "shape":"Rules", "documentation":"

The device pool's rules.

" + }, + "maxDevices":{ + "shape":"Integer", + "documentation":"

The number of devices that Device Farm can add to your device pool. Device Farm adds devices that are available and that meet the criteria that you assign for the rules parameter. Depending on how many devices meet these constraints, your device pool might contain fewer devices than the value for this parameter.

By specifying the maximum number of devices, you can control the costs that you incur by running tests.

" } }, "documentation":"

Represents a request to the create device pool operation.

" @@ -1513,7 +1517,7 @@ }, "type":{ "shape":"UploadType", - "documentation":"

The upload's upload type.

Must be one of the following values:

Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.

" + "documentation":"

The upload's upload type.

Must be one of the following values:

Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.

" }, "contentType":{ "shape":"ContentType", @@ -1805,7 +1809,7 @@ }, "availability":{ "shape":"DeviceAvailability", - "documentation":"

Reflects how likely a device will be available for a test run.

" + "documentation":"

Reflects how likely a device will be available for a test run. It is currently available in the ListDevices and GetDevice API methods.

" } }, "documentation":"

Represents a device type that an app is tested against.

" @@ -1822,7 +1826,10 @@ "APPIUM_VERSION", "INSTANCE_ARN", "INSTANCE_LABELS", - "FLEET_TYPE" + "FLEET_TYPE", + "OS_VERSION", + "MODEL", + "AVAILABILITY" ] }, "DeviceAvailability":{ @@ -1839,18 +1846,18 @@ "members":{ "attribute":{ "shape":"DeviceFilterAttribute", - "documentation":"

The aspect of a device such as platform or model used as the selection criteria in a device filter.

Allowed values include:

" + "documentation":"

The aspect of a device such as platform or model used as the selection criteria in a device filter.

The supported operators for each attribute are provided in the following list.

ARN

The Amazon Resource Name (ARN) of the device. For example, \"arn:aws:devicefarm:us-west-2::device:12345Example\".

Supported operators: EQUALS, IN, NOT_IN

PLATFORM

The device platform. Valid values are \"ANDROID\" or \"IOS\".

Supported operators: EQUALS

OS_VERSION

The operating system version. For example, \"10.3.2\".

Supported operators: EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, IN, LESS_THAN, LESS_THAN_OR_EQUALS, NOT_IN

MODEL

The device model. For example, \"iPad 5th Gen\".

Supported operators: CONTAINS, EQUALS, IN, NOT_IN

AVAILABILITY

The current availability of the device. Valid values are \"AVAILABLE\", \"HIGHLY_AVAILABLE\", \"BUSY\", or \"TEMPORARY_NOT_AVAILABLE\".

Supported operators: EQUALS

FORM_FACTOR

The device form factor. Valid values are \"PHONE\" or \"TABLET\".

Supported operators: EQUALS

MANUFACTURER

The device manufacturer. For example, \"Apple\".

Supported operators: EQUALS, IN, NOT_IN

REMOTE_ACCESS_ENABLED

Whether the device is enabled for remote access. Valid values are \"TRUE\" or \"FALSE\".

Supported operators: EQUALS

REMOTE_DEBUG_ENABLED

Whether the device is enabled for remote debugging. Valid values are \"TRUE\" or \"FALSE\".

Supported operators: EQUALS

INSTANCE_ARN

The Amazon Resource Name (ARN) of the device instance.

Supported operators: EQUALS, IN, NOT_IN

INSTANCE_LABELS

The label of the device instance.

Supported operators: CONTAINS

FLEET_TYPE

The fleet type. Valid values are \"PUBLIC\" or \"PRIVATE\".

Supported operators: EQUALS

" }, "operator":{ - "shape":"DeviceFilterOperator", - "documentation":"

The filter operator.

" + "shape":"RuleOperator", + "documentation":"

Specifies how Device Farm compares the filter's attribute to the value. For the operators that are supported by each attribute, see the attribute descriptions.

" }, "values":{ "shape":"DeviceFilterValues", - "documentation":"

An array of one or more filter values used in a device filter.

Operator Values

Attribute Values

" + "documentation":"

An array of one or more filter values used in a device filter.

Operator Values

Attribute Values

" } }, - "documentation":"

Represents a device filter used to select a set of devices to be included in a test run. This data structure is passed in as the \"deviceSelectionConfiguration\" parameter to ScheduleRun. For an example of the JSON request syntax, see ScheduleRun.

It is also passed in as the \"filters\" parameter to ListDevices. For an example of the JSON request syntax, see ListDevices.

" + "documentation":"

Represents a device filter used to select a set of devices to be included in a test run. This data structure is passed in as the deviceSelectionConfiguration parameter to ScheduleRun. For an example of the JSON request syntax, see ScheduleRun.

It is also passed in as the filters parameter to ListDevices. For an example of the JSON request syntax, see ListDevices.

" }, "DeviceFilterAttribute":{ "type":"string", @@ -1869,19 +1876,6 @@ "FLEET_TYPE" ] }, - "DeviceFilterOperator":{ - "type":"string", - "enum":[ - "EQUALS", - "LESS_THAN", - "LESS_THAN_OR_EQUALS", - "GREATER_THAN", - "GREATER_THAN_OR_EQUALS", - "IN", - "NOT_IN", - "CONTAINS" - ] - }, "DeviceFilterValues":{ "type":"list", "member":{"shape":"String"} @@ -1982,6 +1976,10 @@ "rules":{ "shape":"Rules", "documentation":"

Information about the device pool's rules.

" + }, + "maxDevices":{ + "shape":"Integer", + "documentation":"

The number of devices that Device Farm can add to your device pool. Device Farm adds devices that are available and that meet the criteria that you assign for the rules parameter. Depending on how many devices meet these constraints, your device pool might contain fewer devices than the value for this parameter.

By specifying the maximum number of devices, you can control the costs that you incur by running tests.

" } }, "documentation":"

Represents a collection of device types.

" @@ -2028,14 +2026,14 @@ "members":{ "filters":{ "shape":"DeviceFilters", - "documentation":"

Used to dynamically select a set of devices for a test run. A filter is made up of an attribute, an operator, and one or more values.

" + "documentation":"

Used to dynamically select a set of devices for a test run. A filter is made up of an attribute, an operator, and one or more values.

" }, "maxDevices":{ "shape":"Integer", "documentation":"

The maximum number of devices to be included in a test run.

" } }, - "documentation":"

Represents the device filters used in a test run as well as the maximum number of devices to be included in the run. It is passed in as the deviceSelectionConfiguration request parameter in ScheduleRun.

" + "documentation":"

Represents the device filters used in a test run as well as the maximum number of devices to be included in the run. It is passed in as the deviceSelectionConfiguration request parameter in ScheduleRun.

" }, "DeviceSelectionResult":{ "type":"structure", @@ -2173,7 +2171,7 @@ }, "testType":{ "shape":"TestType", - "documentation":"

The test type for the specified device pool.

Allowed values include the following:

" + "documentation":"

The test type for the specified device pool.

Allowed values include the following:

" }, "test":{ "shape":"ScheduleRunTest", @@ -2618,7 +2616,7 @@ }, "type":{ "shape":"TestType", - "documentation":"

The job's type.

Allowed values include the following:

" + "documentation":"

The job's type.

Allowed values include the following:

" }, "created":{ "shape":"DateTime", @@ -2795,7 +2793,7 @@ }, "filters":{ "shape":"DeviceFilters", - "documentation":"

Used to select a set of devices. A filter is made up of an attribute, an operator, and one or more values.

" + "documentation":"

Used to select a set of devices. A filter is made up of an attribute, an operator, and one or more values.

" } }, "documentation":"

Represents the result of a list devices request.

" @@ -3182,7 +3180,7 @@ }, "type":{ "shape":"UploadType", - "documentation":"

The type of upload.

Must be one of the following values:

" + "documentation":"

The type of upload.

Must be one of the following values:

" }, "nextToken":{ "shape":"PaginationToken", @@ -3730,7 +3728,7 @@ }, "billingMethod":{ "shape":"BillingMethod", - "documentation":"

The billing method of the remote access session. Possible values include METERED or UNMETERED. For more information about metered devices, see AWS Device Farm terminology.\"

" + "documentation":"

The billing method of the remote access session. Possible values include METERED or UNMETERED. For more information about metered devices, see AWS Device Farm terminology.\"

" }, "deviceMinutes":{ "shape":"DeviceMinutes", @@ -3802,25 +3800,27 @@ "members":{ "attribute":{ "shape":"DeviceAttribute", - "documentation":"

The rule's attribute. It is the aspect of a device such as platform or model used as selection criteria to create or update a device pool.

Allowed values include:

" + "documentation":"

The rule's stringified attribute. For example, specify the value as \"\\\"abc\\\"\".

The supported operators for each attribute are provided in the following list.

APPIUM_VERSION

The Appium version for the test.

Supported operators: CONTAINS

ARN

The Amazon Resource Name (ARN) of the device. For example, \"arn:aws:devicefarm:us-west-2::device:12345Example\".

Supported operators: EQUALS, IN, NOT_IN

AVAILABILITY

The current availability of the device. Valid values are \"AVAILABLE\", \"HIGHLY_AVAILABLE\", \"BUSY\", or \"TEMPORARY_NOT_AVAILABLE\".

Supported operators: EQUALS

FLEET_TYPE

The fleet type. Valid values are \"PUBLIC\" or \"PRIVATE\".

Supported operators: EQUALS

FORM_FACTOR

The device form factor. Valid values are \"PHONE\" or \"TABLET\".

Supported operators: EQUALS, IN, NOT_IN

INSTANCE_ARN

The Amazon Resource Name (ARN) of the device instance.

Supported operators: IN, NOT_IN

INSTANCE_LABELS

The label of the device instance.

Supported operators: CONTAINS

MANUFACTURER

The device manufacturer. For example, \"Apple\".

Supported operators: EQUALS, IN, NOT_IN

MODEL

The device model, such as \"Apple iPad Air 2\" or \"Google Pixel\".

Supported operators: CONTAINS, EQUALS, IN, NOT_IN

OS_VERSION

The operating system version. For example, \"10.3.2\".

Supported operators: EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, IN, LESS_THAN, LESS_THAN_OR_EQUALS, NOT_IN

PLATFORM

The device platform. Valid values are \"ANDROID\" or \"IOS\".

Supported operators: EQUALS, IN, NOT_IN

REMOTE_ACCESS_ENABLED

Whether the device is enabled for remote access. Valid values are \"TRUE\" or \"FALSE\".

Supported operators: EQUALS

REMOTE_DEBUG_ENABLED

Whether the device is enabled for remote debugging. Valid values are \"TRUE\" or \"FALSE\".

Supported operators: EQUALS

" }, "operator":{ "shape":"RuleOperator", - "documentation":"

The rule's operator.

" + "documentation":"

Specifies how Device Farm compares the rule's attribute to the value. For the operators that are supported by each attribute, see the attribute descriptions.

" }, "value":{ "shape":"String", - "documentation":"

The rule's value.

The value must be passed in as a string using escaped quotes.

For example:

\"value\": \"\\\"ANDROID\\\"\"

" + "documentation":"

The rule's value.

" } }, - "documentation":"

Represents a condition for a device pool. It is passed in as the rules parameter to CreateDevicePool and UpdateDevicePool.

" + "documentation":"

Represents a condition for a device pool.

" }, "RuleOperator":{ "type":"string", "enum":[ "EQUALS", "LESS_THAN", + "LESS_THAN_OR_EQUALS", "GREATER_THAN", + "GREATER_THAN_OR_EQUALS", "IN", "NOT_IN", "CONTAINS" @@ -3843,7 +3843,7 @@ }, "type":{ "shape":"TestType", - "documentation":"

The run's type.

Must be one of the following values:

" + "documentation":"

The run's type.

Must be one of the following values:

" }, "platform":{ "shape":"DevicePlatform", @@ -4067,11 +4067,11 @@ }, "devicePoolArn":{ "shape":"AmazonResourceName", - "documentation":"

The ARN of the device pool for the run to be scheduled.

Either devicePoolArn or deviceSelectionConfiguration are required in a request.

" + "documentation":"

The ARN of the device pool for the run to be scheduled.

" }, "deviceSelectionConfiguration":{ "shape":"DeviceSelectionConfiguration", - "documentation":"

The filter criteria used to dynamically select a set of devices for a test run, as well as the maximum number of devices to be included in the run.

Either devicePoolArn or deviceSelectionConfiguration are required in a request.

" + "documentation":"

The filter criteria used to dynamically select a set of devices for a test run, as well as the maximum number of devices to be included in the run.

Either devicePoolArn or deviceSelectionConfiguration is required in a request.

" }, "name":{ "shape":"Name", @@ -4108,7 +4108,7 @@ "members":{ "type":{ "shape":"TestType", - "documentation":"

The test's type.

Must be one of the following values:

" + "documentation":"

The test's type.

Must be one of the following values:

" }, "testPackageArn":{ "shape":"AmazonResourceName", @@ -4124,7 +4124,7 @@ }, "parameters":{ "shape":"TestParameters", - "documentation":"

The test's parameters, such as test framework parameters and fixture settings. Parameters are represented by name-value pairs of strings.

For all tests:

For Calabash tests:

For Appium tests (all types):

For Fuzz tests (Android only):

For Explorer tests:

For Instrumentation:

For XCTest and XCTestUI:

For UIAutomator:

" + "documentation":"

The test's parameters, such as test framework parameters and fixture settings. Parameters are represented by name-value pairs of strings.

For all tests:

For Calabash tests:

For Appium tests (all types):

For Fuzz tests (Android only):

For Explorer tests:

For Instrumentation:

For XCTest and XCTestUI:

For UIAutomator:

" } }, "documentation":"

Represents test settings. This data structure is passed in as the \"test\" parameter to ScheduleRun. For an example of the JSON request syntax, see ScheduleRun.

" @@ -4226,7 +4226,7 @@ }, "type":{ "shape":"TestType", - "documentation":"

The suite's type.

Must be one of the following values:

" + "documentation":"

The suite's type.

Must be one of the following values:

" }, "created":{ "shape":"DateTime", @@ -4280,7 +4280,7 @@ }, "type":{ "shape":"TestType", - "documentation":"

The test's type.

Must be one of the following values:

" + "documentation":"

The test's type.

Must be one of the following values:

" }, "created":{ "shape":"DateTime", @@ -4331,9 +4331,13 @@ "APPIUM_JAVA_JUNIT", "APPIUM_JAVA_TESTNG", "APPIUM_PYTHON", + "APPIUM_NODE", + "APPIUM_RUBY", "APPIUM_WEB_JAVA_JUNIT", "APPIUM_WEB_JAVA_TESTNG", "APPIUM_WEB_PYTHON", + "APPIUM_WEB_NODE", + "APPIUM_WEB_RUBY", "CALABASH", "INSTRUMENTATION", "UIAUTOMATION", @@ -4440,6 +4444,14 @@ "rules":{ "shape":"Rules", "documentation":"

Represents the rules you wish to modify for the device pool. Updating rules is optional; however, if you choose to update rules for your request, the update will replace the existing rules.

" + }, + "maxDevices":{ + "shape":"Integer", + "documentation":"

The number of devices that Device Farm can add to your device pool. Device Farm adds devices that are available and that meet the criteria that you assign for the rules parameter. Depending on how many devices meet these constraints, your device pool might contain fewer devices than the value for this parameter.

By specifying the maximum number of devices, you can control the costs that you incur by running tests.

If you use this parameter in your request, you cannot use the clearMaxDevices parameter in the same request.

" + }, + "clearMaxDevices":{ + "shape":"Boolean", + "documentation":"

Sets whether the maxDevices parameter applies to your device pool. If you set this parameter to true, the maxDevices parameter does not apply, and Device Farm does not limit the number of devices that it adds to your device pool. In this case, Device Farm adds all available devices that meet the criteria that are specified for the rules parameter.

If you use this parameter in your request, you cannot use the maxDevices parameter in the same request.

" } }, "documentation":"

Represents a request to the update device pool operation.

" @@ -4668,7 +4680,7 @@ }, "type":{ "shape":"UploadType", - "documentation":"

The upload's type.

Must be one of the following values:

" + "documentation":"

The upload's type.

Must be one of the following values:

" }, "status":{ "shape":"UploadStatus", @@ -4723,9 +4735,13 @@ "APPIUM_JAVA_JUNIT_TEST_PACKAGE", "APPIUM_JAVA_TESTNG_TEST_PACKAGE", "APPIUM_PYTHON_TEST_PACKAGE", + "APPIUM_NODE_TEST_PACKAGE", + "APPIUM_RUBY_TEST_PACKAGE", "APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE", "APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE", "APPIUM_WEB_PYTHON_TEST_PACKAGE", + "APPIUM_WEB_NODE_TEST_PACKAGE", + "APPIUM_WEB_RUBY_TEST_PACKAGE", "CALABASH_TEST_PACKAGE", "INSTRUMENTATION_TEST_PACKAGE", "UIAUTOMATION_TEST_PACKAGE", @@ -4735,9 +4751,13 @@ "APPIUM_JAVA_JUNIT_TEST_SPEC", "APPIUM_JAVA_TESTNG_TEST_SPEC", "APPIUM_PYTHON_TEST_SPEC", + "APPIUM_NODE_TEST_SPEC", + "APPIUM_RUBY_TEST_SPEC", "APPIUM_WEB_JAVA_JUNIT_TEST_SPEC", "APPIUM_WEB_JAVA_TESTNG_TEST_SPEC", "APPIUM_WEB_PYTHON_TEST_SPEC", + "APPIUM_WEB_NODE_TEST_SPEC", + "APPIUM_WEB_RUBY_TEST_SPEC", "INSTRUMENTATION_TEST_SPEC", "XCTEST_UI_TEST_SPEC" ] diff --git a/botocore/data/directconnect/2012-10-25/paginators-1.json b/botocore/data/directconnect/2012-10-25/paginators-1.json index ea142457..dbca668f 100644 --- a/botocore/data/directconnect/2012-10-25/paginators-1.json +++ b/botocore/data/directconnect/2012-10-25/paginators-1.json @@ -1,3 +1,22 @@ { - "pagination": {} + "pagination": { + "DescribeDirectConnectGatewayAssociations": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "directConnectGatewayAssociations" + }, + "DescribeDirectConnectGatewayAttachments": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "directConnectGatewayAttachments" + }, + "DescribeDirectConnectGateways": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "directConnectGateways" + } + } } diff --git a/botocore/data/directconnect/2012-10-25/service-2.json b/botocore/data/directconnect/2012-10-25/service-2.json index 181c0100..9c382b9b 100644 --- a/botocore/data/directconnect/2012-10-25/service-2.json +++ 
b/botocore/data/directconnect/2012-10-25/service-2.json @@ -375,7 +375,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for a connection.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN partner or service provider uses when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect User Guide.

", + "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for a connection.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN partner or service provider uses when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect User Guide.

", "deprecated":true }, "DescribeConnections":{ @@ -475,7 +475,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for the specified interconnect.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect User Guide.

", + "documentation":"

Deprecated. Use DescribeLoa instead.

Gets the LOA-CFA for the specified interconnect.

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect User Guide.

", "deprecated":true }, "DescribeInterconnects":{ @@ -518,7 +518,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Gets the LOA-CFA for a connection, interconnect, or link aggregation group (LAG).

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect User Guide.

" + "documentation":"

Gets the LOA-CFA for a connection, interconnect, or link aggregation group (LAG).

The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect User Guide.

" }, "DescribeLocations":{ "name":"DescribeLocations", @@ -855,7 +855,7 @@ }, "bgpStatus":{ "shape":"BGPStatus", - "documentation":"

The status of the BGP peer. The following are the possible values:

" + "documentation":"

The status of the BGP peer. The following are the possible values:

" }, "awsDeviceV2":{ "shape":"AwsDeviceV2", @@ -883,7 +883,8 @@ "type":"string", "enum":[ "up", - "down" + "down", + "unknown" ] }, "Bandwidth":{"type":"string"}, @@ -904,7 +905,7 @@ "members":{ "connectionState":{ "shape":"ConnectionState", - "documentation":"

The state of the connection. The following are the possible values:

" + "documentation":"

The state of the connection. The following are the possible values:

" } } }, @@ -931,7 +932,7 @@ "members":{ "virtualInterfaceState":{ "shape":"VirtualInterfaceState", - "documentation":"

The state of the virtual interface. The following are the possible values:

" + "documentation":"

The state of the virtual interface. The following are the possible values:

" } } }, @@ -950,7 +951,7 @@ "members":{ "virtualInterfaceState":{ "shape":"VirtualInterfaceState", - "documentation":"

The state of the virtual interface. The following are the possible values:

" + "documentation":"

The state of the virtual interface. The following are the possible values:

" } } }, @@ -971,7 +972,7 @@ }, "connectionState":{ "shape":"ConnectionState", - "documentation":"

The state of the connection. The following are the possible values:

" + "documentation":"

The state of the connection. The following are the possible values:

" }, "region":{ "shape":"Region", @@ -1036,7 +1037,8 @@ "down", "deleting", "deleted", - "rejected" + "rejected", + "unknown" ] }, "Connections":{ @@ -1338,7 +1340,7 @@ "members":{ "interconnectState":{ "shape":"InterconnectState", - "documentation":"

The state of the interconnect. The following are the possible values:

" + "documentation":"

The state of the interconnect. The following are the possible values:

" } } }, @@ -1367,7 +1369,7 @@ "members":{ "virtualInterfaceState":{ "shape":"VirtualInterfaceState", - "documentation":"

The state of the virtual interface. The following are the possible values:

" + "documentation":"

The state of the virtual interface. The following are the possible values:

" } } }, @@ -1813,7 +1815,7 @@ }, "interconnectState":{ "shape":"InterconnectState", - "documentation":"

The state of the interconnect. The following are the possible values:

" + "documentation":"

The state of the interconnect. The following are the possible values:

" }, "region":{ "shape":"Region", @@ -1868,7 +1870,8 @@ "available", "down", "deleting", - "deleted" + "deleted", + "unknown" ] }, "Interconnects":{ @@ -1906,7 +1909,7 @@ }, "lagState":{ "shape":"LagState", - "documentation":"

The state of the LAG. The following are the possible values:

" + "documentation":"

The state of the LAG. The following are the possible values:

" }, "location":{ "shape":"LocationCode", @@ -1961,7 +1964,8 @@ "available", "down", "deleting", - "deleted" + "deleted", + "unknown" ] }, "Lags":{ @@ -2489,7 +2493,7 @@ }, "virtualInterfaceState":{ "shape":"VirtualInterfaceState", - "documentation":"

The state of the virtual interface. The following are the possible values:

" + "documentation":"

The state of the virtual interface. The following are the possible values:

" }, "customerRouterConfig":{ "shape":"RouterConfig", @@ -2547,7 +2551,8 @@ "down", "deleting", "deleted", - "rejected" + "rejected", + "unknown" ] }, "VirtualInterfaceType":{"type":"string"}, diff --git a/botocore/data/discovery/2015-11-01/paginators-1.json b/botocore/data/discovery/2015-11-01/paginators-1.json index ea142457..9afc883b 100644 --- a/botocore/data/discovery/2015-11-01/paginators-1.json +++ b/botocore/data/discovery/2015-11-01/paginators-1.json @@ -1,3 +1,40 @@ { - "pagination": {} + "pagination": { + "DescribeAgents": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "agentsInfo" + }, + "DescribeContinuousExports": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "descriptions" + }, + "DescribeExportConfigurations": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "exportsInfo" + }, + "DescribeExportTasks": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "exportsInfo" + }, + "DescribeTags": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "tags" + }, + "ListConfigurations": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "configurations" + } + } } diff --git a/botocore/data/discovery/2015-11-01/service-2.json b/botocore/data/discovery/2015-11-01/service-2.json index 3cc37b2d..c10c37bb 100644 --- a/botocore/data/discovery/2015-11-01/service-2.json +++ b/botocore/data/discovery/2015-11-01/service-2.json @@ -28,6 +28,21 @@ ], "documentation":"

Associates one or more configuration items with an application.

" }, + "BatchDeleteImportData":{ + "name":"BatchDeleteImportData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteImportDataRequest"}, + "output":{"shape":"BatchDeleteImportDataResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ], + "documentation":"

Deletes one or more import tasks, each identified by their import ID. Each import task has a number of records that can identify servers or applications.

AWS Application Discovery Service has built-in matching logic that will identify when discovered servers match existing entries that you've previously discovered, the information for the already-existing discovered server is updated. When you delete an import task that contains records that were used to match, the information in those matched records that comes from the deleted records will also be deleted.

" + }, "CreateApplication":{ "name":"CreateApplication", "http":{ @@ -124,7 +139,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"} ], - "documentation":"

Retrieves attributes for a list of configuration item IDs.

All of the supplied IDs must be for the same asset type from one of the follwoing:

Output fields are specific to the asset type specified. For example, the output for a server configuration item includes a list of attributes about the server, such as host name, operating system, number of network cards, etc.

For a complete list of outputs for each asset type, see Using the DescribeConfigurations Action.

" + "documentation":"

Retrieves attributes for a list of configuration item IDs.

All of the supplied IDs must be for the same asset type from one of the following:

Output fields are specific to the asset type specified. For example, the output for a server configuration item includes a list of attributes about the server, such as host name, operating system, number of network cards, etc.

For a complete list of outputs for each asset type, see Using the DescribeConfigurations Action.

" }, "DescribeContinuousExports":{ "name":"DescribeContinuousExports", @@ -159,7 +174,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"} ], - "documentation":"

DescribeExportConfigurations is deprecated.

Use instead DescribeExportTasks .

", + "documentation":"

DescribeExportConfigurations is deprecated. Use DescribeImportTasks, instead.

", "deprecated":true }, "DescribeExportTasks":{ @@ -178,6 +193,21 @@ ], "documentation":"

Retrieve status of one or more export tasks. You can retrieve the status of up to 100 export tasks.

" }, + "DescribeImportTasks":{ + "name":"DescribeImportTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportTasksRequest"}, + "output":{"shape":"DescribeImportTasksResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ], + "documentation":"

Returns an array of import tasks for your account, including status information, times, IDs, the Amazon S3 Object URL for the import file, and more.

" + }, "DescribeTags":{ "name":"DescribeTags", "http":{ @@ -329,6 +359,22 @@ ], "documentation":"

Begins the export of discovered data to an S3 bucket.

If you specify agentIds in a filter, the task exports up to 72 hours of detailed data collected by the identified Application Discovery Agent, including network, process, and performance details. A time range for exported agent data may be set by using startTime and endTime. Export of detailed agent data is limited to five concurrently running exports.

If you do not include an agentIds filter, summary data is exported that includes both AWS Agentless Discovery Connector data and summary data from AWS Discovery Agents. Export of summary data is limited to two exports per day.

" }, + "StartImportTask":{ + "name":"StartImportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartImportTaskRequest"}, + "output":{"shape":"StartImportTaskResponse"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ], + "documentation":"

Starts an import task, which allows you to import details of your on-premises environment directly into AWS without having to use the Application Discovery Service (ADS) tools such as the Discovery Connector or Discovery Agent. This gives you the option to perform migration assessment and planning directly from your imported data, including the ability to group your devices as applications and track their migration status.

To start an import request, do this:

  1. Download the specially formatted comma separated value (CSV) import template, which you can find here: https://s3-us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv.

  2. Fill out the template with your server and application data.

  3. Upload your import file to an Amazon S3 bucket, and make a note of its Object URL. Your import file must be in the CSV format.

  4. Use the console or the StartImportTask command with the AWS CLI or one of the AWS SDKs to import the records from your file.

For more information, including step-by-step procedures, see Migration Hub Import in the AWS Application Discovery Service User Guide.

There are limits to the number of import tasks you can create (and delete) in an AWS account. For more information, see AWS Application Discovery Service Limits in the AWS Application Discovery Service User Guide.

" + }, "StopContinuousExport":{ "name":"StopContinuousExport", "http":{ @@ -523,11 +569,65 @@ "documentation":"

The AWS user account does not have permission to perform the action. Check the IAM policy associated with this account.

", "exception":true }, + "BatchDeleteImportDataError":{ + "type":"structure", + "members":{ + "importTaskId":{ + "shape":"ImportTaskIdentifier", + "documentation":"

The unique import ID associated with the error that occurred.

" + }, + "errorCode":{ + "shape":"BatchDeleteImportDataErrorCode", + "documentation":"

The type of error that occurred for a specific import task.

" + }, + "errorDescription":{ + "shape":"BatchDeleteImportDataErrorDescription", + "documentation":"

The description of the error that occurred for a specific import task.

" + } + }, + "documentation":"

Error messages returned for each import task that you deleted as a response for this command.

" + }, + "BatchDeleteImportDataErrorCode":{ + "type":"string", + "enum":[ + "NOT_FOUND", + "INTERNAL_SERVER_ERROR" + ] + }, + "BatchDeleteImportDataErrorDescription":{"type":"string"}, + "BatchDeleteImportDataErrorList":{ + "type":"list", + "member":{"shape":"BatchDeleteImportDataError"} + }, + "BatchDeleteImportDataRequest":{ + "type":"structure", + "required":["importTaskIds"], + "members":{ + "importTaskIds":{ + "shape":"ToDeleteIdentifierList", + "documentation":"

The IDs for the import tasks that you want to delete.

" + } + } + }, + "BatchDeleteImportDataResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"BatchDeleteImportDataErrorList", + "documentation":"

Error messages returned for each import task that you deleted as a response for this command.

" + } + } + }, "Boolean":{"type":"boolean"}, "BoxedInteger":{ "type":"integer", "box":true }, + "ClientRequestToken":{ + "type":"string", + "max":100, + "min":1 + }, "Condition":{"type":"string"}, "Configuration":{ "type":"map", @@ -605,7 +705,7 @@ }, "statusDetail":{ "shape":"StringMax255", - "documentation":"

Contains information about any errors that may have occurred.

" + "documentation":"

Contains information about any errors that have occurred. This data type can have the following values:

" }, "s3Bucket":{ "shape":"S3Bucket", @@ -986,6 +1086,46 @@ } } }, + "DescribeImportTasksFilterList":{ + "type":"list", + "member":{"shape":"ImportTaskFilter"} + }, + "DescribeImportTasksMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "DescribeImportTasksRequest":{ + "type":"structure", + "members":{ + "filters":{ + "shape":"DescribeImportTasksFilterList", + "documentation":"

An array of name-value pairs that you provide to filter the results for the DescribeImportTask request to a specific subset of results. Currently, wildcard values aren't supported for filters.

" + }, + "maxResults":{ + "shape":"DescribeImportTasksMaxResults", + "documentation":"

The maximum number of results that you want this request to return, up to 100.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to request a specific page of results.

" + } + } + }, + "DescribeImportTasksResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to request the next page of results.

" + }, + "tasks":{ + "shape":"ImportTaskList", + "documentation":"

A returned array of import tasks that match any applied filters, up to the specified number of maximum results.

" + } + } + }, "DescribeTagsRequest":{ "type":"structure", "members":{ @@ -1214,6 +1354,126 @@ } } }, + "ImportStatus":{ + "type":"string", + "enum":[ + "IMPORT_IN_PROGRESS", + "IMPORT_COMPLETE", + "IMPORT_FAILED", + "IMPORT_FAILED_SERVER_LIMIT_EXCEEDED", + "IMPORT_FAILED_RECORD_LIMIT_EXCEEDED", + "DELETE_IN_PROGRESS", + "DELETE_COMPLETE", + "DELETE_FAILED", + "DELETE_FAILED_LIMIT_EXCEEDED" + ] + }, + "ImportTask":{ + "type":"structure", + "members":{ + "importTaskId":{ + "shape":"ImportTaskIdentifier", + "documentation":"

The unique ID for a specific import task. These IDs aren't globally unique, but they are unique within an AWS account.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

A unique token used to prevent the same import request from occurring more than once. If you didn't provide a token, a token was automatically generated when the import task request was sent.

" + }, + "name":{ + "shape":"ImportTaskName", + "documentation":"

A descriptive name for an import task. You can use this name to filter future requests related to this import task, such as identifying applications and servers that were included in this import task. We recommend that you use a meaningful name for each import task.

" + }, + "importUrl":{ + "shape":"ImportURL", + "documentation":"

The URL for your import file that you've uploaded to Amazon S3.

" + }, + "status":{ + "shape":"ImportStatus", + "documentation":"

The status of the import task. An import can have the status of IMPORT_COMPLETE and still have some records fail to import from the overall request. More information can be found in the downloadable archive defined in the errorsAndFailedEntriesZip field, or in the Migration Hub management console.

" + }, + "importRequestTime":{ + "shape":"TimeStamp", + "documentation":"

The time that the import task request was made, presented in the Unix time stamp format.

" + }, + "importCompletionTime":{ + "shape":"TimeStamp", + "documentation":"

The time that the import task request finished, presented in the Unix time stamp format.

" + }, + "importDeletedTime":{ + "shape":"TimeStamp", + "documentation":"

The time that the import task request was deleted, presented in the Unix time stamp format.

" + }, + "serverImportSuccess":{ + "shape":"Integer", + "documentation":"

The total number of server records in the import file that were successfully imported.

" + }, + "serverImportFailure":{ + "shape":"Integer", + "documentation":"

The total number of server records in the import file that failed to be imported.

" + }, + "applicationImportSuccess":{ + "shape":"Integer", + "documentation":"

The total number of application records in the import file that were successfully imported.

" + }, + "applicationImportFailure":{ + "shape":"Integer", + "documentation":"

The total number of application records in the import file that failed to be imported.

" + }, + "errorsAndFailedEntriesZip":{ + "shape":"S3PresignedUrl", + "documentation":"

A link to a compressed archive folder (in the ZIP format) that contains an error log and a file of failed records. You can use these two files to quickly identify records that failed, why they failed, and correct those records. Afterward, you can upload the corrected file to your Amazon S3 bucket and create another import task request.

This field also includes authorization information so you can confirm the authenticity of the compressed archive before you download it.

If some records failed to be imported, we recommend that you correct the records in the failed entries file and then import that failed entries file. This prevents you from having to correct and update the larger original file and attempt importing it again.

" + } + }, + "documentation":"

An array of information related to the import task request that includes status information, times, IDs, the Amazon S3 Object URL for the import file, and more.

" + }, + "ImportTaskFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"ImportTaskFilterName", + "documentation":"

The name, status, or import task ID for a specific import task.

" + }, + "values":{ + "shape":"ImportTaskFilterValueList", + "documentation":"

An array of strings that you can provide to match against a specific name, status, or import task ID to filter the results for your import task queries.

" + } + }, + "documentation":"

A name-values pair of elements you can use to filter the results when querying your import tasks. Currently, wildcards are not supported for filters.

When filtering by import status, all other filter values are ignored.

" + }, + "ImportTaskFilterName":{ + "type":"string", + "enum":[ + "IMPORT_TASK_ID", + "STATUS", + "NAME" + ] + }, + "ImportTaskFilterValue":{ + "type":"string", + "max":100, + "min":1 + }, + "ImportTaskFilterValueList":{ + "type":"list", + "member":{"shape":"ImportTaskFilterValue"}, + "max":100, + "min":1 + }, + "ImportTaskIdentifier":{"type":"string"}, + "ImportTaskList":{ + "type":"list", + "member":{"shape":"ImportTask"} + }, + "ImportTaskName":{ + "type":"string", + "max":100, + "min":1 + }, + "ImportURL":{ + "type":"string", + "max":4000, + "min":1 + }, "Integer":{"type":"integer"}, "InvalidParameterException":{ "type":"structure", @@ -1384,7 +1644,7 @@ "members":{ "message":{"shape":"Message"} }, - "documentation":"

", + "documentation":"

This issue occurs when the same clientRequestToken is used with the StartImportTask action, but with different parameters. For example, if you use the same request token but have two different import URLs, you can encounter this issue. If the import tasks are meant to be different, use a different clientRequestToken, and try again.

", "exception":true }, "ResourceNotFoundException":{ @@ -1396,6 +1656,7 @@ "exception":true }, "S3Bucket":{"type":"string"}, + "S3PresignedUrl":{"type":"string"}, "SchemaStorageConfig":{ "type":"map", "key":{"shape":"DatabaseName"}, @@ -1489,6 +1750,37 @@ } } }, + "StartImportTaskRequest":{ + "type":"structure", + "required":[ + "name", + "importUrl" + ], + "members":{ + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

Optional. A unique token that you can provide to prevent the same import request from occurring more than once. If you don't provide a token, a token is automatically generated.

Sending more than one StartImportTask request with the same client request token will return information about the original import task with that client request token.

", + "idempotencyToken":true + }, + "name":{ + "shape":"ImportTaskName", + "documentation":"

A descriptive name for this request. You can use this name to filter future requests related to this import task, such as identifying applications and servers that were included in this import task. We recommend that you use a meaningful name for each import task.

" + }, + "importUrl":{ + "shape":"ImportURL", + "documentation":"

The URL for your import file that you've uploaded to Amazon S3.

If you're using the AWS CLI, this URL is structured as follows: s3://BucketName/ImportFileName.CSV

" + } + } + }, + "StartImportTaskResponse":{ + "type":"structure", + "members":{ + "task":{ + "shape":"ImportTask", + "documentation":"

An array of information related to the import task request including status information, times, IDs, the Amazon S3 Object URL for the import file, and more.

" + } + } + }, "StopContinuousExportRequest":{ "type":"structure", "required":["exportId"], @@ -1584,6 +1876,12 @@ }, "TagValue":{"type":"string"}, "TimeStamp":{"type":"timestamp"}, + "ToDeleteIdentifierList":{ + "type":"list", + "member":{"shape":"ImportTaskIdentifier"}, + "max":10, + "min":1 + }, "UpdateApplicationRequest":{ "type":"structure", "required":["configurationId"], @@ -1615,5 +1913,5 @@ ] } }, - "documentation":"AWS Application Discovery Service

AWS Application Discovery Service helps you plan application migration projects by automatically identifying servers, virtual machines (VMs), software, and software dependencies running in your on-premises data centers. Application Discovery Service also collects application performance data, which can help you assess the outcome of your migration. The data collected by Application Discovery Service is securely retained in an AWS-hosted and managed database in the cloud. You can export the data as a CSV or XML file into your preferred visualization tool or cloud-migration solution to plan your migration. For more information, see AWS Application Discovery Service FAQ.

Application Discovery Service offers two modes of operation:

We recommend that you use agent-based discovery for non-VMware environments and to collect information about software and software dependencies. You can also run agent-based and agentless discovery simultaneously. Use agentless discovery to quickly complete the initial infrastructure assessment and then install agents on select hosts.

Application Discovery Service integrates with application discovery solutions from AWS Partner Network (APN) partners. Third-party application discovery tools can query Application Discovery Service and write to the Application Discovery Service database using a public API. You can then import the data into either a visualization tool or cloud-migration solution.

Application Discovery Service doesn't gather sensitive information. All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.

Your AWS account must be granted access to Application Discovery Service, a process called whitelisting. This is true for AWS partners and customers alike. To request access, sign up for Application Discovery Service.

This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

This guide is intended for use with the AWS Application Discovery Service User Guide .

" + "documentation":"AWS Application Discovery Service

AWS Application Discovery Service helps you plan application migration projects by automatically identifying servers, virtual machines (VMs), software, and software dependencies running in your on-premises data centers. Application Discovery Service also collects application performance data, which can help you assess the outcome of your migration. The data collected by Application Discovery Service is securely retained in an AWS-hosted and managed database in the cloud. You can export the data as a CSV or XML file into your preferred visualization tool or cloud-migration solution to plan your migration. For more information, see AWS Application Discovery Service FAQ.

Application Discovery Service offers two modes of operation:

We recommend that you use agent-based discovery for non-VMware environments and to collect information about software and software dependencies. You can also run agent-based and agentless discovery simultaneously. Use agentless discovery to quickly complete the initial infrastructure assessment and then install agents on select hosts.

Application Discovery Service integrates with application discovery solutions from AWS Partner Network (APN) partners. Third-party application discovery tools can query Application Discovery Service and write to the Application Discovery Service database using a public API. You can then import the data into either a visualization tool or cloud-migration solution.

Application Discovery Service doesn't gather sensitive information. All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.

This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

This guide is intended for use with the AWS Application Discovery Service User Guide .

" } diff --git a/botocore/data/dlm/2018-01-12/service-2.json b/botocore/data/dlm/2018-01-12/service-2.json index 294ad6c0..cd06a436 100644 --- a/botocore/data/dlm/2018-01-12/service-2.json +++ b/botocore/data/dlm/2018-01-12/service-2.json @@ -521,7 +521,10 @@ "member":{"shape":"Time"}, "max":1 }, - "Timestamp":{"type":"timestamp"}, + "Timestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "UpdateLifecyclePolicyRequest":{ "type":"structure", "required":["PolicyId"], @@ -556,5 +559,5 @@ } } }, - "documentation":"Amazon Data Lifecycle Manager

With Amazon Data Lifecycle Manager, you can manage the lifecycle of your AWS resources. You create lifecycle policies, which are used to automate operations on the specified resources.

Amazon DLM supports Amazon EBS volumes and snapshots. For information about using Amazon DLM with Amazon EBS, see Automating the Amazon EBS Snapshot Lifecycle in the Amazon EC2 User Guide.

" + "documentation":"Amazon Data Lifecycle Manager

With Amazon Data Lifecycle Manager, you can manage the lifecycle of your AWS resources. You create lifecycle policies, which are used to automate operations on the specified resources.

Amazon DLM supports Amazon EBS volumes and snapshots. For information about using Amazon DLM with Amazon EBS, see Automating the Amazon EBS Snapshot Lifecycle in the Amazon EC2 User Guide.

" } diff --git a/botocore/data/dms/2016-01-01/waiters-2.json b/botocore/data/dms/2016-01-01/waiters-2.json index 00727bc1..ebfc736e 100644 --- a/botocore/data/dms/2016-01-01/waiters-2.json +++ b/botocore/data/dms/2016-01-01/waiters-2.json @@ -4,22 +4,22 @@ "TestConnectionSucceeds":{ "acceptors":[ { - "argument":"Connection.Status", + "argument":"Connections[].Status", "expected":"successful", - "matcher":"path", + "matcher":"pathAll", "state":"success" }, { - "argument":"Connection.Status", + "argument":"Connections[].Status", "expected":"failed", - "matcher":"path", + "matcher":"pathAny", "state":"failure" } ], "delay":5, "description":"Wait until testing connection succeeds.", "maxAttempts":60, - "operation":"TestConnection" + "operation":"DescribeConnections" }, "EndpointDeleted":{ "acceptors":[ diff --git a/botocore/data/docdb/2014-10-31/paginators-1.json b/botocore/data/docdb/2014-10-31/paginators-1.json new file mode 100644 index 00000000..f747ff67 --- /dev/null +++ b/botocore/data/docdb/2014-10-31/paginators-1.json @@ -0,0 +1,40 @@ +{ + "pagination": { + "DescribeDBClusters": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusters" + }, + "DescribeDBEngineVersions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBEngineVersions" + }, + "DescribeDBInstances": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBInstances" + }, + "DescribeDBSubnetGroups": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBSubnetGroups" + }, + "DescribeEvents": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "Events" + }, + "DescribeOrderableDBInstanceOptions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "OrderableDBInstanceOptions" + } + } +} diff 
--git a/botocore/data/docdb/2014-10-31/service-2.json b/botocore/data/docdb/2014-10-31/service-2.json new file mode 100644 index 00000000..777a9e52 --- /dev/null +++ b/botocore/data/docdb/2014-10-31/service-2.json @@ -0,0 +1,3720 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-10-31", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon DocDB", + "serviceFullName":"Amazon DocumentDB with MongoDB compatibility", + "serviceId":"DocDB", + "signatureVersion":"v4", + "signingName":"rds", + "uid":"docdb-2014-10-31", + "xmlNamespace":"http://rds.amazonaws.com/doc/2014-10-31/" + }, + "operations":{ + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Adds metadata tags to an Amazon DocumentDB resource. You can use these tags with cost allocation reporting to track costs that are associated with Amazon DocumentDB resources, or in a Condition statement in an AWS Identity and Access Management (IAM) policy for Amazon DocumentDB.

" + }, + "ApplyPendingMaintenanceAction":{ + "name":"ApplyPendingMaintenanceAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ApplyPendingMaintenanceActionMessage"}, + "output":{ + "shape":"ApplyPendingMaintenanceActionResult", + "resultWrapper":"ApplyPendingMaintenanceActionResult" + }, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Applies a pending maintenance action to a resource (for example, to a DB instance).

" + }, + "CopyDBClusterParameterGroup":{ + "name":"CopyDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBClusterParameterGroupMessage"}, + "output":{ + "shape":"CopyDBClusterParameterGroupResult", + "resultWrapper":"CopyDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ], + "documentation":"

Copies the specified DB cluster parameter group.

" + }, + "CopyDBClusterSnapshot":{ + "name":"CopyDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBClusterSnapshotMessage"}, + "output":{ + "shape":"CopyDBClusterSnapshotResult", + "resultWrapper":"CopyDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ], + "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

To cancel the copy operation after it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in the copying status.

" + }, + "CreateDBCluster":{ + "name":"CreateDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterMessage"}, + "output":{ + "shape":"CreateDBClusterResult", + "resultWrapper":"CreateDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBClusterParameterGroupNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"} + ], + "documentation":"

Creates a new Amazon DocumentDB DB cluster.

" + }, + "CreateDBClusterParameterGroup":{ + "name":"CreateDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterParameterGroupMessage"}, + "output":{ + "shape":"CreateDBClusterParameterGroupResult", + "resultWrapper":"CreateDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ], + "documentation":"

Creates a new DB cluster parameter group.

Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after you create it. After you create a DB cluster parameter group, you must associate it with your DB cluster. For the new DB cluster parameter group and associated settings to take effect, you must then reboot the DB instances in the DB cluster without failover.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon DocumentDB to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This step is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter.

" + }, + "CreateDBClusterSnapshot":{ + "name":"CreateDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterSnapshotMessage"}, + "output":{ + "shape":"CreateDBClusterSnapshotResult", + "resultWrapper":"CreateDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"} + ], + "documentation":"

Creates a snapshot of a DB cluster.

" + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ], + "documentation":"

Creates a new DB instance.

" + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ], + "documentation":"

Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two Availability Zones in the AWS Region.

" + }, + "DeleteDBCluster":{ + "name":"DeleteDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterMessage"}, + "output":{ + "shape":"DeleteDBClusterResult", + "resultWrapper":"DeleteDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"} + ], + "documentation":"

Deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

" + }, + "DeleteDBClusterParameterGroup":{ + "name":"DeleteDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.

" + }, + "DeleteDBClusterSnapshot":{ + "name":"DeleteDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterSnapshotMessage"}, + "output":{ + "shape":"DeleteDBClusterSnapshotResult", + "resultWrapper":"DeleteDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"} + ], + "documentation":"

Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

The DB cluster snapshot must be in the available state to be deleted.

" + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidDBClusterStateFault"} + ], + "documentation":"

Deletes a previously provisioned DB instance.

" + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ], + "documentation":"

Deletes a DB subnet group.

The specified database subnet group must not be associated with any DB instances.

" + }, + "DescribeDBClusterParameterGroups":{ + "name":"DescribeDBClusterParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterParameterGroupsMessage"}, + "output":{ + "shape":"DBClusterParameterGroupsMessage", + "resultWrapper":"DescribeDBClusterParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list contains only the description of the specified DB cluster parameter group.

" + }, + "DescribeDBClusterParameters":{ + "name":"DescribeDBClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterParametersMessage"}, + "output":{ + "shape":"DBClusterParameterGroupDetails", + "resultWrapper":"DescribeDBClusterParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Returns the detailed parameter list for a particular DB cluster parameter group.

" + }, + "DescribeDBClusterSnapshotAttributes":{ + "name":"DescribeDBClusterSnapshotAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterSnapshotAttributesMessage"}, + "output":{ + "shape":"DescribeDBClusterSnapshotAttributesResult", + "resultWrapper":"DescribeDBClusterSnapshotAttributesResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotNotFoundFault"} + ], + "documentation":"

Returns a list of DB cluster snapshot attribute names and values for a manual DB cluster snapshot.

When you share snapshots with other AWS accounts, DescribeDBClusterSnapshotAttributes returns the restore attribute and a list of IDs for the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If all is included in the list of values for the restore attribute, then the manual DB cluster snapshot is public and can be copied or restored by all AWS accounts.

" + }, + "DescribeDBClusterSnapshots":{ + "name":"DescribeDBClusterSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterSnapshotsMessage"}, + "output":{ + "shape":"DBClusterSnapshotMessage", + "resultWrapper":"DescribeDBClusterSnapshotsResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotNotFoundFault"} + ], + "documentation":"

Returns information about DB cluster snapshots. This API operation supports pagination.

" + }, + "DescribeDBClusters":{ + "name":"DescribeDBClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClustersMessage"}, + "output":{ + "shape":"DBClusterMessage", + "resultWrapper":"DescribeDBClustersResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Returns information about provisioned Amazon DocumentDB DB clusters. This API operation supports pagination.

" + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + }, + "documentation":"

Returns a list of the available DB engines.

" + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ], + "documentation":"

Returns information about provisioned Amazon DocumentDB instances. This API supports pagination.

" + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ], + "documentation":"

Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

" + }, + "DescribeEngineDefaultClusterParameters":{ + "name":"DescribeEngineDefaultClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultClusterParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultClusterParametersResult", + "resultWrapper":"DescribeEngineDefaultClusterParametersResult" + }, + "documentation":"

Returns the default engine and system parameter information for the cluster database engine.

" + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + }, + "documentation":"

Displays a list of categories for all event source types, or, if specified, for a specified source type.

" + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + }, + "documentation":"

Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. You can obtain events specific to a particular DB instance, DB security group, DB snapshot, or DB parameter group by providing the name as a parameter. By default, the events of the past hour are returned.

" + }, + "DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + }, + "documentation":"

Returns a list of orderable DB instance options for the specified engine.

" + }, + "DescribePendingMaintenanceActions":{ + "name":"DescribePendingMaintenanceActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePendingMaintenanceActionsMessage"}, + "output":{ + "shape":"PendingMaintenanceActionsMessage", + "resultWrapper":"DescribePendingMaintenanceActionsResult" + }, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.

" + }, + "FailoverDBCluster":{ + "name":"FailoverDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FailoverDBClusterMessage"}, + "output":{ + "shape":"FailoverDBClusterResult", + "resultWrapper":"FailoverDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBInstanceStateFault"} + ], + "documentation":"

Forces a failover for a DB cluster.

A failover for a DB cluster promotes one of the Amazon DocumentDB replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).

If the primary instance fails, Amazon DocumentDB automatically fails over to an Amazon DocumentDB replica, if one exists. You can force a failover when you want to simulate a failure of a primary instance for testing.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Lists all tags on an Amazon DocumentDB resource.

" + }, + "ModifyDBCluster":{ + "name":"ModifyDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterMessage"}, + "output":{ + "shape":"ModifyDBClusterResult", + "resultWrapper":"ModifyDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"DBClusterParameterGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBClusterAlreadyExistsFault"} + ], + "documentation":"

Modifies a setting for an Amazon DocumentDB DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

" + }, + "ModifyDBClusterParameterGroup":{ + "name":"ModifyDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterParameterGroupMessage"}, + "output":{ + "shape":"DBClusterParameterGroupNameMessage", + "resultWrapper":"ModifyDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ], + "documentation":"

Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot or maintenance window before the change can take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon DocumentDB to fully complete the create action before the parameter group is used as the default for a new DB cluster. This step is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter.

" + }, + "ModifyDBClusterSnapshotAttribute":{ + "name":"ModifyDBClusterSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterSnapshotAttributeMessage"}, + "output":{ + "shape":"ModifyDBClusterSnapshotAttributeResult", + "resultWrapper":"ModifyDBClusterSnapshotAttributeResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"SharedSnapshotQuotaExceededFault"} + ], + "documentation":"

Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

To share a manual DB cluster snapshot with other AWS accounts, specify restore as the AttributeName, and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value all to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

" + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"DBUpgradeDependencyFailureFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"CertificateNotFoundFault"} + ], + "documentation":"

Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

" + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ], + "documentation":"

Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two Availability Zones in the AWS Region.

" + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ], + "documentation":"

You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain changes, or if you change the DB cluster parameter group that is associated with the DB instance, you must reboot the instance for the changes to take effect.

Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

" + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Removes metadata tags from an Amazon DocumentDB resource.

" + }, + "ResetDBClusterParameterGroup":{ + "name":"ResetDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBClusterParameterGroupMessage"}, + "output":{ + "shape":"DBClusterParameterGroupNameMessage", + "resultWrapper":"ResetDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters, submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

When you reset the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance reboot.

" + }, + "RestoreDBClusterFromSnapshot":{ + "name":"RestoreDBClusterFromSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBClusterFromSnapshotMessage"}, + "output":{ + "shape":"RestoreDBClusterFromSnapshotResult", + "resultWrapper":"RestoreDBClusterFromSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InsufficientDBClusterCapacityFault"}, + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"KMSKeyNotAccessibleFault"} + ], + "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

" + }, + "RestoreDBClusterToPointInTime":{ + "name":"RestoreDBClusterToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBClusterToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBClusterToPointInTimeResult", + "resultWrapper":"RestoreDBClusterToPointInTimeResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InsufficientDBClusterCapacityFault"}, + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"StorageQuotaExceededFault"} + ], + "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

" + } + }, + "shapes":{ + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{ + "shape":"String", + "documentation":"

The Amazon DocumentDB resource that the tags are added to. This value is an Amazon Resource Name (ARN).

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the Amazon DocumentDB resource.

" + } + }, + "documentation":"

Represents the input to AddTagsToResource.

" + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "ApplyPendingMaintenanceActionMessage":{ + "type":"structure", + "required":[ + "ResourceIdentifier", + "ApplyAction", + "OptInType" + ], + "members":{ + "ResourceIdentifier":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource that the pending maintenance action applies to.

" + }, + "ApplyAction":{ + "shape":"String", + "documentation":"

The pending maintenance action to apply to this resource.

Valid values: system-update, db-upgrade

" + }, + "OptInType":{ + "shape":"String", + "documentation":"

A value that specifies the type of opt-in request or undoes an opt-in request. An opt-in request of type immediate can't be undone.

Valid values:

" + } + }, + "documentation":"

Represents the input to ApplyPendingMaintenanceAction.

" + }, + "ApplyPendingMaintenanceActionResult":{ + "type":"structure", + "members":{ + "ResourcePendingMaintenanceActions":{"shape":"ResourcePendingMaintenanceActions"} + } + }, + "AttributeValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AttributeValue" + } + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified CIDR IP or Amazon EC2 security group isn't authorized for the specified DB security group.

Amazon DocumentDB also might not be authorized to perform necessary actions on your behalf using IAM.

", + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the Availability Zone.

" + } + }, + "documentation":"

Information about an Availability Zone.

", + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "AvailabilityZones":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CertificateNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

CertificateIdentifier doesn't refer to an existing certificate.

", + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CloudwatchLogsExportConfiguration":{ + "type":"structure", + "members":{ + "EnableLogTypes":{ + "shape":"LogTypeList", + "documentation":"

The list of log types to enable.

" + }, + "DisableLogTypes":{ + "shape":"LogTypeList", + "documentation":"

The list of log types to disable.

" + } + }, + "documentation":"

The configuration setting for the log types to be enabled for export to Amazon CloudWatch Logs for a specific DB instance or DB cluster.

The EnableLogTypes and DisableLogTypes arrays determine which logs are exported (or not exported) to CloudWatch Logs. The values within these arrays depend on the DB engine that is being used.

" + }, + "CopyDBClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "SourceDBClusterParameterGroupIdentifier", + "TargetDBClusterParameterGroupIdentifier", + "TargetDBClusterParameterGroupDescription" + ], + "members":{ + "SourceDBClusterParameterGroupIdentifier":{ + "shape":"String", + "documentation":"

The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter group.

Constraints:

" + }, + "TargetDBClusterParameterGroupIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the copied DB cluster parameter group.

Constraints:

Example: my-cluster-param-group1

" + }, + "TargetDBClusterParameterGroupDescription":{ + "shape":"String", + "documentation":"

A description for the copied DB cluster parameter group.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags that are to be assigned to the parameter group.

" + } + }, + "documentation":"

Represents the input to CopyDBClusterParameterGroup.

" + }, + "CopyDBClusterParameterGroupResult":{ + "type":"structure", + "members":{ + "DBClusterParameterGroup":{"shape":"DBClusterParameterGroup"} + } + }, + "CopyDBClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBClusterSnapshotIdentifier", + "TargetDBClusterSnapshotIdentifier" + ], + "members":{ + "SourceDBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster snapshot to copy. This parameter is not case sensitive.

You can't copy an encrypted, shared DB cluster snapshot from one AWS Region to another.

Constraints:

Example: my-cluster-snapshot1

" + }, + "TargetDBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the new DB cluster snapshot to create from the source DB cluster snapshot. This parameter is not case sensitive.

Constraints:

Example: my-cluster-snapshot2

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key ID for an encrypted DB cluster snapshot. The AWS KMS key ID is the Amazon Resource Name (ARN), AWS KMS key identifier, or the AWS KMS key alias for the AWS KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new AWS KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same AWS KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

To copy an encrypted DB cluster snapshot to another AWS Region, set KmsKeyId to the AWS KMS key ID that you want to use to encrypt the copy of the DB cluster snapshot in the destination Region. AWS KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one Region in another Region.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, an error is returned.

" + }, + "PreSignedUrl":{ + "shape":"String", + "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source DB cluster snapshot to copy. You must use the PreSignedUrl parameter when copying an encrypted DB cluster snapshot from another AWS Region.

The presigned URL must be a valid request for the CopyDBSClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The presigned URL request must contain the following parameter values:

" + }, + "CopyTags":{ + "shape":"BooleanOptional", + "documentation":"

Set to true to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot, and otherwise false. The default is false.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the DB cluster snapshot.

" + } + }, + "documentation":"

Represents the input to CopyDBClusterSnapshot.

" + }, + "CopyDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "CreateDBClusterMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "Engine" + ], + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

A list of Amazon EC2 Availability Zones that instances in the DB cluster can be created in.

" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The number of days for which automated backups are retained. You must specify a minimum value of 1.

Default: 1

Constraints:

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier. This parameter is stored as a lowercase string.

Constraints:

Example: my-cluster

" + }, + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group to associate with this DB cluster.

" + }, + "VpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of EC2 VPC security groups to associate with this DB cluster.

" + }, + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

A DB subnet group to associate with this DB cluster.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

" + }, + "Engine":{ + "shape":"String", + "documentation":"

The name of the database engine to be used for this DB cluster.

Valid values: docdb

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version number of the database engine to use.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port number on which the instances in the DB cluster accept connections.

" + }, + "MasterUsername":{ + "shape":"String", + "documentation":"

The name of the master user for the DB cluster.

Constraints:

" + }, + "MasterUserPassword":{ + "shape":"String", + "documentation":"

The password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

Constraints: Must contain from 8 to 41 characters.

" + }, + "PreferredBackupWindow":{ + "shape":"String", + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region.

Constraints:

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the DB cluster.

" + }, + "StorageEncrypted":{ + "shape":"BooleanOptional", + "documentation":"

Specifies whether the DB cluster is encrypted.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier for an encrypted DB cluster.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are creating a DB cluster using the same AWS account that owns the AWS KMS encryption key that is used to encrypt the new DB cluster, you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

If an encryption key is not specified in KmsKeyId:

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

If you create a replica of an encrypted DB cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the replica in that AWS Region.

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

A list of log types that need to be enabled for exporting to Amazon CloudWatch Logs.

" + } + }, + "documentation":"

Represents the input to CreateDBCluster.

" + }, + "CreateDBClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBClusterParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group.

Constraints:

This value is stored as a lowercase string.

" + }, + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The DB cluster parameter group family name.

" + }, + "Description":{ + "shape":"String", + "documentation":"

The description for the DB cluster parameter group.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the DB cluster parameter group.

" + } + }, + "documentation":"

Represents the input of CreateDBClusterParameterGroup.

" + }, + "CreateDBClusterParameterGroupResult":{ + "type":"structure", + "members":{ + "DBClusterParameterGroup":{"shape":"DBClusterParameterGroup"} + } + }, + "CreateDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "CreateDBClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "DBClusterSnapshotIdentifier", + "DBClusterIdentifier" + ], + "members":{ + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster snapshot. This parameter is stored as a lowercase string.

Constraints:

Example: my-cluster-snapshot1

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster to create a snapshot for. This parameter is not case sensitive.

Constraints:

Example: my-cluster

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the DB cluster snapshot.

" + } + }, + "documentation":"

Represents the input of CreateDBClusterSnapshot.

" + }, + "CreateDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBInstanceClass", + "Engine", + "DBClusterIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

Example: mydbinstance

" + }, + "DBInstanceClass":{ + "shape":"String", + "documentation":"

The compute and memory capacity of the DB instance; for example, db.m4.large.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

The name of the database engine to be used for this instance.

Valid value: docdb

" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

The Amazon EC2 Availability Zone that the DB instance is created in.

Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.

Example: us-east-1d

Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same AWS Region as the current endpoint.

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" + }, + "AutoMinorVersionUpgrade":{ + "shape":"BooleanOptional", + "documentation":"

Indicates that minor engine upgrades are applied automatically to the DB instance during the maintenance window.

Default: true

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the DB instance.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster that the instance will belong to.

" + }, + "PromotionTier":{ + "shape":"IntegerOptional", + "documentation":"

A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.

Default: 1

Valid values: 0-15

" + } + }, + "documentation":"

Represents the input to CreateDBInstance.

" + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name for the DB subnet group. This value is stored as a lowercase string.

Constraints: Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. Must not be default.

Example: mySubnetgroup

" + }, + "DBSubnetGroupDescription":{ + "shape":"String", + "documentation":"

The description for the DB subnet group.

" + }, + "SubnetIds":{ + "shape":"SubnetIdentifierList", + "documentation":"

The Amazon EC2 subnet IDs for the DB subnet group.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the DB subnet group.

" + } + }, + "documentation":"

Represents the input to CreateDBSubnetGroup.

" + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "DBCluster":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

Provides the list of Amazon EC2 Availability Zones that instances in the DB cluster can be created in.

" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the number of days for which automatic DB snapshots are retained.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

Contains a user-supplied DB cluster identifier. This identifier is the unique key that identifies a DB cluster.

" + }, + "DBClusterParameterGroup":{ + "shape":"String", + "documentation":"

Specifies the name of the DB cluster parameter group for the DB cluster.

" + }, + "DBSubnetGroup":{ + "shape":"String", + "documentation":"

Specifies information on the subnet group that is associated with the DB cluster, including the name, description, and subnets in the subnet group.

" + }, + "Status":{ + "shape":"String", + "documentation":"

Specifies the current state of this DB cluster.

" + }, + "PercentProgress":{ + "shape":"String", + "documentation":"

Specifies the progress of the operation as a percentage.

" + }, + "EarliestRestorableTime":{ + "shape":"TStamp", + "documentation":"

The earliest time to which a database can be restored with point-in-time restore.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

Specifies the connection endpoint for the primary instance of the DB cluster.

" + }, + "ReaderEndpoint":{ + "shape":"String", + "documentation":"

The reader endpoint for the DB cluster. The reader endpoint for a DB cluster load balances connections across the Amazon DocumentDB replicas that are available in a DB cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the DB cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your DB cluster.

If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.

" + }, + "MultiAZ":{ + "shape":"Boolean", + "documentation":"

Specifies whether the DB cluster has instances in multiple Availability Zones.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

Provides the name of the database engine to be used for this DB cluster.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

Indicates the database engine version.

" + }, + "LatestRestorableTime":{ + "shape":"TStamp", + "documentation":"

Specifies the latest time to which a database can be restored with point-in-time restore.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the port that the database engine is listening on.

" + }, + "MasterUsername":{ + "shape":"String", + "documentation":"

Contains the master user name for the DB cluster.

" + }, + "PreferredBackupWindow":{ + "shape":"String", + "documentation":"

Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

" + }, + "DBClusterMembers":{ + "shape":"DBClusterMemberList", + "documentation":"

Provides the list of instances that make up the DB cluster.

" + }, + "VpcSecurityGroups":{ + "shape":"VpcSecurityGroupMembershipList", + "documentation":"

Provides a list of virtual private cloud (VPC) security groups that the DB cluster belongs to.

" + }, + "HostedZoneId":{ + "shape":"String", + "documentation":"

Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

" + }, + "StorageEncrypted":{ + "shape":"Boolean", + "documentation":"

Specifies whether the DB cluster is encrypted.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB cluster.

" + }, + "DbClusterResourceId":{ + "shape":"String", + "documentation":"

The AWS Region-unique, immutable identifier for the DB cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB cluster is accessed.

" + }, + "DBClusterArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB cluster.

" + }, + "AssociatedRoles":{ + "shape":"DBClusterRoles", + "documentation":"

Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other AWS services on your behalf.

" + }, + "ClusterCreateTime":{ + "shape":"TStamp", + "documentation":"

Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

" + }, + "EnabledCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

A list of log types that this DB cluster is configured to export to Amazon CloudWatch Logs.

" + } + }, + "documentation":"

Detailed information about a DB cluster.

", + "wrapper":true + }, + "DBClusterAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You already have a DB cluster with the given identifier.

", + "error":{ + "code":"DBClusterAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterList":{ + "type":"list", + "member":{ + "shape":"DBCluster", + "locationName":"DBCluster" + } + }, + "DBClusterMember":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

Specifies the instance identifier for this member of the DB cluster.

" + }, + "IsClusterWriter":{ + "shape":"Boolean", + "documentation":"

A value that is true if the cluster member is the primary instance for the DB cluster and false otherwise.

" + }, + "DBClusterParameterGroupStatus":{ + "shape":"String", + "documentation":"

Specifies the status of the DB cluster parameter group for this member of the DB cluster.

" + }, + "PromotionTier":{ + "shape":"IntegerOptional", + "documentation":"

A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.

" + } + }, + "documentation":"

Contains information about an instance that is part of a DB cluster.

", + "wrapper":true + }, + "DBClusterMemberList":{ + "type":"list", + "member":{ + "shape":"DBClusterMember", + "locationName":"DBClusterMember" + } + }, + "DBClusterMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBClusters":{ + "shape":"DBClusterList", + "documentation":"

A list of DB clusters.

" + } + }, + "documentation":"

Represents the output of DescribeDBClusters.

" + }, + "DBClusterNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBClusterIdentifier doesn't refer to an existing DB cluster.

", + "error":{ + "code":"DBClusterNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterParameterGroup":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

Provides the name of the DB cluster parameter group.

" + }, + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

Provides the name of the DB parameter group family that this DB cluster parameter group is compatible with.

" + }, + "Description":{ + "shape":"String", + "documentation":"

Provides the customer-specified description for this DB cluster parameter group.

" + }, + "DBClusterParameterGroupArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB cluster parameter group.

" + } + }, + "documentation":"

Detailed information about a DB cluster parameter group.

", + "wrapper":true + }, + "DBClusterParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{ + "shape":"ParametersList", + "documentation":"

Provides a list of parameters for the DB cluster parameter group.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the output of DBClusterParameterGroup.

" + }, + "DBClusterParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBClusterParameterGroup", + "locationName":"DBClusterParameterGroup" + } + }, + "DBClusterParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of a DB cluster parameter group.

Constraints:

This value is stored as a lowercase string.

" + } + }, + "documentation":"

Contains the name of a DB cluster parameter group.

" + }, + "DBClusterParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBClusterParameterGroupName doesn't refer to an existing DB cluster parameter group.

", + "error":{ + "code":"DBClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBClusterParameterGroups":{ + "shape":"DBClusterParameterGroupList", + "documentation":"

A list of DB cluster parameter groups.

" + } + }, + "documentation":"

Represents the output of DBClusterParameterGroups.

" + }, + "DBClusterQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB cluster can't be created because you have reached the maximum allowed quota of DB clusters.

", + "error":{ + "code":"DBClusterQuotaExceededFault", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "DBClusterRole":{ + "type":"structure", + "members":{ + "RoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.

" + }, + "Status":{ + "shape":"String", + "documentation":"

Describes the state of association between the IAM role and the DB cluster. The Status property returns one of the following values:

" + } + }, + "documentation":"

Describes an AWS Identity and Access Management (IAM) role that is associated with a DB cluster.

" + }, + "DBClusterRoles":{ + "type":"list", + "member":{ + "shape":"DBClusterRole", + "locationName":"DBClusterRole" + } + }, + "DBClusterSnapshot":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

Provides the list of Amazon EC2 Availability Zones that instances in the DB cluster snapshot can be restored in.

" + }, + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

Specifies the identifier for the DB cluster snapshot.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from.

" + }, + "SnapshotCreateTime":{ + "shape":"TStamp", + "documentation":"

Provides the time when the snapshot was taken, in UTC.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

Specifies the name of the database engine.

" + }, + "Status":{ + "shape":"String", + "documentation":"

Specifies the status of this DB cluster snapshot.

" + }, + "Port":{ + "shape":"Integer", + "documentation":"

Specifies the port that the DB cluster was listening on at the time of the snapshot.

" + }, + "VpcId":{ + "shape":"String", + "documentation":"

Provides the virtual private cloud (VPC) ID that is associated with the DB cluster snapshot.

" + }, + "ClusterCreateTime":{ + "shape":"TStamp", + "documentation":"

Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

" + }, + "MasterUsername":{ + "shape":"String", + "documentation":"

Provides the master user name for the DB cluster snapshot.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

Provides the version of the database engine for this DB cluster snapshot.

" + }, + "SnapshotType":{ + "shape":"String", + "documentation":"

Provides the type of the DB cluster snapshot.

" + }, + "PercentProgress":{ + "shape":"Integer", + "documentation":"

Specifies the percentage of the estimated data that has been transferred.

" + }, + "StorageEncrypted":{ + "shape":"Boolean", + "documentation":"

Specifies whether the DB cluster snapshot is encrypted.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB cluster snapshot.

" + }, + "DBClusterSnapshotArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB cluster snapshot.

" + }, + "SourceDBClusterSnapshotArn":{ + "shape":"String", + "documentation":"

If the DB cluster snapshot was copied from a source DB cluster snapshot, the ARN for the source DB cluster snapshot; otherwise, a null value.

" + } + }, + "documentation":"

Detailed information about a DB cluster snapshot.

", + "wrapper":true + }, + "DBClusterSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You already have a DB cluster snapshot with the given identifier.

", + "error":{ + "code":"DBClusterSnapshotAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterSnapshotAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"String", + "documentation":"

The name of the manual DB cluster snapshot attribute.

The attribute named restore refers to the list of AWS accounts that have permission to copy or restore the manual DB cluster snapshot.

" + }, + "AttributeValues":{ + "shape":"AttributeValueList", + "documentation":"

The values for the manual DB cluster snapshot attribute.

If the AttributeName field is set to restore, then this element returns a list of IDs of the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If a value of all is in the list, then the manual DB cluster snapshot is public and available for any AWS account to copy or restore.

" + } + }, + "documentation":"

Contains the name and values of a manual DB cluster snapshot attribute.

Manual DB cluster snapshot attributes are used to authorize other AWS accounts to restore a manual DB cluster snapshot.

" + }, + "DBClusterSnapshotAttributeList":{ + "type":"list", + "member":{ + "shape":"DBClusterSnapshotAttribute", + "locationName":"DBClusterSnapshotAttribute" + } + }, + "DBClusterSnapshotAttributesResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster snapshot that the attributes apply to.

" + }, + "DBClusterSnapshotAttributes":{ + "shape":"DBClusterSnapshotAttributeList", + "documentation":"

The list of attributes and values for the DB cluster snapshot.

" + } + }, + "documentation":"

Detailed information about the attributes that are associated with a DB cluster snapshot.

", + "wrapper":true + }, + "DBClusterSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBClusterSnapshot", + "locationName":"DBClusterSnapshot" + } + }, + "DBClusterSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBClusterSnapshots":{ + "shape":"DBClusterSnapshotList", + "documentation":"

Provides a list of DB cluster snapshots.

" + } + }, + "documentation":"

Represents the output of DescribeDBClusterSnapshots.

" + }, + "DBClusterSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBClusterSnapshotIdentifier doesn't refer to an existing DB cluster snapshot.

", + "error":{ + "code":"DBClusterSnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

The name of the database engine.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version number of the database engine.

" + }, + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The name of the DB parameter group family for the database engine.

" + }, + "DBEngineDescription":{ + "shape":"String", + "documentation":"

The description of the database engine.

" + }, + "DBEngineVersionDescription":{ + "shape":"String", + "documentation":"

The description of the database engine version.

" + }, + "ValidUpgradeTarget":{ + "shape":"ValidUpgradeTargetList", + "documentation":"

A list of engine versions that this database engine version can be upgraded to.

" + }, + "ExportableLogTypes":{ + "shape":"LogTypeList", + "documentation":"

The types of logs that the database engine has available for export to Amazon CloudWatch Logs.

" + }, + "SupportsLogExportsToCloudwatchLogs":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.

" + } + }, + "documentation":"

Detailed information about a DB engine version.

" + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBEngineVersions":{ + "shape":"DBEngineVersionList", + "documentation":"

Detailed information about one or more DB engine versions.

" + } + }, + "documentation":"

Represents the output of DescribeDBEngineVersions.

" + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

Contains a user-provided database identifier. This identifier is the unique key that identifies a DB instance.

" + }, + "DBInstanceClass":{ + "shape":"String", + "documentation":"

Contains the name of the compute and memory capacity class of the DB instance.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

Provides the name of the database engine to be used for this DB instance.

" + }, + "DBInstanceStatus":{ + "shape":"String", + "documentation":"

Specifies the current state of this database.

" + }, + "Endpoint":{ + "shape":"Endpoint", + "documentation":"

Specifies the connection endpoint.

" + }, + "InstanceCreateTime":{ + "shape":"TStamp", + "documentation":"

Provides the date and time that the DB instance was created.

" + }, + "PreferredBackupWindow":{ + "shape":"String", + "documentation":"

Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

" + }, + "BackupRetentionPeriod":{ + "shape":"Integer", + "documentation":"

Specifies the number of days for which automatic DB snapshots are retained.

" + }, + "VpcSecurityGroups":{ + "shape":"VpcSecurityGroupMembershipList", + "documentation":"

Provides a list of VPC security group elements that the DB instance belongs to.

" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

Specifies the name of the Availability Zone that the DB instance is located in.

" + }, + "DBSubnetGroup":{ + "shape":"DBSubnetGroup", + "documentation":"

Specifies information on the subnet group that is associated with the DB instance, including the name, description, and subnets in the subnet group.

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

" + }, + "PendingModifiedValues":{ + "shape":"PendingModifiedValues", + "documentation":"

Specifies that changes to the DB instance are pending. This element is included only when changes are pending. Specific changes are identified by subelements.

" + }, + "LatestRestorableTime":{ + "shape":"TStamp", + "documentation":"

Specifies the latest time to which a database can be restored with point-in-time restore.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

Indicates the database engine version.

" + }, + "AutoMinorVersionUpgrade":{ + "shape":"Boolean", + "documentation":"

Indicates that minor version patches are applied automatically.

" + }, + "PubliclyAccessible":{ + "shape":"Boolean", + "documentation":"

Specifies the availability options for the DB instance. A value of true specifies an internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

" + }, + "StatusInfos":{ + "shape":"DBInstanceStatusInfoList", + "documentation":"

The status of a read replica. If the instance is not a read replica, this is blank.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

Contains the name of the DB cluster that the DB instance is a member of if the DB instance is a member of a DB cluster.

" + }, + "StorageEncrypted":{ + "shape":"Boolean", + "documentation":"

Specifies whether the DB instance is encrypted.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB instance.

" + }, + "DbiResourceId":{ + "shape":"String", + "documentation":"

The AWS Region-unique, immutable identifier for the DB instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB instance is accessed.

" + }, + "PromotionTier":{ + "shape":"IntegerOptional", + "documentation":"

A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.

" + }, + "DBInstanceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB instance.

" + }, + "EnabledCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

A list of log types that this DB instance is configured to export to Amazon CloudWatch Logs.

" + } + }, + "documentation":"

Detailed information about a DB instance.

", + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You already have a DB instance with the given identifier.

", + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBInstances":{ + "shape":"DBInstanceList", + "documentation":"

Detailed information about one or more DB instances.

" + } + }, + "documentation":"

Represents the output of DescribeDBInstances.

" + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBInstanceIdentifier doesn't refer to an existing DB instance.

", + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBInstanceStatusInfo":{ + "type":"structure", + "members":{ + "StatusType":{ + "shape":"String", + "documentation":"

This value is currently \"read replication.\"

" + }, + "Normal":{ + "shape":"Boolean", + "documentation":"

A Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

" + }, + "Status":{ + "shape":"String", + "documentation":"

Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

" + }, + "Message":{ + "shape":"String", + "documentation":"

Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.

" + } + }, + "documentation":"

Provides a list of status information for a DB instance.

" + }, + "DBInstanceStatusInfoList":{ + "type":"list", + "member":{ + "shape":"DBInstanceStatusInfo", + "locationName":"DBInstanceStatusInfo" + } + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

A DB parameter group with the same name already exists.

", + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBParameterGroupName doesn't refer to an existing DB parameter group.

", + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

This request would cause you to exceed the allowed number of DB parameter groups.

", + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBSecurityGroupName doesn't refer to an existing DB security group.

", + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBSnapshotIdentifier is already being used by an existing snapshot.

", + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBSnapshotIdentifier doesn't refer to an existing DB snapshot.

", + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name of the DB subnet group.

" + }, + "DBSubnetGroupDescription":{ + "shape":"String", + "documentation":"

Provides the description of the DB subnet group.

" + }, + "VpcId":{ + "shape":"String", + "documentation":"

Provides the virtual private cloud (VPC) ID of the DB subnet group.

" + }, + "SubnetGroupStatus":{ + "shape":"String", + "documentation":"

Provides the status of the DB subnet group.

" + }, + "Subnets":{ + "shape":"SubnetList", + "documentation":"

Detailed information about one or more subnets within a DB subnet group.

" + }, + "DBSubnetGroupArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Identifier (ARN) for the DB subnet group.

" + } + }, + "documentation":"

Detailed information about a DB subnet group.

", + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBSubnetGroupName is already being used by an existing DB subnet group.

", + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "documentation":"

Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

", + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBSubnetGroups":{ + "shape":"DBSubnetGroups", + "documentation":"

Detailed information about one or more DB subnet groups.

" + } + }, + "documentation":"

Represents the output of DescribeDBSubnetGroups.

" + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBSubnetGroupName doesn't refer to an existing DB subnet group.

", + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The request would cause you to exceed the allowed number of DB subnet groups.

", + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The request would cause you to exceed the allowed number of subnets in a DB subnet group.

", + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB upgrade failed because a resource that the DB depends on can't be modified.

", + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier for the DB cluster to be deleted. This parameter isn't case sensitive.

Constraints:

" + }, + "SkipFinalSnapshot":{ + "shape":"Boolean", + "documentation":"

Determines whether a final DB cluster snapshot is created before the DB cluster is deleted. If true is specified, no DB cluster snapshot is created. If false is specified, a DB cluster snapshot is created before the DB cluster is deleted.

If SkipFinalSnapshot is false, you must specify a FinalDBSnapshotIdentifier parameter.

Default: false

" + }, + "FinalDBSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is set to false.

Specifying this parameter and also setting the SkipFinalShapshot parameter to true results in an error.

Constraints:

" + } + }, + "documentation":"

Represents the input to DeleteDBCluster.

" + }, + "DeleteDBClusterParameterGroupMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group.

Constraints:

" + } + }, + "documentation":"

Represents the input to DeleteDBClusterParameterGroup.

" + }, + "DeleteDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "DeleteDBClusterSnapshotMessage":{ + "type":"structure", + "required":["DBClusterSnapshotIdentifier"], + "members":{ + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster snapshot to delete.

Constraints: Must be the name of an existing DB cluster snapshot in the available state.

" + } + }, + "documentation":"

Represents the input to DeleteDBClusterSnapshot.

" + }, + "DeleteDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The DB instance identifier for the DB instance to be deleted. This parameter isn't case sensitive.

Constraints:

" + } + }, + "documentation":"

Represents the input to DeleteDBInstance.

" + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name of the database subnet group to delete.

You can't delete the default subnet group.

Constraints:

Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

" + } + }, + "documentation":"

Represents the input to DeleteDBSubnetGroup.

" + }, + "DescribeDBClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of a specific DB cluster parameter group to return details for.

Constraints:

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the input to DescribeDBClusterParameterGroups.

" + }, + "DescribeDBClusterParametersMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of a specific DB cluster parameter group to return parameter details for.

Constraints:

" + }, + "Source":{ + "shape":"String", + "documentation":"

A value that indicates to return only parameters for a specific source. Parameter sources can be engine, service, or customer.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the input to DescribeDBClusterParameters.

" + }, + "DescribeDBClusterSnapshotAttributesMessage":{ + "type":"structure", + "required":["DBClusterSnapshotIdentifier"], + "members":{ + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the DB cluster snapshot to describe the attributes for.

" + } + }, + "documentation":"

Represents the input to DescribeDBClusterSnapshotAttributes.

" + }, + "DescribeDBClusterSnapshotAttributesResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshotAttributesResult":{"shape":"DBClusterSnapshotAttributesResult"} + } + }, + "DescribeDBClusterSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The ID of the DB cluster to retrieve the list of DB cluster snapshots for. This parameter can't be used with the DBClusterSnapshotIdentifier parameter. This parameter is not case sensitive.

Constraints:

" + }, + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

A specific DB cluster snapshot identifier to describe. This parameter can't be used with the DBClusterIdentifier parameter. This value is stored as a lowercase string.

Constraints:

" + }, + "SnapshotType":{ + "shape":"String", + "documentation":"

The type of DB cluster snapshots to be returned. You can specify one of the following values:

If you don't specify a SnapshotType value, then both automated and manual DB cluster snapshots are returned. You can include shared DB cluster snapshots with these results by setting the IncludeShared parameter to true. You can include public DB cluster snapshots with these results by setting the IncludePublic parameter to true.

The IncludeShared and IncludePublic parameters don't apply for SnapshotType values of manual or automated. The IncludePublic parameter doesn't apply when SnapshotType is set to shared. The IncludeShared parameter doesn't apply when SnapshotType is set to public.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "IncludeShared":{ + "shape":"Boolean", + "documentation":"

Set to true to include shared manual DB cluster snapshots from other AWS accounts that this AWS account has been given permission to copy or restore, and otherwise false. The default is false.

" + }, + "IncludePublic":{ + "shape":"Boolean", + "documentation":"

Set to true to include manual DB cluster snapshots that are public and can be copied or restored by any AWS account, and otherwise false. The default is false.

" + } + }, + "documentation":"

Represents the input to DescribeDBClusterSnapshots.

" + }, + "DescribeDBClustersMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The user-provided DB cluster identifier. If this parameter is specified, information from only the specific DB cluster is returned. This parameter isn't case sensitive.

Constraints:

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

A filter that specifies one or more DB clusters to describe.

Supported filters:

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the input to DescribeDBClusters.

" + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

The database engine to return.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The database engine version to return.

Example: 5.1.49

" + }, + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The name of a specific DB parameter group family to return details for.

Constraints:

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DefaultOnly":{ + "shape":"Boolean", + "documentation":"

Indicates that only the default version of the specified engine or engine and major version combination is returned.

" + }, + "ListSupportedCharacterSets":{ + "shape":"BooleanOptional", + "documentation":"

If this parameter is specified and the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

" + }, + "ListSupportedTimezones":{ + "shape":"BooleanOptional", + "documentation":"

If this parameter is specified and the requested engine supports the TimeZone parameter for CreateDBInstance, the response includes a list of supported time zones for each engine version.

" + } + }, + "documentation":"

Represents the input to DescribeDBEngineVersions.

" + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The user-provided instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case sensitive.

Constraints:

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

A filter that specifies one or more DB instances to describe.

Supported filters:

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the input to DescribeDBInstances.

" + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name of the DB subnet group to return details for.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the input to DescribeDBSubnetGroups.

" + }, + "DescribeEngineDefaultClusterParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group family to return the engine parameter information for.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the input to DescribeEngineDefaultClusterParameters.

" + }, + "DescribeEngineDefaultClusterParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{ + "shape":"String", + "documentation":"

The type of source that is generating the events.

Valid values: db-instance, db-parameter-group, db-security-group, db-snapshot

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + } + }, + "documentation":"

Represents the input to DescribeEventCategories.

" + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the event source for which events are returned. If not specified, then all sources are included in the response.

Constraints:

" + }, + "SourceType":{ + "shape":"SourceType", + "documentation":"

The event source to retrieve events for. If no value is specified, all events are returned.

" + }, + "StartTime":{ + "shape":"TStamp", + "documentation":"

The beginning of the time interval to retrieve events for, specified in ISO 8601 format.

Example: 2009-07-08T18:00Z

" + }, + "EndTime":{ + "shape":"TStamp", + "documentation":"

The end of the time interval for which to retrieve events, specified in ISO 8601 format.

Example: 2009-07-08T18:00Z

" + }, + "Duration":{ + "shape":"IntegerOptional", + "documentation":"

The number of minutes to retrieve events for.

Default: 60

" + }, + "EventCategories":{ + "shape":"EventCategoriesList", + "documentation":"

A list of event categories that trigger notifications for an event notification subscription.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the input to DescribeEvents.

" + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

The name of the engine to retrieve DB instance options for.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The engine version filter value. Specify this parameter to show only the available offerings that match the specified engine version.

" + }, + "DBInstanceClass":{ + "shape":"String", + "documentation":"

The DB instance class filter value. Specify this parameter to show only the available offerings that match the specified DB instance class.

" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

The license model filter value. Specify this parameter to show only the available offerings that match the specified license model.

" + }, + "Vpc":{ + "shape":"BooleanOptional", + "documentation":"

The virtual private cloud (VPC) filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the input to DescribeOrderableDBInstanceOptions.

" + }, + "DescribePendingMaintenanceActionsMessage":{ + "type":"structure", + "members":{ + "ResourceIdentifier":{ + "shape":"String", + "documentation":"

The ARN of a resource to return pending maintenance actions for.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

A filter that specifies one or more resources to return pending maintenance actions for.

Supported filters:

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + } + }, + "documentation":"

Represents the input to DescribePendingMaintenanceActions.

" + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{ + "shape":"String", + "documentation":"

Specifies the DNS address of the DB instance.

" + }, + "Port":{ + "shape":"Integer", + "documentation":"

Specifies the port that the database engine is listening on.

" + }, + "HostedZoneId":{ + "shape":"String", + "documentation":"

Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

" + } + }, + "documentation":"

Network information for accessing a DB cluster or DB instance. Client programs must specify a valid endpoint to access these Amazon DocumentDB resources.

" + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group family to return the engine parameter information for.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "Parameters":{ + "shape":"ParametersList", + "documentation":"

The parameters of a particular DB cluster parameter group family.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeEngineDefaultClusterParameters operation.

", + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{ + "shape":"String", + "documentation":"

Provides the identifier for the source of the event.

" + }, + "SourceType":{ + "shape":"SourceType", + "documentation":"

Specifies the source type for this event.

" + }, + "Message":{ + "shape":"String", + "documentation":"

Provides the text of this event.

" + }, + "EventCategories":{ + "shape":"EventCategoriesList", + "documentation":"

Specifies the category for the event.

" + }, + "Date":{ + "shape":"TStamp", + "documentation":"

Specifies the date and time of the event.

" + }, + "SourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the event.

" + } + }, + "documentation":"

Detailed information about an event.

" + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{ + "shape":"String", + "documentation":"

The source type that the returned categories belong to.

" + }, + "EventCategories":{ + "shape":"EventCategoriesList", + "documentation":"

The event categories for the specified source type.

" + } + }, + "documentation":"

An event source type, accompanied by one or more event category names.

", + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{ + "shape":"EventCategoriesMapList", + "documentation":"

A list of event category maps.

" + } + }, + "documentation":"

Represents the output of DescribeEventCategories.

" + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "Events":{ + "shape":"EventList", + "documentation":"

Detailed information about one or more events.

" + } + }, + "documentation":"

Represents the output of DescribeEvents.

" + }, + "FailoverDBClusterMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

A DB cluster identifier to force a failover for. This parameter is not case sensitive.

Constraints:

" + }, + "TargetDBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The name of the instance to promote to the primary instance.

You must specify the instance identifier for an Amazon DocumentDB replica in the DB cluster. For example, mydbcluster-replica1.

" + } + }, + "documentation":"

Represents the input to FailoverDBCluster.

" + }, + "FailoverDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the filter. Filter names are case sensitive.

" + }, + "Values":{ + "shape":"FilterValueList", + "documentation":"

One or more filter values. Filter values are case sensitive.

" + } + }, + "documentation":"

A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.

Wildcards are not supported in filters.

" + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "FilterValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Value" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The request would cause you to exceed the allowed number of DB instances.

", + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB cluster doesn't have enough capacity for the current operation.

", + "error":{ + "code":"InsufficientDBClusterCapacityFault", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified DB instance class isn't available in the specified Availability Zone.

", + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientStorageClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

There is not enough storage available for the current action. You might be able to resolve this error by updating your subnet group to use different Availability Zones that have more storage available.

", + "error":{ + "code":"InsufficientStorageClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBClusterSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The provided value isn't a valid DB cluster snapshot state.

", + "error":{ + "code":"InvalidDBClusterSnapshotStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBClusterStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB cluster isn't in a valid state.

", + "error":{ + "code":"InvalidDBClusterStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified DB instance isn't in the available state.

", + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB parameter group is in use, or it is in a state that is not valid. If you are trying to delete the parameter group, you can't delete it when the parameter group is in this state.

", + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The state of the DB security group doesn't allow deletion.

", + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The state of the DB snapshot doesn't allow deletion.

", + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB subnet group can't be deleted because it's in use.

", + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB subnet isn't in the available state.

", + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You cannot restore from a virtual private cloud (VPC) backup to a non-VPC DB instance.

", + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "documentation":"

The requested subnet is not valid, or multiple subnets were requested that are not all in a common virtual private cloud (VPC).

", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB subnet group doesn't cover all Availability Zones after it is created because of changes that were made.

", + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KMSKeyNotAccessibleFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

An error occurred when accessing an AWS KMS key.

", + "error":{ + "code":"KMSKeyNotAccessibleFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{ + "shape":"String", + "documentation":"

The Amazon DocumentDB resource with tags to be listed. This value is an Amazon Resource Name (ARN).

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + } + }, + "documentation":"

Represents the input to ListTagsForResource.

" + }, + "LogTypeList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ModifyDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier for the cluster that is being modified. This parameter is not case sensitive.

Constraints:

" + }, + "NewDBClusterIdentifier":{ + "shape":"String", + "documentation":"

The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string.

Constraints:

Example: my-cluster2

" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"

A value that specifies whether the changes in this request and any pending changes are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is set to false, changes to the DB cluster are applied during the next maintenance window.

The ApplyImmediately parameter affects only the NewDBClusterIdentifier and MasterUserPassword values. If you set this parameter value to false, the changes to the NewDBClusterIdentifier and MasterUserPassword values are applied during the next maintenance window. All other changes are applied immediately, regardless of the value of the ApplyImmediately parameter.

Default: false

" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The number of days for which automated backups are retained. You must specify a minimum value of 1.

Default: 1

Constraints:

" + }, + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group to use for the DB cluster.

" + }, + "VpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of virtual private cloud (VPC) security groups that the DB cluster will belong to.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port number on which the DB cluster accepts connections.

Constraints: Must be a value from 1150 to 65535.

Default: The same port as the original DB cluster.

" + }, + "MasterUserPassword":{ + "shape":"String", + "documentation":"

The new password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

Constraints: Must contain from 8 to 41 characters.

" + }, + "PreferredBackupWindow":{ + "shape":"String", + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region.

Constraints:

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" + }, + "CloudwatchLogsExportConfiguration":{ + "shape":"CloudwatchLogsExportConfiguration", + "documentation":"

The configuration setting for the log types to be enabled for export to Amazon CloudWatch Logs for a specific DB instance or DB cluster. The EnableLogTypes and DisableLogTypes arrays determine which logs are exported (or not exported) to CloudWatch Logs.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true.

" + } + }, + "documentation":"

Represents the input to ModifyDBCluster.

" + }, + "ModifyDBClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBClusterParameterGroupName", + "Parameters" + ], + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group to modify.

" + }, + "Parameters":{ + "shape":"ParametersList", + "documentation":"

A list of parameters in the DB cluster parameter group to modify.

" + } + }, + "documentation":"

Represents the input to ModifyDBClusterParameterGroup.

" + }, + "ModifyDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "ModifyDBClusterSnapshotAttributeMessage":{ + "type":"structure", + "required":[ + "DBClusterSnapshotIdentifier", + "AttributeName" + ], + "members":{ + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the DB cluster snapshot to modify the attributes for.

" + }, + "AttributeName":{ + "shape":"String", + "documentation":"

The name of the DB cluster snapshot attribute to modify.

To manage authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this value to restore.

" + }, + "ValuesToAdd":{ + "shape":"AttributeValueList", + "documentation":"

A list of DB cluster snapshot attributes to add to the attribute specified by AttributeName.

To authorize other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account IDs. To make the manual DB cluster snapshot restorable by any AWS account, set it to all. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want to be available to all AWS accounts.

" + }, + "ValuesToRemove":{ + "shape":"AttributeValueList", + "documentation":"

A list of DB cluster snapshot attributes to remove from the attribute specified by AttributeName.

To remove authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account identifiers. To remove authorization for any AWS account to copy or restore the DB cluster snapshot, set it to all . If you specify all, an AWS account whose account ID is explicitly added to the restore attribute can still copy or restore a manual DB cluster snapshot.

" + } + }, + "documentation":"

Represents the input to ModifyDBClusterSnapshotAttribute.

" + }, + "ModifyDBClusterSnapshotAttributeResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshotAttributesResult":{"shape":"DBClusterSnapshotAttributesResult"} + } + }, + "ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The DB instance identifier. This value is stored as a lowercase string.

Constraints:

" + }, + "DBInstanceClass":{ + "shape":"String", + "documentation":"

The new compute and memory capacity of the DB instance; for example, db.m4.large. Not all DB instance classes are available in all AWS Regions.

If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

Default: Uses existing setting.

" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"

Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next reboot.

Default: false

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, changing this parameter causes a reboot of the DB instance. If you are moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure that pending changes are applied.

Default: Uses existing setting.

Format: ddd:hh24:mi-ddd:hh24:mi

Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Must be at least 30 minutes.

" + }, + "AutoMinorVersionUpgrade":{ + "shape":"BooleanOptional", + "documentation":"

Indicates that minor version upgrades are applied automatically to the DB instance during the maintenance window. Changing this parameter doesn't result in an outage except in the following case, and the change is asynchronously applied as soon as possible. An outage results if this parameter is set to true during the maintenance window, and a newer minor version is available, and Amazon DocumentDB has enabled automatic patching for that engine version.

" + }, + "NewDBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot occurs immediately if you set Apply Immediately to true. It occurs during the next maintenance window if you set Apply Immediately to false. This value is stored as a lowercase string.

Constraints:

Example: mydbinstance

" + }, + "PromotionTier":{ + "shape":"IntegerOptional", + "documentation":"

A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.

Default: 1

Valid values: 0-15

" + } + }, + "documentation":"

Represents the input to ModifyDBInstance.

" + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name for the DB subnet group. This value is stored as a lowercase string. You can't modify the default subnet group.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

" + }, + "DBSubnetGroupDescription":{ + "shape":"String", + "documentation":"

The description for the DB subnet group.

" + }, + "SubnetIds":{ + "shape":"SubnetIdentifierList", + "documentation":"

The Amazon EC2 subnet IDs for the DB subnet group.

" + } + }, + "documentation":"

Represents the input to ModifyDBSubnetGroup.

" + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

The engine type of a DB instance.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The engine version of a DB instance.

" + }, + "DBInstanceClass":{ + "shape":"String", + "documentation":"

The DB instance class for a DB instance.

" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

The license model for a DB instance.

" + }, + "AvailabilityZones":{ + "shape":"AvailabilityZoneList", + "documentation":"

A list of Availability Zones for a DB instance.

" + }, + "Vpc":{ + "shape":"Boolean", + "documentation":"

Indicates whether a DB instance is in a virtual private cloud (VPC).

" + } + }, + "documentation":"

The options that are available for a DB instance.

", + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{ + "shape":"OrderableDBInstanceOptionsList", + "documentation":"

The options that are available for a particular orderable DB instance.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the output of DescribeOrderableDBInstanceOptions.

" + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{ + "shape":"String", + "documentation":"

Specifies the name of the parameter.

" + }, + "ParameterValue":{ + "shape":"String", + "documentation":"

Specifies the value of the parameter.

" + }, + "Description":{ + "shape":"String", + "documentation":"

Provides a description of the parameter.

" + }, + "Source":{ + "shape":"String", + "documentation":"

Indicates the source of the parameter value.

" + }, + "ApplyType":{ + "shape":"String", + "documentation":"

Specifies the engine-specific parameters type.

" + }, + "DataType":{ + "shape":"String", + "documentation":"

Specifies the valid data type for the parameter.

" + }, + "AllowedValues":{ + "shape":"String", + "documentation":"

Specifies the valid range of values for the parameter.

" + }, + "IsModifiable":{ + "shape":"Boolean", + "documentation":"

Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

" + }, + "MinimumEngineVersion":{ + "shape":"String", + "documentation":"

The earliest engine version to which the parameter can apply.

" + }, + "ApplyMethod":{ + "shape":"ApplyMethod", + "documentation":"

Indicates when to apply parameter updates.

" + } + }, + "documentation":"

Detailed information about an individual parameter.

" + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingCloudwatchLogsExports":{ + "type":"structure", + "members":{ + "LogTypesToEnable":{ + "shape":"LogTypeList", + "documentation":"

Log types that are in the process of being activated. After they are activated, these log types are exported to Amazon CloudWatch Logs.

" + }, + "LogTypesToDisable":{ + "shape":"LogTypeList", + "documentation":"

Log types that are in the process of being deactivated. After they are deactivated, these log types aren't exported to Amazon CloudWatch Logs.

" + } + }, + "documentation":"

A list of the log types whose configuration is still pending. These log types are in the process of being activated or deactivated.

" + }, + "PendingMaintenanceAction":{ + "type":"structure", + "members":{ + "Action":{ + "shape":"String", + "documentation":"

The type of pending maintenance action that is available for the resource.

" + }, + "AutoAppliedAfterDate":{ + "shape":"TStamp", + "documentation":"

The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date. If this date is specified, any next-maintenance opt-in requests are ignored.

" + }, + "ForcedApplyDate":{ + "shape":"TStamp", + "documentation":"

The date when the maintenance action is automatically applied. The maintenance action is applied to the resource on this date regardless of the maintenance window for the resource. If this date is specified, any immediate opt-in requests are ignored.

" + }, + "OptInStatus":{ + "shape":"String", + "documentation":"

Indicates the type of opt-in request that has been received for the resource.

" + }, + "CurrentApplyDate":{ + "shape":"TStamp", + "documentation":"

The effective date when the pending maintenance action is applied to the resource.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description providing more detail about the maintenance action.

" + } + }, + "documentation":"

Provides information about a pending maintenance action for a resource.

" + }, + "PendingMaintenanceActionDetails":{ + "type":"list", + "member":{ + "shape":"PendingMaintenanceAction", + "locationName":"PendingMaintenanceAction" + } + }, + "PendingMaintenanceActions":{ + "type":"list", + "member":{ + "shape":"ResourcePendingMaintenanceActions", + "locationName":"ResourcePendingMaintenanceActions" + } + }, + "PendingMaintenanceActionsMessage":{ + "type":"structure", + "members":{ + "PendingMaintenanceActions":{ + "shape":"PendingMaintenanceActions", + "documentation":"

The maintenance actions to be applied.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Represents the output of DescribePendingMaintenanceActions.

" + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "DBInstanceClass":{ + "shape":"String", + "documentation":"

Contains the new DBInstanceClass for the DB instance that will be applied or is currently being applied.

" + }, + "AllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

Contains the new AllocatedStorage size for the DB instance that will be applied or is currently being applied.

" + }, + "MasterUserPassword":{ + "shape":"String", + "documentation":"

Contains the pending or currently in-progress change of the master credentials for the DB instance.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the pending port for the DB instance.

" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the pending number of days for which automated backups are retained.

" + }, + "MultiAZ":{ + "shape":"BooleanOptional", + "documentation":"

Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

Indicates the database engine version.

" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

The license model for the DB instance.

Valid values: license-included, bring-your-own-license, general-public-license

" + }, + "Iops":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the new Provisioned IOPS value for the DB instance that will be applied or is currently being applied.

" + }, + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

Contains the new DBInstanceIdentifier for the DB instance that will be applied or is currently being applied.

" + }, + "StorageType":{ + "shape":"String", + "documentation":"

Specifies the storage type to be associated with the DB instance.

" + }, + "CACertificateIdentifier":{ + "shape":"String", + "documentation":"

Specifies the identifier of the certificate authority (CA) certificate for the DB instance.

" + }, + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The new DB subnet group for the DB instance.

" + }, + "PendingCloudwatchLogsExports":{ + "shape":"PendingCloudwatchLogsExports", + "documentation":"

A list of the log types whose configuration is still pending. These log types are in the process of being activated or deactivated.

" + } + }, + "documentation":"

One or more modified settings for a DB instance. These modified settings have been requested, but haven't been applied yet.

" + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

" + }, + "ForceFailover":{ + "shape":"BooleanOptional", + "documentation":"

When true, the reboot is conducted through a Multi-AZ failover.

Constraint: You can't specify true if the instance is not configured for Multi-AZ.

" + } + }, + "documentation":"

Represents the input to RebootDBInstance.

" + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{ + "shape":"String", + "documentation":"

The Amazon DocumentDB resource that the tags are removed from. This value is an Amazon Resource Name (ARN).

" + }, + "TagKeys":{ + "shape":"KeyList", + "documentation":"

The tag key (name) of the tag to be removed.

" + } + }, + "documentation":"

Represents the input to RemoveTagsFromResource.

" + }, + "ResetDBClusterParameterGroupMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group to reset.

" + }, + "ResetAllParameters":{ + "shape":"Boolean", + "documentation":"

A value that is set to true to reset all parameters in the DB cluster parameter group to their default values, and false otherwise. You can't use this parameter if there is a list of parameter names specified for the Parameters parameter.

" + }, + "Parameters":{ + "shape":"ParametersList", + "documentation":"

A list of parameter names in the DB cluster parameter group to reset to the default values. You can't use this parameter if the ResetAllParameters parameter is set to true.

" + } + }, + "documentation":"

Represents the input to ResetDBClusterParameterGroup.

" + }, + "ResourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified resource ID was not found.

", + "error":{ + "code":"ResourceNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourcePendingMaintenanceActions":{ + "type":"structure", + "members":{ + "ResourceIdentifier":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource that has pending maintenance actions.

" + }, + "PendingMaintenanceActionDetails":{ + "shape":"PendingMaintenanceActionDetails", + "documentation":"

A list that provides details about the pending maintenance actions for the resource.

" + } + }, + "documentation":"

Represents the output of ApplyPendingMaintenanceAction.

", + "wrapper":true + }, + "RestoreDBClusterFromSnapshotMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "SnapshotIdentifier", + "Engine" + ], + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

Provides the list of Amazon EC2 Availability Zones that instances in the restored DB cluster can be created in.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB cluster to create from the DB snapshot or DB cluster snapshot. This parameter isn't case sensitive.

Constraints:

Example: my-snapshot-id

" + }, + "SnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the DB snapshot or DB cluster snapshot to restore from.

You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.

Constraints:

" + }, + "Engine":{ + "shape":"String", + "documentation":"

The database engine to use for the new DB cluster.

Default: The same as source.

Constraint: Must be compatible with the engine of the source.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version of the database engine to use for the new DB cluster.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port number on which the new DB cluster accepts connections.

Constraints: Must be a value from 1150 to 65535.

Default: The same port as the original DB cluster.

" + }, + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name of the DB subnet group to use for the new DB cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

" + }, + "VpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of virtual private cloud (VPC) security groups that the new DB cluster will belong to.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the restored DB cluster.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier to use when restoring an encrypted DB cluster from a DB snapshot or DB cluster snapshot.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the AWS KMS encryption key used to encrypt the new DB cluster, then you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

A list of log types that must be enabled for exporting to Amazon CloudWatch Logs.

" + } + }, + "documentation":"

Represents the input to RestoreDBClusterFromSnapshot.

" + }, + "RestoreDBClusterFromSnapshotResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "RestoreDBClusterToPointInTimeMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "SourceDBClusterIdentifier" + ], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The name of the new DB cluster to be created.

Constraints:

" + }, + "SourceDBClusterIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the source DB cluster from which to restore.

Constraints:

" + }, + "RestoreToTime":{ + "shape":"TStamp", + "documentation":"

The date and time to restore the DB cluster to.

Valid values: A time in Universal Coordinated Time (UTC) format.

Constraints:

Example: 2015-03-07T23:45:00Z

" + }, + "UseLatestRestorableTime":{ + "shape":"Boolean", + "documentation":"

A value that is set to true to restore the DB cluster to the latest restorable backup time, and false otherwise.

Default: false

Constraints: Cannot be specified if the RestoreToTime parameter is provided.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port number on which the new DB cluster accepts connections.

Constraints: Must be a value from 1150 to 65535.

Default: The default port for the engine.

" + }, + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The DB subnet group name to use for the new DB cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

" + }, + "VpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of VPC security groups that the new DB cluster belongs to.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the restored DB cluster.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the AWS KMS encryption key used to encrypt the new DB cluster, then you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

You can restore to a new DB cluster and encrypt the new DB cluster with an AWS KMS key that is different from the AWS KMS key used to encrypt the source DB cluster. The new DB cluster is encrypted with the AWS KMS key identified by the KmsKeyId parameter.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

If DBClusterIdentifier refers to a DB cluster that is not encrypted, then the restore request is rejected.

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

A list of log types that must be enabled for exporting to Amazon CloudWatch Logs.

" + } + }, + "documentation":"

Represents the input to RestoreDBClusterToPointInTime.

" + }, + "RestoreDBClusterToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "SharedSnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You have exceeded the maximum number of accounts that you can share a manual DB snapshot with.

", + "error":{ + "code":"SharedSnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The request would cause you to exceed the allowed number of DB snapshots.

", + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot", + "db-cluster", + "db-cluster-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The request would cause you to exceed the allowed amount of storage available across all DB instances.

", + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "StorageTypeNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Storage of the specified StorageType can't be associated with the DB instance.

", + "error":{ + "code":"StorageTypeNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{ + "shape":"String", + "documentation":"

Specifies the identifier of the subnet.

" + }, + "SubnetAvailabilityZone":{ + "shape":"AvailabilityZone", + "documentation":"

Specifies the Availability Zone for the subnet.

" + }, + "SubnetStatus":{ + "shape":"String", + "documentation":"

Specifies the status of the subnet.

" + } + }, + "documentation":"

Detailed information about a subnet.

" + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB subnet is already in use in the Availability Zone.

", + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "documentation":"

The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with \"aws:\" or \"rds:\". The string can contain only the set of Unicode letters, digits, white space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

" + }, + "Value":{ + "shape":"String", + "documentation":"

The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with \"aws:\" or \"rds:\". The string can contain only the set of Unicode letters, digits, white space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

" + } + }, + "documentation":"

Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.

" + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{ + "shape":"TagList", + "documentation":"

A list of one or more tags.

" + } + }, + "documentation":"

Represents the output of ListTagsForResource.

" + }, + "UpgradeTarget":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

The name of the upgrade target database engine.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version number of the upgrade target database engine.

" + }, + "Description":{ + "shape":"String", + "documentation":"

The version of the database engine that a DB instance can be upgraded to.

" + }, + "AutoUpgrade":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true.

" + }, + "IsMajorVersionUpgrade":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether a database engine is upgraded to a major version.

" + } + }, + "documentation":"

The version of the database engine that a DB instance can be upgraded to.

" + }, + "ValidUpgradeTargetList":{ + "type":"list", + "member":{ + "shape":"UpgradeTarget", + "locationName":"UpgradeTarget" + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{ + "shape":"String", + "documentation":"

The name of the VPC security group.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the VPC security group.

" + } + }, + "documentation":"

Used as a response element for queries on virtual private cloud (VPC) security group membership.

" + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + }, + "documentation":"

Amazon DocumentDB API documentation

" +} diff --git a/botocore/data/docdb/2014-10-31/waiters-2.json b/botocore/data/docdb/2014-10-31/waiters-2.json new file mode 100644 index 00000000..e75f03b2 --- /dev/null +++ b/botocore/data/docdb/2014-10-31/waiters-2.json @@ -0,0 +1,90 @@ +{ + "version": 2, + "waiters": { + "DBInstanceAvailable": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-parameters", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + }, + "DBInstanceDeleted": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "DBInstanceNotFound", + "matcher": "error", + "state": "success" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "modifying", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "resetting-master-credentials", + "matcher": "pathAny", + "state": 
"failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + } + } +} diff --git a/botocore/data/ds/2015-04-16/paginators-1.json b/botocore/data/ds/2015-04-16/paginators-1.json index 409e2250..3b74918e 100644 --- a/botocore/data/ds/2015-04-16/paginators-1.json +++ b/botocore/data/ds/2015-04-16/paginators-1.json @@ -5,6 +5,54 @@ "output_token": "NextToken", "input_token": "NextToken", "limit_key": "Limit" + }, + "DescribeDirectories": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "DirectoryDescriptions" + }, + "DescribeSharedDirectories": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "SharedDirectories" + }, + "DescribeSnapshots": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Snapshots" + }, + "DescribeTrusts": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Trusts" + }, + "ListIpRoutes": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "IpRoutesInfo" + }, + "ListLogSubscriptions": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "LogSubscriptions" + }, + "ListSchemaExtensions": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "SchemaExtensionsInfo" + }, + "ListTagsForResource": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Tags" } } } diff --git a/botocore/data/ds/2015-04-16/service-2.json b/botocore/data/ds/2015-04-16/service-2.json index a5c09885..0b142fed 100644 --- a/botocore/data/ds/2015-04-16/service-2.json +++ b/botocore/data/ds/2015-04-16/service-2.json @@ -1122,6 +1122,10 @@ "ConnectSettings":{ "shape":"DirectoryConnectSettings", "documentation":"

A DirectoryConnectSettings object that contains additional information for the operation.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags to be assigned to AD Connector.

" } }, "documentation":"

Contains the inputs for the ConnectDirectory operation.

" @@ -1276,6 +1280,10 @@ "VpcSettings":{ "shape":"DirectoryVpcSettings", "documentation":"

A DirectoryVpcSettings object that contains additional information for the operation.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags to be assigned to the Simple AD directory.

" } }, "documentation":"

Contains the inputs for the CreateDirectory operation.

" @@ -1343,6 +1351,10 @@ "Edition":{ "shape":"DirectoryEdition", "documentation":"

AWS Managed Microsoft AD is available in two editions: Standard and Enterprise. Enterprise is the default.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags to be assigned to the AWS Managed Microsoft AD directory.

" } }, "documentation":"

Creates an AWS Managed Microsoft AD directory.

" diff --git a/botocore/data/dynamodb/2012-08-10/paginators-1.json b/botocore/data/dynamodb/2012-08-10/paginators-1.json index b22762c9..8e10a0c7 100644 --- a/botocore/data/dynamodb/2012-08-10/paginators-1.json +++ b/botocore/data/dynamodb/2012-08-10/paginators-1.json @@ -37,6 +37,11 @@ "non_aggregate_keys": [ "ConsumedCapacity" ] + }, + "ListTagsOfResource": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Tags" } } } diff --git a/botocore/data/dynamodb/2012-08-10/service-2.json b/botocore/data/dynamodb/2012-08-10/service-2.json index 4e788116..cbb55c3c 100644 --- a/botocore/data/dynamodb/2012-08-10/service-2.json +++ b/botocore/data/dynamodb/2012-08-10/service-2.json @@ -202,6 +202,7 @@ }, "input":{"shape":"DescribeEndpointsRequest"}, "output":{"shape":"DescribeEndpointsResponse"}, + "documentation":"

Returns the regional endpoint information.

", "endpointoperation":true }, "DescribeGlobalTable":{ @@ -729,7 +730,7 @@ }, "L":{ "shape":"ListAttributeValue", - "documentation":"

An attribute of type List. For example:

\"L\": [\"Cookies\", \"Coffee\", 3.14159]

" + "documentation":"

An attribute of type List. For example:

\"L\": [ {\"S\": \"Cookies\"} , {\"S\": \"Coffee\"}, {\"N\", \"3.14159\"}]

" }, "NULL":{ "shape":"NullAttributeValue", @@ -956,7 +957,7 @@ }, "BackupType":{ "shape":"BackupType", - "documentation":"

BackupType:

" + "documentation":"

BackupType:

" }, "BackupCreationDateTime":{ "shape":"BackupCreationDateTime", @@ -1044,7 +1045,7 @@ }, "BackupType":{ "shape":"BackupType", - "documentation":"

BackupType:

" + "documentation":"

BackupType:

" }, "BackupSizeBytes":{ "shape":"BackupSizeBytes", @@ -1057,7 +1058,8 @@ "type":"string", "enum":[ "USER", - "SYSTEM" + "SYSTEM", + "AWS_BACKUP" ] }, "BackupTypeFilter":{ @@ -1065,6 +1067,7 @@ "enum":[ "USER", "SYSTEM", + "AWS_BACKUP", "ALL" ] }, @@ -1501,11 +1504,11 @@ }, "LocalSecondaryIndexes":{ "shape":"LocalSecondaryIndexList", - "documentation":"

One or more local secondary indexes (the maximum is five) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size limit per partition key value; otherwise, the size of a local secondary index is unconstrained.

Each local secondary index in the array includes the following:

" + "documentation":"

One or more local secondary indexes (the maximum is 5) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size limit per partition key value; otherwise, the size of a local secondary index is unconstrained.

Each local secondary index in the array includes the following:

" }, "GlobalSecondaryIndexes":{ "shape":"GlobalSecondaryIndexList", - "documentation":"

One or more global secondary indexes (the maximum is five) to be created on the table. Each global secondary index in the array includes the following:

" + "documentation":"

One or more global secondary indexes (the maximum is 20) to be created on the table. Each global secondary index in the array includes the following:

" }, "BillingMode":{ "shape":"BillingMode", @@ -1756,7 +1759,10 @@ "type":"structure", "required":["Endpoints"], "members":{ - "Endpoints":{"shape":"Endpoints"} + "Endpoints":{ + "shape":"Endpoints", + "documentation":"

List of endpoints.

" + } } }, "DescribeGlobalTableInput":{ @@ -1877,9 +1883,16 @@ "CachePeriodInMinutes" ], "members":{ - "Address":{"shape":"String"}, - "CachePeriodInMinutes":{"shape":"Long"} - } + "Address":{ + "shape":"String", + "documentation":"

IP address of the endpoint.

" + }, + "CachePeriodInMinutes":{ + "shape":"Long", + "documentation":"

Endpoint cache time to live (TTL) value.

" + } + }, + "documentation":"

Details about an endpoint.

" }, "Endpoints":{ "type":"list", @@ -4021,7 +4034,7 @@ "documentation":"

A list of cancellation reasons.

" } }, - "documentation":"

The entire transaction request was rejected.

DynamoDB will reject the entire TransactWriteItems request if any of the following is true:

", + "documentation":"

The entire transaction request was rejected.

DynamoDB rejects a TransactWriteItems request under the following circumstances:

DynamoDB rejects a TransactGetItems request under the following circumstances:

", "exception":true }, "TransactionConflictException":{ diff --git a/botocore/data/ec2/2016-11-15/paginators-1.json b/botocore/data/ec2/2016-11-15/paginators-1.json index cac41188..3b3248cf 100644 --- a/botocore/data/ec2/2016-11-15/paginators-1.json +++ b/botocore/data/ec2/2016-11-15/paginators-1.json @@ -112,6 +112,252 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "VpcEndpointConnections" + }, + "DescribeByoipCidrs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ByoipCidrs" + }, + "DescribeCapacityReservations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "CapacityReservations" + }, + "DescribeClassicLinkInstances": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Instances" + }, + "DescribeClientVpnAuthorizationRules": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AuthorizationRules" + }, + "DescribeClientVpnConnections": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Connections" + }, + "DescribeClientVpnEndpoints": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ClientVpnEndpoints" + }, + "DescribeClientVpnRoutes": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Routes" + }, + "DescribeClientVpnTargetNetworks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ClientVpnTargetNetworks" + }, + "DescribeEgressOnlyInternetGateways": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "EgressOnlyInternetGateways" + }, + "DescribeFleets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + 
"output_token": "NextToken", + "result_key": "Fleets" + }, + "DescribeFlowLogs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "FlowLogs" + }, + "DescribeFpgaImages": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "FpgaImages" + }, + "DescribeHostReservationOfferings": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "OfferingSet" + }, + "DescribeHostReservations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "HostReservationSet" + }, + "DescribeHosts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Hosts" + }, + "DescribeImportImageTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ImportImageTasks" + }, + "DescribeImportSnapshotTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ImportSnapshotTasks" + }, + "DescribeInstanceCreditSpecifications": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "InstanceCreditSpecifications" + }, + "DescribeLaunchTemplateVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LaunchTemplateVersions" + }, + "DescribeLaunchTemplates": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LaunchTemplates" + }, + "DescribeMovingAddresses": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "MovingAddressStatuses" + }, + "DescribeNetworkInterfacePermissions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": 
"NetworkInterfacePermissions" + }, + "DescribePrefixLists": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PrefixLists" + }, + "DescribePrincipalIdFormat": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Principals" + }, + "DescribePublicIpv4Pools": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PublicIpv4Pools" + }, + "DescribeScheduledInstanceAvailability": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ScheduledInstanceAvailabilitySet" + }, + "DescribeScheduledInstances": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ScheduledInstanceSet" + }, + "DescribeStaleSecurityGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "StaleSecurityGroupSet" + }, + "DescribeTransitGatewayAttachments": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TransitGatewayAttachments" + }, + "DescribeTransitGatewayRouteTables": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TransitGatewayRouteTables" + }, + "DescribeTransitGatewayVpcAttachments": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TransitGatewayVpcAttachments" + }, + "DescribeTransitGateways": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TransitGateways" + }, + "DescribeVolumesModifications": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "VolumesModifications" + }, + "DescribeVpcClassicLinkDnsSupport": { + "input_token": "NextToken", + "limit_key": 
"MaxResults", + "output_token": "NextToken", + "result_key": "Vpcs" + }, + "DescribeVpcEndpointConnectionNotifications": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ConnectionNotificationSet" + }, + "DescribeVpcEndpointServiceConfigurations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ServiceConfigurations" + }, + "DescribeVpcEndpointServicePermissions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AllowedPrincipals" + }, + "DescribeVpcPeeringConnections": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "VpcPeeringConnections" + }, + "GetTransitGatewayAttachmentPropagations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TransitGatewayAttachmentPropagations" + }, + "GetTransitGatewayRouteTableAssociations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Associations" + }, + "GetTransitGatewayRouteTablePropagations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TransitGatewayRouteTablePropagations" } } } diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 07fa75da..201f826c 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -70,7 +70,7 @@ }, "input":{"shape":"AllocateAddressRequest"}, "output":{"shape":"AllocateAddressResult"}, - "documentation":"

Allocates an Elastic IP address to your AWS account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different AWS account.

You can allocate an Elastic IP address from an address pool owned by AWS or from an address pool created from a public IPv4 address range that you have brought to AWS for use with your AWS resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

[EC2-VPC] If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another AWS account. You cannot recover an Elastic IP address for EC2-Classic. To attempt to recover an Elastic IP address that you released, specify it in this operation.

An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic per region and 5 Elastic IP addresses for EC2-VPC per region.

For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Allocates an Elastic IP address to your AWS account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different AWS account.

You can allocate an Elastic IP address from an address pool owned by AWS or from an address pool created from a public IPv4 address range that you have brought to AWS for use with your AWS resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

[EC2-VPC] If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another AWS account. You cannot recover an Elastic IP address for EC2-Classic. To attempt to recover an Elastic IP address that you released, specify it in this operation.

An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic per region and 5 Elastic IP addresses for EC2-VPC per region.

For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

" }, "AllocateHosts":{ "name":"AllocateHosts", @@ -100,7 +100,7 @@ }, "input":{"shape":"AssignIpv6AddressesRequest"}, "output":{"shape":"AssignIpv6AddressesResult"}, - "documentation":"

Assigns one or more IPv6 addresses to the specified network interface. You can specify one or more specific IPv6 addresses, or you can specify the number of IPv6 addresses to be automatically assigned from within the subnet's IPv6 CIDR block range. You can assign as many IPv6 addresses to a network interface as you can assign private IPv4 addresses, and the limit varies per instance type. For information, see IP Addresses Per Network Interface Per Instance Type in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Assigns one or more IPv6 addresses to the specified network interface. You can specify one or more specific IPv6 addresses, or you can specify the number of IPv6 addresses to be automatically assigned from within the subnet's IPv6 CIDR block range. You can assign as many IPv6 addresses to a network interface as you can assign private IPv4 addresses, and the limit varies per instance type. For information, see IP Addresses Per Network Interface Per Instance Type in the Amazon Elastic Compute Cloud User Guide.

" }, "AssignPrivateIpAddresses":{ "name":"AssignPrivateIpAddresses", @@ -109,7 +109,7 @@ "requestUri":"/" }, "input":{"shape":"AssignPrivateIpAddressesRequest"}, - "documentation":"

Assigns one or more secondary private IP addresses to the specified network interface.

You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

When you move a secondary private IP address to another network interface, any Elastic IP address that is associated with the IP address is also moved.

Remapping an IP address is an asynchronous operation. When you move an IP address from one network interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance metadata to confirm that the remapping is complete.

" + "documentation":"

Assigns one or more secondary private IP addresses to the specified network interface.

You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

When you move a secondary private IP address to another network interface, any Elastic IP address that is associated with the IP address is also moved.

Remapping an IP address is an asynchronous operation. When you move an IP address from one network interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance metadata to confirm that the remapping is complete.

" }, "AssociateAddress":{ "name":"AssociateAddress", @@ -119,7 +119,7 @@ }, "input":{"shape":"AssociateAddressRequest"}, "output":{"shape":"AssociateAddressResult"}, - "documentation":"

Associates an Elastic IP address with an instance or a network interface. Before you can use an Elastic IP address, you must allocate it to your account.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

[EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

[VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation. You cannot associate an Elastic IP address with an instance or network interface that has an existing Elastic IP address.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

" + "documentation":"

Associates an Elastic IP address with an instance or a network interface. Before you can use an Elastic IP address, you must allocate it to your account.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

[EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

[VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation. You cannot associate an Elastic IP address with an instance or network interface that has an existing Elastic IP address.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

" }, "AssociateClientVpnTargetNetwork":{ "name":"AssociateClientVpnTargetNetwork", @@ -138,7 +138,7 @@ "requestUri":"/" }, "input":{"shape":"AssociateDhcpOptionsRequest"}, - "documentation":"

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" }, "AssociateIamInstanceProfile":{ "name":"AssociateIamInstanceProfile", @@ -158,7 +158,7 @@ }, "input":{"shape":"AssociateRouteTableRequest"}, "output":{"shape":"AssociateRouteTableResult"}, - "documentation":"

Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "AssociateSubnetCidrBlock":{ "name":"AssociateSubnetCidrBlock", @@ -188,7 +188,7 @@ }, "input":{"shape":"AssociateVpcCidrBlockRequest"}, "output":{"shape":"AssociateVpcCidrBlockResult"}, - "documentation":"

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, or you can associate an Amazon-provided IPv6 CIDR block. The IPv6 CIDR block size is fixed at /56.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see VPC and Subnet Sizing in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, or you can associate an Amazon-provided IPv6 CIDR block. The IPv6 CIDR block size is fixed at /56.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see VPC and Subnet Sizing in the Amazon Virtual Private Cloud User Guide.

" }, "AttachClassicLinkVpc":{ "name":"AttachClassicLinkVpc", @@ -207,7 +207,7 @@ "requestUri":"/" }, "input":{"shape":"AttachInternetGatewayRequest"}, - "documentation":"

Attaches an internet gateway to a VPC, enabling connectivity between the internet and the VPC. For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Attaches an internet gateway to a VPC, enabling connectivity between the internet and the VPC. For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

" }, "AttachNetworkInterface":{ "name":"AttachNetworkInterface", @@ -227,7 +227,7 @@ }, "input":{"shape":"AttachVolumeRequest"}, "output":{"shape":"VolumeAttachment"}, - "documentation":"

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

If a volume has an AWS Marketplace product code:

For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

If a volume has an AWS Marketplace product code:

For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "AttachVpnGateway":{ "name":"AttachVpnGateway", @@ -237,7 +237,7 @@ }, "input":{"shape":"AttachVpnGatewayRequest"}, "output":{"shape":"AttachVpnGatewayResult"}, - "documentation":"

Attaches a virtual private gateway to a VPC. You can attach one virtual private gateway to one VPC at a time.

For more information, see AWS Managed VPN Connections in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Attaches a virtual private gateway to a VPC. You can attach one virtual private gateway to one VPC at a time.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

" }, "AuthorizeClientVpnIngress":{ "name":"AuthorizeClientVpnIngress", @@ -256,7 +256,7 @@ "requestUri":"/" }, "input":{"shape":"AuthorizeSecurityGroupEgressRequest"}, - "documentation":"

[EC2-VPC only] Adds one or more egress rules to a security group for use with a VPC. Specifically, this action permits instances to send traffic to one or more destination IPv4 or IPv6 CIDR address ranges, or to one or more destination security groups for the same VPC. This action doesn't apply to security groups for use in EC2-Classic. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide. For more information about security group limits, see Amazon VPC Limits.

Each rule consists of the protocol (for example, TCP), plus either a CIDR range or a source group. For the TCP and UDP protocols, you must also specify the destination port or port range. For the ICMP protocol, you must also specify the ICMP type and code. You can use -1 for the type or code to mean all types or all codes. You can optionally specify a description for the rule.

Rule changes are propagated to affected instances as quickly as possible. However, a small delay might occur.

" + "documentation":"

[EC2-VPC only] Adds one or more egress rules to a security group for use with a VPC. Specifically, this action permits instances to send traffic to one or more destination IPv4 or IPv6 CIDR address ranges, or to one or more destination security groups for the same VPC. This action doesn't apply to security groups for use in EC2-Classic. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide. For more information about security group limits, see Amazon VPC Limits.

Each rule consists of the protocol (for example, TCP), plus either a CIDR range or a source group. For the TCP and UDP protocols, you must also specify the destination port or port range. For the ICMP protocol, you must also specify the ICMP type and code. You can use -1 for the type or code to mean all types or all codes. You can optionally specify a description for the rule.

Rule changes are propagated to affected instances as quickly as possible. However, a small delay might occur.

" }, "AuthorizeSecurityGroupIngress":{ "name":"AuthorizeSecurityGroupIngress", @@ -265,7 +265,7 @@ "requestUri":"/" }, "input":{"shape":"AuthorizeSecurityGroupIngressRequest"}, - "documentation":"

Adds one or more ingress rules to a security group.

Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

[EC2-Classic] This action gives one or more IPv4 CIDR address ranges permission to access a security group in your account, or gives one or more security groups (called the source groups) permission to access a security group for your account. A source group can be for your own AWS account, or another. You can have up to 100 rules per group.

[EC2-VPC] This action gives one or more IPv4 or IPv6 CIDR address ranges permission to access a security group in your VPC, or gives one or more other security groups (called the source groups) permission to access a security group for your VPC. The security groups must all be for the same VPC or a peer VPC in a VPC peering connection. For more information about VPC security group limits, see Amazon VPC Limits.

You can optionally specify a description for the security group rule.

" + "documentation":"

Adds one or more ingress rules to a security group.

Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

[EC2-Classic] This action gives one or more IPv4 CIDR address ranges permission to access a security group in your account, or gives one or more security groups (called the source groups) permission to access a security group for your account. A source group can be for your own AWS account, or another. You can have up to 100 rules per group.

[EC2-VPC] This action gives one or more IPv4 or IPv6 CIDR address ranges permission to access a security group in your VPC, or gives one or more other security groups (called the source groups) permission to access a security group for your VPC. The security groups must all be for the same VPC or a peer VPC in a VPC peering connection. For more information about VPC security group limits, see Amazon VPC Limits.

You can optionally specify a description for the security group rule.

" }, "BundleInstance":{ "name":"BundleInstance", @@ -304,7 +304,7 @@ "requestUri":"/" }, "input":{"shape":"CancelConversionRequest"}, - "documentation":"

Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.

For more information, see Importing a Virtual Machine Using the Amazon EC2 CLI.

" + "documentation":"

Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.

For more information, see Importing a Virtual Machine Using the Amazon EC2 CLI.

" }, "CancelExportTask":{ "name":"CancelExportTask", @@ -333,7 +333,7 @@ }, "input":{"shape":"CancelReservedInstancesListingRequest"}, "output":{"shape":"CancelReservedInstancesListingResult"}, - "documentation":"

Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

" }, "CancelSpotFleetRequests":{ "name":"CancelSpotFleetRequests", @@ -383,7 +383,7 @@ }, "input":{"shape":"CopyImageRequest"}, "output":{"shape":"CopyImageResult"}, - "documentation":"

Initiates the copy of an AMI from the specified source region to the current region. You specify the destination region by using its endpoint when making the request.

Copies of encrypted backing snapshots for the AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, unless you set Encrypted during the copy operation. You cannot create an unencrypted copy of an encrypted backing snapshot.

For more information about the prerequisites and limits when copying an AMI, see Copying an AMI in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Initiates the copy of an AMI from the specified source region to the current region. You specify the destination region by using its endpoint when making the request.

Copies of encrypted backing snapshots for the AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, unless you set Encrypted during the copy operation. You cannot create an unencrypted copy of an encrypted backing snapshot.

For more information about the prerequisites and limits when copying an AMI, see Copying an AMI in the Amazon Elastic Compute Cloud User Guide.

" }, "CopySnapshot":{ "name":"CopySnapshot", @@ -393,7 +393,7 @@ }, "input":{"shape":"CopySnapshotRequest"}, "output":{"shape":"CopySnapshotResult"}, - "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateCapacityReservation":{ "name":"CreateCapacityReservation", @@ -403,7 +403,7 @@ }, "input":{"shape":"CreateCapacityReservationRequest"}, "output":{"shape":"CreateCapacityReservationResult"}, - "documentation":"

Creates a new Capacity Reservation with the specified attributes.

Capacity Reservations enable you to reserve capacity for your Amazon EC2 instances in a specific Availability Zone for any duration. This gives you the flexibility to selectively add capacity reservations and still get the Regional RI discounts for that usage. By creating Capacity Reservations, you ensure that you always have access to Amazon EC2 capacity when you need it, for as long as you need it. For more information, see Capacity Reservations in the Amazon Elastic Compute Cloud User Guide.

Your request to create a Capacity Reservation could fail if Amazon EC2 does not have sufficient capacity to fulfill the request. If your request fails due to Amazon EC2 capacity constraints, either try again at a later time, try in a different Availability Zone, or request a smaller capacity reservation. If your application is flexible across instance types and sizes, try to create a Capacity Reservation with different instance attributes.

Your request could also fail if the requested quantity exceeds your On-Demand Instance limit for the selected instance type. If your request fails due to limit constraints, increase your On-Demand Instance limit for the required instance type and try again. For more information about increasing your instance limits, see Amazon EC2 Service Limits in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a new Capacity Reservation with the specified attributes.

Capacity Reservations enable you to reserve capacity for your Amazon EC2 instances in a specific Availability Zone for any duration. This gives you the flexibility to selectively add capacity reservations and still get the Regional RI discounts for that usage. By creating Capacity Reservations, you ensure that you always have access to Amazon EC2 capacity when you need it, for as long as you need it. For more information, see Capacity Reservations in the Amazon Elastic Compute Cloud User Guide.

Your request to create a Capacity Reservation could fail if Amazon EC2 does not have sufficient capacity to fulfill the request. If your request fails due to Amazon EC2 capacity constraints, either try again at a later time, try in a different Availability Zone, or request a smaller capacity reservation. If your application is flexible across instance types and sizes, try to create a Capacity Reservation with different instance attributes.

Your request could also fail if the requested quantity exceeds your On-Demand Instance limit for the selected instance type. If your request fails due to limit constraints, increase your On-Demand Instance limit for the required instance type and try again. For more information about increasing your instance limits, see Amazon EC2 Service Limits in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateClientVpnEndpoint":{ "name":"CreateClientVpnEndpoint", @@ -433,7 +433,7 @@ }, "input":{"shape":"CreateCustomerGatewayRequest"}, "output":{"shape":"CreateCustomerGatewayResult"}, - "documentation":"

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and may be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 region, and 9059, which is reserved in the eu-west-1 region.

For more information about VPN customer gateways, see AWS Managed VPN Connections in the Amazon Virtual Private Cloud User Guide.

You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

" + "documentation":"

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and may be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 region, and 9059, which is reserved in the eu-west-1 region.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

" }, "CreateDefaultSubnet":{ "name":"CreateDefaultSubnet", @@ -443,7 +443,7 @@ }, "input":{"shape":"CreateDefaultSubnetRequest"}, "output":{"shape":"CreateDefaultSubnetResult"}, - "documentation":"

Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more information, see Creating a Default Subnet in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more information, see Creating a Default Subnet in the Amazon Virtual Private Cloud User Guide.

" }, "CreateDefaultVpc":{ "name":"CreateDefaultVpc", @@ -453,7 +453,7 @@ }, "input":{"shape":"CreateDefaultVpcRequest"}, "output":{"shape":"CreateDefaultVpcResult"}, - "documentation":"

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.

" + "documentation":"

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.

" }, "CreateDhcpOptions":{ "name":"CreateDhcpOptions", @@ -463,7 +463,7 @@ }, "input":{"shape":"CreateDhcpOptionsRequest"}, "output":{"shape":"CreateDhcpOptionsResult"}, - "documentation":"

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" }, "CreateEgressOnlyInternetGateway":{ "name":"CreateEgressOnlyInternetGateway", @@ -483,7 +483,7 @@ }, "input":{"shape":"CreateFleetRequest"}, "output":{"shape":"CreateFleetResult"}, - "documentation":"

Launches an EC2 Fleet.

You can create a single EC2 Fleet that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

For more information, see Launching an EC2 Fleet in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Launches an EC2 Fleet.

You can create a single EC2 Fleet that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

For more information, see Launching an EC2 Fleet in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateFlowLogs":{ "name":"CreateFlowLogs", @@ -493,7 +493,7 @@ }, "input":{"shape":"CreateFlowLogsRequest"}, "output":{"shape":"CreateFlowLogsResult"}, - "documentation":"

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow Log Records in the Amazon Virtual Private Cloud User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC.

Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow Log Records in the Amazon Virtual Private Cloud User Guide.

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket.

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

" }, "CreateFpgaImage":{ "name":"CreateFpgaImage", @@ -513,7 +513,7 @@ }, "input":{"shape":"CreateImageRequest"}, "output":{"shape":"CreateImageResult"}, - "documentation":"

Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

For more information, see Creating Amazon EBS-Backed Linux AMIs in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

For more information, see Creating Amazon EBS-Backed Linux AMIs in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateInstanceExportTask":{ "name":"CreateInstanceExportTask", @@ -523,7 +523,7 @@ }, "input":{"shape":"CreateInstanceExportTaskRequest"}, "output":{"shape":"CreateInstanceExportTaskResult"}, - "documentation":"

Exports a running or stopped instance to an S3 bucket.

For information about the supported operating systems, image formats, and known limitations for the types of instances you can export, see Exporting an Instance as a VM Using VM Import/Export in the VM Import/Export User Guide.

" + "documentation":"

Exports a running or stopped instance to an S3 bucket.

For information about the supported operating systems, image formats, and known limitations for the types of instances you can export, see Exporting an Instance as a VM Using VM Import/Export in the VM Import/Export User Guide.

" }, "CreateInternetGateway":{ "name":"CreateInternetGateway", @@ -533,7 +533,7 @@ }, "input":{"shape":"CreateInternetGatewayRequest"}, "output":{"shape":"CreateInternetGatewayResult"}, - "documentation":"

Creates an internet gateway for use with a VPC. After creating the internet gateway, you attach it to a VPC using AttachInternetGateway.

For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates an internet gateway for use with a VPC. After creating the internet gateway, you attach it to a VPC using AttachInternetGateway.

For more information about your VPC and internet gateway, see the Amazon Virtual Private Cloud User Guide.

" }, "CreateKeyPair":{ "name":"CreateKeyPair", @@ -543,7 +543,7 @@ }, "input":{"shape":"CreateKeyPairRequest"}, "output":{"shape":"KeyPair"}, - "documentation":"

Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores the public key and displays the private key for you to save to a file. The private key is returned as an unencrypted PEM encoded PKCS#1 private key. If a key with the specified name already exists, Amazon EC2 returns an error.

You can have up to five thousand key pairs per region.

The key pair returned to you is available only in the region in which you create it. If you prefer, you can create your own key pair using a third-party tool and upload it to any region using ImportKeyPair.

For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores the public key and displays the private key for you to save to a file. The private key is returned as an unencrypted PEM encoded PKCS#1 private key. If a key with the specified name already exists, Amazon EC2 returns an error.

You can have up to five thousand key pairs per region.

The key pair returned to you is available only in the region in which you create it. If you prefer, you can create your own key pair using a third-party tool and upload it to any region using ImportKeyPair.

For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateLaunchTemplate":{ "name":"CreateLaunchTemplate", @@ -573,7 +573,7 @@ }, "input":{"shape":"CreateNatGatewayRequest"}, "output":{"shape":"CreateNatGatewayResult"}, - "documentation":"

Creates a NAT gateway in the specified public subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. Internet-bound traffic from a private subnet can be routed to the NAT gateway, therefore enabling instances in the private subnet to connect to the internet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a NAT gateway in the specified public subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. Internet-bound traffic from a private subnet can be routed to the NAT gateway, therefore enabling instances in the private subnet to connect to the internet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

" }, "CreateNetworkAcl":{ "name":"CreateNetworkAcl", @@ -583,7 +583,7 @@ }, "input":{"shape":"CreateNetworkAclRequest"}, "output":{"shape":"CreateNetworkAclResult"}, - "documentation":"

Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" }, "CreateNetworkAclEntry":{ "name":"CreateNetworkAclEntry", @@ -592,7 +592,7 @@ "requestUri":"/" }, "input":{"shape":"CreateNetworkAclEntryRequest"}, - "documentation":"

Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order. Each network ACL has a set of ingress rules and a separate set of egress rules.

We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order. Each network ACL has a set of ingress rules and a separate set of egress rules.

We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" }, "CreateNetworkInterface":{ "name":"CreateNetworkInterface", @@ -602,7 +602,7 @@ }, "input":{"shape":"CreateNetworkInterfaceRequest"}, "output":{"shape":"CreateNetworkInterfaceResult"}, - "documentation":"

Creates a network interface in the specified subnet.

For more information about network interfaces, see Elastic Network Interfaces in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a network interface in the specified subnet.

For more information about network interfaces, see Elastic Network Interfaces in the Amazon Virtual Private Cloud User Guide.

" }, "CreateNetworkInterfacePermission":{ "name":"CreateNetworkInterfacePermission", @@ -621,7 +621,7 @@ "requestUri":"/" }, "input":{"shape":"CreatePlacementGroupRequest"}, - "documentation":"

Creates a placement group in which to launch instances. The strategy of the placement group determines how the instances are organized within the group.

A cluster placement group is a logical grouping of instances within a single Availability Zone that benefit from low network latency, high network throughput. A spread placement group places instances on distinct hardware. A partition placement group places groups of instances in different partitions, where instances in one partition do not share the same hardware with instances in another partition.

For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a placement group in which to launch instances. The strategy of the placement group determines how the instances are organized within the group.

A cluster placement group is a logical grouping of instances within a single Availability Zone that benefit from low network latency, high network throughput. A spread placement group places instances on distinct hardware. A partition placement group places groups of instances in different partitions, where instances in one partition do not share the same hardware with instances in another partition.

For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateReservedInstancesListing":{ "name":"CreateReservedInstancesListing", @@ -631,7 +631,7 @@ }, "input":{"shape":"CreateReservedInstancesListingRequest"}, "output":{"shape":"CreateReservedInstancesListingResult"}, - "documentation":"

Creates a listing for Amazon EC2 Standard Reserved Instances to be sold in the Reserved Instance Marketplace. You can submit one Standard Reserved Instance listing at a time. To get a list of your Standard Reserved Instances, you can use the DescribeReservedInstances operation.

Only Standard Reserved Instances with a capacity reservation can be sold in the Reserved Instance Marketplace. Convertible Reserved Instances and Standard Reserved Instances with a regional benefit cannot be sold.

The Reserved Instance Marketplace matches sellers who want to resell Standard Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

To sell your Standard Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Standard Reserved Instances, and specify the upfront price to receive for them. Your Standard Reserved Instance listings then become available for purchase. To view the details of your Standard Reserved Instance listing, you can use the DescribeReservedInstancesListings operation.

For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a listing for Amazon EC2 Standard Reserved Instances to be sold in the Reserved Instance Marketplace. You can submit one Standard Reserved Instance listing at a time. To get a list of your Standard Reserved Instances, you can use the DescribeReservedInstances operation.

Only Standard Reserved Instances with a capacity reservation can be sold in the Reserved Instance Marketplace. Convertible Reserved Instances and Standard Reserved Instances with a regional benefit cannot be sold.

The Reserved Instance Marketplace matches sellers who want to resell Standard Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

To sell your Standard Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Standard Reserved Instances, and specify the upfront price to receive for them. Your Standard Reserved Instance listings then become available for purchase. To view the details of your Standard Reserved Instance listing, you can use the DescribeReservedInstancesListings operation.

For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateRoute":{ "name":"CreateRoute", @@ -641,7 +641,7 @@ }, "input":{"shape":"CreateRouteRequest"}, "output":{"shape":"CreateRouteResult"}, - "documentation":"

Creates a route in a route table within a VPC.

You must specify one of the following targets: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

When determining how to route traffic, we use the route with the most specific match. For example, traffic is destined for the IPv4 address 192.0.2.3, and the route table includes the following two IPv4 routes:

Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a route in a route table within a VPC.

You must specify one of the following targets: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

When determining how to route traffic, we use the route with the most specific match. For example, traffic is destined for the IPv4 address 192.0.2.3, and the route table includes the following two IPv4 routes:

Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "CreateRouteTable":{ "name":"CreateRouteTable", @@ -651,7 +651,7 @@ }, "input":{"shape":"CreateRouteTableRequest"}, "output":{"shape":"CreateRouteTableResult"}, - "documentation":"

Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "CreateSecurityGroup":{ "name":"CreateSecurityGroup", @@ -661,7 +661,7 @@ }, "input":{"shape":"CreateSecurityGroupRequest"}, "output":{"shape":"CreateSecurityGroupResult"}, - "documentation":"

Creates a security group.

A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

EC2-Classic: You can have up to 500 security groups.

EC2-VPC: You can create up to 500 security groups per VPC.

When you create a security group, you specify a friendly name of your choice. You can have a security group for use in EC2-Classic with the same name as a security group for use in a VPC. However, you can't have two security groups for use in EC2-Classic with the same name or two security groups for use in a VPC with the same name.

You have a default security group for use in EC2-Classic and a default security group for use in your VPC. If you don't specify a security group when you launch an instance, the instance is launched into the appropriate default security group. A default security group includes a default rule that grants instances unrestricted network access to each other.

You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress.

" + "documentation":"

Creates a security group.

A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

EC2-Classic: You can have up to 500 security groups.

EC2-VPC: You can create up to 500 security groups per VPC.

When you create a security group, you specify a friendly name of your choice. You can have a security group for use in EC2-Classic with the same name as a security group for use in a VPC. However, you can't have two security groups for use in EC2-Classic with the same name or two security groups for use in a VPC with the same name.

You have a default security group for use in EC2-Classic and a default security group for use in your VPC. If you don't specify a security group when you launch an instance, the instance is launched into the appropriate default security group. A default security group includes a default rule that grants instances unrestricted network access to each other.

You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress.

" }, "CreateSnapshot":{ "name":"CreateSnapshot", @@ -671,7 +671,7 @@ }, "input":{"shape":"CreateSnapshotRequest"}, "output":{"shape":"Snapshot"}, - "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tagging Your Amazon EC2 Resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tagging Your Amazon EC2 Resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateSpotDatafeedSubscription":{ "name":"CreateSpotDatafeedSubscription", @@ -681,7 +681,7 @@ }, "input":{"shape":"CreateSpotDatafeedSubscriptionRequest"}, "output":{"shape":"CreateSpotDatafeedSubscriptionResult"}, - "documentation":"

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance Data Feed in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance Data Feed in the Amazon EC2 User Guide for Linux Instances.

" }, "CreateSubnet":{ "name":"CreateSubnet", @@ -691,7 +691,7 @@ }, "input":{"shape":"CreateSubnetRequest"}, "output":{"shape":"CreateSubnetResult"}, - "documentation":"

Creates a subnet in an existing VPC.

When you create each subnet, you provide the VPC ID and IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses).

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a subnet in an existing VPC.

When you create each subnet, you provide the VPC ID and IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses).

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" }, "CreateTags":{ "name":"CreateTags", @@ -700,7 +700,7 @@ "requestUri":"/" }, "input":{"shape":"CreateTagsRequest"}, - "documentation":"

Adds or overwrites one or more tags for the specified Amazon EC2 resource or resources. Each resource can have a maximum of 50 tags. Each tag consists of a key and optional value. Tag keys must be unique per resource.

For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide. For more information about creating IAM policies that control users' access to resources based on tags, see Supported Resource-Level Permissions for Amazon EC2 API Actions in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Adds or overwrites one or more tags for the specified Amazon EC2 resource or resources. Each resource can have a maximum of 50 tags. Each tag consists of a key and optional value. Tag keys must be unique per resource.

For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide. For more information about creating IAM policies that control users' access to resources based on tags, see Supported Resource-Level Permissions for Amazon EC2 API Actions in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateTransitGateway":{ "name":"CreateTransitGateway", @@ -750,7 +750,7 @@ }, "input":{"shape":"CreateVolumeRequest"}, "output":{"shape":"Volume"}, - "documentation":"

Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information, see Regions and Endpoints.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information, see Regions and Endpoints.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateVpc":{ "name":"CreateVpc", @@ -760,7 +760,7 @@ }, "input":{"shape":"CreateVpcRequest"}, "output":{"shape":"CreateVpcResult"}, - "documentation":"

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC.

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC.

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateVpcEndpoint":{ "name":"CreateVpcEndpoint", @@ -770,7 +770,7 @@ }, "input":{"shape":"CreateVpcEndpointRequest"}, "output":{"shape":"CreateVpcEndpointResult"}, - "documentation":"

Creates a VPC endpoint for a specified service. An endpoint enables you to create a private connection between your VPC and the service. The service may be provided by AWS, an AWS Marketplace partner, or another AWS account. For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

A gateway endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

An interface endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.

Use DescribeVpcEndpointServices to get a list of supported services.

" + "documentation":"

Creates a VPC endpoint for a specified service. An endpoint enables you to create a private connection between your VPC and the service. The service may be provided by AWS, an AWS Marketplace partner, or another AWS account. For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

A gateway endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

An interface endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.

Use DescribeVpcEndpointServices to get a list of supported services.

" }, "CreateVpcEndpointConnectionNotification":{ "name":"CreateVpcEndpointConnectionNotification", @@ -780,7 +780,7 @@ }, "input":{"shape":"CreateVpcEndpointConnectionNotificationRequest"}, "output":{"shape":"CreateVpcEndpointConnectionNotificationResult"}, - "documentation":"

Creates a connection notification for a specified VPC endpoint or VPC endpoint service. A connection notification notifies you of specific endpoint events. You must create an SNS topic to receive notifications. For more information, see Create a Topic in the Amazon Simple Notification Service Developer Guide.

You can create a connection notification for interface endpoints only.

" + "documentation":"

Creates a connection notification for a specified VPC endpoint or VPC endpoint service. A connection notification notifies you of specific endpoint events. You must create an SNS topic to receive notifications. For more information, see Create a Topic in the Amazon Simple Notification Service Developer Guide.

You can create a connection notification for interface endpoints only.

" }, "CreateVpcEndpointServiceConfiguration":{ "name":"CreateVpcEndpointServiceConfiguration", @@ -790,7 +790,7 @@ }, "input":{"shape":"CreateVpcEndpointServiceConfigurationRequest"}, "output":{"shape":"CreateVpcEndpointServiceConfigurationResult"}, - "documentation":"

Creates a VPC endpoint service configuration to which service consumers (AWS accounts, IAM users, and IAM roles) can connect. Service consumers can create an interface VPC endpoint to connect to your service.

To create an endpoint service configuration, you must first create a Network Load Balancer for your service. For more information, see VPC Endpoint Services in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a VPC endpoint service configuration to which service consumers (AWS accounts, IAM users, and IAM roles) can connect. Service consumers can create an interface VPC endpoint to connect to your service.

To create an endpoint service configuration, you must first create a Network Load Balancer for your service. For more information, see VPC Endpoint Services in the Amazon Virtual Private Cloud User Guide.

" }, "CreateVpcPeeringConnection":{ "name":"CreateVpcPeeringConnection", @@ -800,7 +800,7 @@ }, "input":{"shape":"CreateVpcPeeringConnectionRequest"}, "output":{"shape":"CreateVpcPeeringConnectionResult"}, - "documentation":"

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

" + "documentation":"

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

" }, "CreateVpnConnection":{ "name":"CreateVpnConnection", @@ -810,7 +810,7 @@ }, "input":{"shape":"CreateVpnConnectionRequest"}, "output":{"shape":"CreateVpnConnectionResult"}, - "documentation":"

Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The only supported connection type is ipsec.1.

The response includes information that you need to give to your network administrator to configure your customer gateway.

We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

For more information, see AWS Managed VPN Connections in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The only supported connection type is ipsec.1.

The response includes information that you need to give to your network administrator to configure your customer gateway.

We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

" }, "CreateVpnConnectionRoute":{ "name":"CreateVpnConnectionRoute", @@ -819,7 +819,7 @@ "requestUri":"/" }, "input":{"shape":"CreateVpnConnectionRouteRequest"}, - "documentation":"

Creates a static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

For more information about VPN connections, see AWS Managed VPN Connections in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

" }, "CreateVpnGateway":{ "name":"CreateVpnGateway", @@ -829,7 +829,7 @@ }, "input":{"shape":"CreateVpnGatewayRequest"}, "output":{"shape":"CreateVpnGatewayResult"}, - "documentation":"

Creates a virtual private gateway. A virtual private gateway is the endpoint on the VPC side of your VPN connection. You can create a virtual private gateway before creating the VPC itself.

For more information about virtual private gateways, see AWS Managed VPN Connections in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a virtual private gateway. A virtual private gateway is the endpoint on the VPC side of your VPN connection. You can create a virtual private gateway before creating the VPC itself.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

" }, "DeleteClientVpnEndpoint":{ "name":"DeleteClientVpnEndpoint", @@ -1001,7 +1001,7 @@ "requestUri":"/" }, "input":{"shape":"DeletePlacementGroupRequest"}, - "documentation":"

Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" }, "DeleteRoute":{ "name":"DeleteRoute", @@ -1037,7 +1037,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteSnapshotRequest"}, - "documentation":"

Deletes the specified snapshot.

When you make periodic snapshots of a volume, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the volume.

You cannot delete a snapshot of the root device of an EBS volume used by a registered AMI. You must first de-register the AMI before you can delete the snapshot.

For more information, see Deleting an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Deletes the specified snapshot.

When you make periodic snapshots of a volume, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the volume.

You cannot delete a snapshot of the root device of an EBS volume used by a registered AMI. You must first de-register the AMI before you can delete the snapshot.

For more information, see Deleting an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

" }, "DeleteSpotDatafeedSubscription":{ "name":"DeleteSpotDatafeedSubscription", @@ -1064,7 +1064,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteTagsRequest"}, - "documentation":"

Deletes the specified set of tags from the specified set of resources.

To list the current tags, use DescribeTags. For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Deletes the specified set of tags from the specified set of resources.

To list the current tags, use DescribeTags. For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

" }, "DeleteTransitGateway":{ "name":"DeleteTransitGateway", @@ -1113,7 +1113,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteVolumeRequest"}, - "documentation":"

Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

The volume can remain in the deleting state for several minutes.

For more information, see Deleting an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

The volume can remain in the deleting state for several minutes.

For more information, see Deleting an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

" }, "DeleteVpc":{ "name":"DeleteVpc", @@ -1228,7 +1228,7 @@ }, "input":{"shape":"DescribeAddressesRequest"}, "output":{"shape":"DescribeAddressesResult"}, - "documentation":"

Describes one or more of your Elastic IP addresses.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes one or more of your Elastic IP addresses.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeAggregateIdFormat":{ "name":"DescribeAggregateIdFormat", @@ -1248,7 +1248,7 @@ }, "input":{"shape":"DescribeAvailabilityZonesRequest"}, "output":{"shape":"DescribeAvailabilityZonesResult"}, - "documentation":"

Describes one or more of the Availability Zones that are available to you. The results include zones only for the region you're currently using. If there is an event impacting an Availability Zone, you can use this request to view the state and any provided message for that Availability Zone.

For more information, see Regions and Availability Zones in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes one or more of the Availability Zones that are available to you. The results include zones only for the region you're currently using. If there is an event impacting an Availability Zone, you can use this request to view the state and any provided message for that Availability Zone.

For more information, see Regions and Availability Zones in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeBundleTasks":{ "name":"DescribeBundleTasks", @@ -1348,7 +1348,7 @@ }, "input":{"shape":"DescribeConversionTasksRequest"}, "output":{"shape":"DescribeConversionTasksResult"}, - "documentation":"

Describes one or more of your conversion tasks. For more information, see the VM Import/Export User Guide.

For information about the import manifest referenced by this API action, see VM Import Manifest.

" + "documentation":"

Describes one or more of your conversion tasks. For more information, see the VM Import/Export User Guide.

For information about the import manifest referenced by this API action, see VM Import Manifest.

" }, "DescribeCustomerGateways":{ "name":"DescribeCustomerGateways", @@ -1358,7 +1358,7 @@ }, "input":{"shape":"DescribeCustomerGatewaysRequest"}, "output":{"shape":"DescribeCustomerGatewaysResult"}, - "documentation":"

Describes one or more of your VPN customer gateways.

For more information about VPN customer gateways, see AWS Managed VPN Connections in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your VPN customer gateways.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

" }, "DescribeDhcpOptions":{ "name":"DescribeDhcpOptions", @@ -1368,7 +1368,7 @@ }, "input":{"shape":"DescribeDhcpOptionsRequest"}, "output":{"shape":"DescribeDhcpOptionsResult"}, - "documentation":"

Describes one or more of your DHCP options sets.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your DHCP options sets.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeEgressOnlyInternetGateways":{ "name":"DescribeEgressOnlyInternetGateways", @@ -1388,7 +1388,7 @@ }, "input":{"shape":"DescribeElasticGpusRequest"}, "output":{"shape":"DescribeElasticGpusResult"}, - "documentation":"

Describes the Elastic Graphics accelerator associated with your instances. For more information about Elastic Graphics, see Amazon Elastic Graphics.

" + "documentation":"

Describes the Elastic Graphics accelerator associated with your instances. For more information about Elastic Graphics, see Amazon Elastic Graphics.

" }, "DescribeExportTasks":{ "name":"DescribeExportTasks", @@ -1468,7 +1468,7 @@ }, "input":{"shape":"DescribeHostReservationOfferingsRequest"}, "output":{"shape":"DescribeHostReservationOfferingsResult"}, - "documentation":"

Describes the Dedicated Host reservations that are available to purchase.

The results describe all the Dedicated Host reservation offerings, including offerings that may not match the instance family and Region of your Dedicated Hosts. When purchasing an offering, ensure that the instance family and Region of the offering matches that of the Dedicated Hosts with which it is to be associated. For more information about supported instance types, see Dedicated Hosts Overview in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the Dedicated Host reservations that are available to purchase.

The results describe all the Dedicated Host reservation offerings, including offerings that may not match the instance family and Region of your Dedicated Hosts. When purchasing an offering, ensure that the instance family and Region of the offering matches that of the Dedicated Hosts with which it is to be associated. For more information about supported instance types, see Dedicated Hosts Overview in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeHostReservations":{ "name":"DescribeHostReservations", @@ -1518,7 +1518,7 @@ }, "input":{"shape":"DescribeIdentityIdFormatRequest"}, "output":{"shape":"DescribeIdentityIdFormatResult"}, - "documentation":"

Describes the ID format settings for resources for the specified IAM user, IAM role, or root user. For example, you can view the resource types that are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

The following resource types support longer IDs: bundle | conversion-task | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | instance | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | reservation | route-table | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway.

These settings apply to the principal specified in the request. They do not apply to the principal that makes the request.

" + "documentation":"

Describes the ID format settings for resources for the specified IAM user, IAM role, or root user. For example, you can view the resource types that are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

The following resource types support longer IDs: bundle | conversion-task | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | instance | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | reservation | route-table | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway.

These settings apply to the principal specified in the request. They do not apply to the principal that makes the request.

" }, "DescribeImageAttribute":{ "name":"DescribeImageAttribute", @@ -1578,7 +1578,7 @@ }, "input":{"shape":"DescribeInstanceCreditSpecificationsRequest"}, "output":{"shape":"DescribeInstanceCreditSpecificationsResult"}, - "documentation":"

Describes the credit option for CPU usage of one or more of your T2 or T3 instances. The credit options are standard and unlimited.

If you do not specify an instance ID, Amazon EC2 returns T2 and T3 instances with the unlimited credit option, as well as instances that were previously configured as T2 or T3 with the unlimited credit option. For example, if you resize a T2 instance, while it is configured as unlimited, to an M4 instance, Amazon EC2 returns the M4 instance.

If you specify one or more instance IDs, Amazon EC2 returns the credit option (standard or unlimited) of those instances. If you specify an instance ID that is not valid, such as an instance that is not a T2 or T3 instance, an error is returned.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If an Availability Zone is experiencing a service disruption and you specify instance IDs in the affected zone, or do not specify any instance IDs at all, the call fails. If you specify only instance IDs in an unaffected zone, the call works normally.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the credit option for CPU usage of one or more of your T2 or T3 instances. The credit options are standard and unlimited.

If you do not specify an instance ID, Amazon EC2 returns T2 and T3 instances with the unlimited credit option, as well as instances that were previously configured as T2 or T3 with the unlimited credit option. For example, if you resize a T2 instance, while it is configured as unlimited, to an M4 instance, Amazon EC2 returns the M4 instance.

If you specify one or more instance IDs, Amazon EC2 returns the credit option (standard or unlimited) of those instances. If you specify an instance ID that is not valid, such as an instance that is not a T2 or T3 instance, an error is returned.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If an Availability Zone is experiencing a service disruption and you specify instance IDs in the affected zone, or do not specify any instance IDs at all, the call fails. If you specify only instance IDs in an unaffected zone, the call works normally.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeInstanceStatus":{ "name":"DescribeInstanceStatus", @@ -1588,7 +1588,7 @@ }, "input":{"shape":"DescribeInstanceStatusRequest"}, "output":{"shape":"DescribeInstanceStatusResult"}, - "documentation":"

Describes the status of one or more instances. By default, only running instances are described, unless you specifically indicate to return the status of all instances.

Instance status includes the following components:

" + "documentation":"

Describes the status of one or more instances. By default, only running instances are described, unless you specifically indicate to return the status of all instances.

Instance status includes the following components:

" }, "DescribeInstances":{ "name":"DescribeInstances", @@ -1618,7 +1618,7 @@ }, "input":{"shape":"DescribeKeyPairsRequest"}, "output":{"shape":"DescribeKeyPairsResult"}, - "documentation":"

Describes one or more of your key pairs.

For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes one or more of your key pairs.

For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeLaunchTemplateVersions":{ "name":"DescribeLaunchTemplateVersions", @@ -1668,7 +1668,7 @@ }, "input":{"shape":"DescribeNetworkAclsRequest"}, "output":{"shape":"DescribeNetworkAclsResult"}, - "documentation":"

Describes one or more of your network ACLs.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your network ACLs.

For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeNetworkInterfaceAttribute":{ "name":"DescribeNetworkInterfaceAttribute", @@ -1708,7 +1708,7 @@ }, "input":{"shape":"DescribePlacementGroupsRequest"}, "output":{"shape":"DescribePlacementGroupsResult"}, - "documentation":"

Describes one or more of your placement groups. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes one or more of your placement groups. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribePrefixLists":{ "name":"DescribePrefixLists", @@ -1748,7 +1748,7 @@ }, "input":{"shape":"DescribeRegionsRequest"}, "output":{"shape":"DescribeRegionsResult"}, - "documentation":"

Describes one or more regions that are currently available to you.

For a list of the regions supported by Amazon EC2, see Regions and Endpoints.

" + "documentation":"

Describes one or more regions that are currently available to you.

For a list of the regions supported by Amazon EC2, see Regions and Endpoints.

" }, "DescribeReservedInstances":{ "name":"DescribeReservedInstances", @@ -1758,7 +1758,7 @@ }, "input":{"shape":"DescribeReservedInstancesRequest"}, "output":{"shape":"DescribeReservedInstancesResult"}, - "documentation":"

Describes one or more of the Reserved Instances that you purchased.

For more information about Reserved Instances, see Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes one or more of the Reserved Instances that you purchased.

For more information about Reserved Instances, see Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeReservedInstancesListings":{ "name":"DescribeReservedInstancesListings", @@ -1768,7 +1768,7 @@ }, "input":{"shape":"DescribeReservedInstancesListingsRequest"}, "output":{"shape":"DescribeReservedInstancesListingsResult"}, - "documentation":"

Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeReservedInstancesModifications":{ "name":"DescribeReservedInstancesModifications", @@ -1778,7 +1778,7 @@ }, "input":{"shape":"DescribeReservedInstancesModificationsRequest"}, "output":{"shape":"DescribeReservedInstancesModificationsResult"}, - "documentation":"

Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeReservedInstancesOfferings":{ "name":"DescribeReservedInstancesOfferings", @@ -1788,7 +1788,7 @@ }, "input":{"shape":"DescribeReservedInstancesOfferingsRequest"}, "output":{"shape":"DescribeReservedInstancesOfferingsResult"}, - "documentation":"

Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances.

For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances.

For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeRouteTables":{ "name":"DescribeRouteTables", @@ -1798,7 +1798,7 @@ }, "input":{"shape":"DescribeRouteTablesRequest"}, "output":{"shape":"DescribeRouteTablesResult"}, - "documentation":"

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your route tables.

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeScheduledInstanceAvailability":{ "name":"DescribeScheduledInstanceAvailability", @@ -1838,7 +1838,7 @@ }, "input":{"shape":"DescribeSecurityGroupsRequest"}, "output":{"shape":"DescribeSecurityGroupsResult"}, - "documentation":"

Describes one or more of your security groups.

A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your security groups.

A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeSnapshotAttribute":{ "name":"DescribeSnapshotAttribute", @@ -1848,7 +1848,7 @@ }, "input":{"shape":"DescribeSnapshotAttributeRequest"}, "output":{"shape":"DescribeSnapshotAttributeResult"}, - "documentation":"

Describes the specified attribute of the specified snapshot. You can specify only one attribute at a time.

For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified attribute of the specified snapshot. You can specify only one attribute at a time.

For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeSnapshots":{ "name":"DescribeSnapshots", @@ -1858,7 +1858,7 @@ }, "input":{"shape":"DescribeSnapshotsRequest"}, "output":{"shape":"DescribeSnapshotsResult"}, - "documentation":"

Describes one or more of the EBS snapshots available to you. Available snapshots include public snapshots available for any AWS account to launch, private snapshots that you own, and private snapshots owned by another AWS account but for which you've been given explicit create volume permissions.

The create volume permissions fall into the following categories:

The list of snapshots returned can be modified by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

If you are describing a long list of snapshots, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes one or more of the EBS snapshots available to you. Available snapshots include public snapshots available for any AWS account to launch, private snapshots that you own, and private snapshots owned by another AWS account but for which you've been given explicit create volume permissions.

The create volume permissions fall into the following categories:

The list of snapshots returned can be modified by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

If you are describing a long list of snapshots, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeSpotDatafeedSubscription":{ "name":"DescribeSpotDatafeedSubscription", @@ -1868,7 +1868,7 @@ }, "input":{"shape":"DescribeSpotDatafeedSubscriptionRequest"}, "output":{"shape":"DescribeSpotDatafeedSubscriptionResult"}, - "documentation":"

Describes the data feed for Spot Instances. For more information, see Spot Instance Data Feed in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Describes the data feed for Spot Instances. For more information, see Spot Instance Data Feed in the Amazon EC2 User Guide for Linux Instances.

" }, "DescribeSpotFleetInstances":{ "name":"DescribeSpotFleetInstances", @@ -1908,7 +1908,7 @@ }, "input":{"shape":"DescribeSpotInstanceRequestsRequest"}, "output":{"shape":"DescribeSpotInstanceRequestsResult"}, - "documentation":"

Describes the specified Spot Instance requests.

You can use DescribeSpotInstanceRequests to find a running Spot Instance by examining the response. If the status of the Spot Instance is fulfilled, the instance ID appears in the response and contains the identifier of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.

Spot Instance requests are deleted four hours after they are canceled and their instances are terminated.

" + "documentation":"

Describes the specified Spot Instance requests.

You can use DescribeSpotInstanceRequests to find a running Spot Instance by examining the response. If the status of the Spot Instance is fulfilled, the instance ID appears in the response and contains the identifier of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.

We recommend that you set MaxResults to a value between 5 and 1000 to limit the number of results returned. This paginates the output, which makes the list more manageable and returns the results faster. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSpotInstanceRequests request to retrieve the remaining results.

Spot Instance requests are deleted four hours after they are canceled and their instances are terminated.

" }, "DescribeSpotPriceHistory":{ "name":"DescribeSpotPriceHistory", @@ -1918,7 +1918,7 @@ }, "input":{"shape":"DescribeSpotPriceHistoryRequest"}, "output":{"shape":"DescribeSpotPriceHistoryResult"}, - "documentation":"

Describes the Spot price history. For more information, see Spot Instance Pricing History in the Amazon EC2 User Guide for Linux Instances.

When you specify a start and end time, this operation returns the prices of the instance types within the time range that you specified and the time when the price changed. The price is valid within the time period that you specified; the response merely indicates the last time that the price changed.

" + "documentation":"

Describes the Spot price history. For more information, see Spot Instance Pricing History in the Amazon EC2 User Guide for Linux Instances.

When you specify a start and end time, this operation returns the prices of the instance types within the time range that you specified and the time when the price changed. The price is valid within the time period that you specified; the response merely indicates the last time that the price changed.

" }, "DescribeStaleSecurityGroups":{ "name":"DescribeStaleSecurityGroups", @@ -1938,7 +1938,7 @@ }, "input":{"shape":"DescribeSubnetsRequest"}, "output":{"shape":"DescribeSubnetsResult"}, - "documentation":"

Describes one or more of your subnets.

For more information, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your subnets.

For more information, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" }, "DescribeTags":{ "name":"DescribeTags", @@ -1948,7 +1948,7 @@ }, "input":{"shape":"DescribeTagsRequest"}, "output":{"shape":"DescribeTagsResult"}, - "documentation":"

Describes one or more of the tags for your EC2 resources.

For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes one or more of the tags for your EC2 resources.

For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeTransitGatewayAttachments":{ "name":"DescribeTransitGatewayAttachments", @@ -1998,7 +1998,7 @@ }, "input":{"shape":"DescribeVolumeAttributeRequest"}, "output":{"shape":"DescribeVolumeAttributeResult"}, - "documentation":"

Describes the specified attribute of the specified volume. You can specify only one attribute at a time.

For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified attribute of the specified volume. You can specify only one attribute at a time.

For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVolumeStatus":{ "name":"DescribeVolumeStatus", @@ -2008,7 +2008,7 @@ }, "input":{"shape":"DescribeVolumeStatusRequest"}, "output":{"shape":"DescribeVolumeStatusResult"}, - "documentation":"

Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

The DescribeVolumeStatus operation provides the following information about the specified volumes:

Status: Reflects the current status of the volume. The possible values are ok, impaired , warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks may still be taking place on your volume at the time. We recommend that you retry the request. For more information about volume status, see Monitoring the Status of Your Volumes in the Amazon Elastic Compute Cloud User Guide.

Events: Reflect the cause of a volume status and may require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and may have inconsistent data.

Actions: Reflect the actions you may have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O.)

" + "documentation":"

Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

The DescribeVolumeStatus operation provides the following information about the specified volumes:

Status: Reflects the current status of the volume. The possible values are ok, impaired , warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks may still be taking place on your volume at the time. We recommend that you retry the request. For more information about volume status, see Monitoring the Status of Your Volumes in the Amazon Elastic Compute Cloud User Guide.

Events: Reflect the cause of a volume status and may require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and may have inconsistent data.

Actions: Reflect the actions you may have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O.)

" }, "DescribeVolumes":{ "name":"DescribeVolumes", @@ -2018,7 +2018,7 @@ }, "input":{"shape":"DescribeVolumesRequest"}, "output":{"shape":"DescribeVolumesResult"}, - "documentation":"

Describes the specified EBS volumes.

If you are describing a long list of volumes, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified EBS volumes.

If you are describing a long list of volumes, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVolumesModifications":{ "name":"DescribeVolumesModifications", @@ -2028,7 +2028,7 @@ }, "input":{"shape":"DescribeVolumesModificationsRequest"}, "output":{"shape":"DescribeVolumesModificationsResult"}, - "documentation":"

Reports the current modification status of EBS volumes.

Current-generation EBS volumes support modification of attributes including type, size, and (for io1 volumes) IOPS provisioning while either attached to or detached from an instance. Following an action from the API or the console to modify a volume, the status of the modification may be modifying, optimizing, completed, or failed. If a volume has never been modified, then certain elements of the returned VolumeModification objects are null.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitoring Volume Modifications in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Reports the current modification status of EBS volumes.

Current-generation EBS volumes support modification of attributes including type, size, and (for io1 volumes) IOPS provisioning while either attached to or detached from an instance. Following an action from the API or the console to modify a volume, the status of the modification may be modifying, optimizing, completed, or failed. If a volume has never been modified, then certain elements of the returned VolumeModification objects are null.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitoring Volume Modifications in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVpcAttribute":{ "name":"DescribeVpcAttribute", @@ -2058,7 +2058,7 @@ }, "input":{"shape":"DescribeVpcClassicLinkDnsSupportRequest"}, "output":{"shape":"DescribeVpcClassicLinkDnsSupportResult"}, - "documentation":"

Describes the ClassicLink DNS support status of one or more VPCs. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the ClassicLink DNS support status of one or more VPCs. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVpcEndpointConnectionNotifications":{ "name":"DescribeVpcEndpointConnectionNotifications", @@ -2148,7 +2148,7 @@ }, "input":{"shape":"DescribeVpnConnectionsRequest"}, "output":{"shape":"DescribeVpnConnectionsResult"}, - "documentation":"

Describes one or more of your VPN connections.

For more information about VPN connections, see AWS Managed VPN Connections in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your VPN connections.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

" }, "DescribeVpnGateways":{ "name":"DescribeVpnGateways", @@ -2158,7 +2158,7 @@ }, "input":{"shape":"DescribeVpnGatewaysRequest"}, "output":{"shape":"DescribeVpnGatewaysResult"}, - "documentation":"

Describes one or more of your virtual private gateways.

For more information about virtual private gateways, see AWS Managed VPN Connections in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Describes one or more of your virtual private gateways.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

" }, "DetachClassicLinkVpc":{ "name":"DetachClassicLinkVpc", @@ -2196,7 +2196,7 @@ }, "input":{"shape":"DetachVolumeRequest"}, "output":{"shape":"VolumeAttachment"}, - "documentation":"

Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so can result in the volume becoming stuck in the busy state while detaching. If this happens, detachment can be delayed indefinitely until you unmount the volume, force detachment, reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

When a volume with an AWS Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

For more information, see Detaching an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so can result in the volume becoming stuck in the busy state while detaching. If this happens, detachment can be delayed indefinitely until you unmount the volume, force detachment, reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

When a volume with an AWS Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

For more information, see Detaching an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

" }, "DetachVpnGateway":{ "name":"DetachVpnGateway", @@ -2244,7 +2244,7 @@ }, "input":{"shape":"DisableVpcClassicLinkDnsSupportRequest"}, "output":{"shape":"DisableVpcClassicLinkDnsSupportResult"}, - "documentation":"

Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP addresses when addressed between a linked EC2-Classic instance and instances in the VPC to which it's linked. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP addresses when addressed between a linked EC2-Classic instance and instances in the VPC to which it's linked. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" }, "DisassociateAddress":{ "name":"DisassociateAddress", @@ -2253,7 +2253,7 @@ "requestUri":"/" }, "input":{"shape":"DisassociateAddressRequest"}, - "documentation":"

Disassociates an Elastic IP address from the instance or network interface it's associated with.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

" + "documentation":"

Disassociates an Elastic IP address from the instance or network interface it's associated with.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

" }, "DisassociateClientVpnTargetNetwork":{ "name":"DisassociateClientVpnTargetNetwork", @@ -2282,7 +2282,7 @@ "requestUri":"/" }, "input":{"shape":"DisassociateRouteTableRequest"}, - "documentation":"

Disassociates a subnet from a route table.

After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Disassociates a subnet from a route table.

After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "DisassociateSubnetCidrBlock":{ "name":"DisassociateSubnetCidrBlock", @@ -2350,7 +2350,7 @@ }, "input":{"shape":"EnableVpcClassicLinkRequest"}, "output":{"shape":"EnableVpcClassicLinkResult"}, - "documentation":"

Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. You cannot enable your VPC for ClassicLink if any of your VPC route tables have existing routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. You cannot enable your VPC for ClassicLink if any of your VPC route tables have existing routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" }, "EnableVpcClassicLinkDnsSupport":{ "name":"EnableVpcClassicLinkDnsSupport", @@ -2360,7 +2360,7 @@ }, "input":{"shape":"EnableVpcClassicLinkDnsSupportRequest"}, "output":{"shape":"EnableVpcClassicLinkDnsSupportResult"}, - "documentation":"

Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" }, "ExportClientVpnClientCertificateRevocationList":{ "name":"ExportClientVpnClientCertificateRevocationList", @@ -2400,7 +2400,7 @@ }, "input":{"shape":"GetConsoleOutputRequest"}, "output":{"shape":"GetConsoleOutputResult"}, - "documentation":"

Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors.

By default, the console output returns buffered information that was posted shortly after an instance transition state (start, stop, reboot, or terminate). This information is available for at least one hour after the most recent post. Only the most recent 64 KB of console output is available.

You can optionally retrieve the latest serial console output at any time during the instance lifecycle. This option is supported on instance types that use the Nitro hypervisor.

For more information, see Instance Console Output in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors.

By default, the console output returns buffered information that was posted shortly after an instance transition state (start, stop, reboot, or terminate). This information is available for at least one hour after the most recent post. Only the most recent 64 KB of console output is available.

You can optionally retrieve the latest serial console output at any time during the instance lifecycle. This option is supported on instance types that use the Nitro hypervisor.

For more information, see Instance Console Output in the Amazon Elastic Compute Cloud User Guide.

" }, "GetConsoleScreenshot":{ "name":"GetConsoleScreenshot", @@ -2440,7 +2440,7 @@ }, "input":{"shape":"GetPasswordDataRequest"}, "output":{"shape":"GetPasswordDataResult"}, - "documentation":"

Retrieves the encrypted administrator password for a running Windows instance.

The Windows password is generated at boot by the EC2Config service or EC2Launch scripts (Windows Server 2016 and later). This usually only happens the first time an instance is launched. For more information, see EC2Config and EC2Launch in the Amazon Elastic Compute Cloud User Guide.

For the EC2Config service, the password is not generated for rebundled AMIs unless Ec2SetPassword is enabled before bundling.

The password is encrypted using the key pair that you specified when you launched the instance. You must provide the corresponding key pair file.

When you launch an instance, password generation and encryption may take a few minutes. If you try to retrieve the password before it's available, the output returns an empty string. We recommend that you wait up to 15 minutes after launching an instance before trying to retrieve the generated password.

" + "documentation":"

Retrieves the encrypted administrator password for a running Windows instance.

The Windows password is generated at boot by the EC2Config service or EC2Launch scripts (Windows Server 2016 and later). This usually only happens the first time an instance is launched. For more information, see EC2Config and EC2Launch in the Amazon Elastic Compute Cloud User Guide.

For the EC2Config service, the password is not generated for rebundled AMIs unless Ec2SetPassword is enabled before bundling.

The password is encrypted using the key pair that you specified when you launched the instance. You must provide the corresponding key pair file.

When you launch an instance, password generation and encryption may take a few minutes. If you try to retrieve the password before it's available, the output returns an empty string. We recommend that you wait up to 15 minutes after launching an instance before trying to retrieve the generated password.

" }, "GetReservedInstancesExchangeQuote":{ "name":"GetReservedInstancesExchangeQuote", @@ -2500,7 +2500,7 @@ }, "input":{"shape":"ImportImageRequest"}, "output":{"shape":"ImportImageResult"}, - "documentation":"

Import single or multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI). For more information, see Importing a VM as an Image Using VM Import/Export in the VM Import/Export User Guide.

" + "documentation":"

Import single or multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI). For more information, see Importing a VM as an Image Using VM Import/Export in the VM Import/Export User Guide.

" }, "ImportInstance":{ "name":"ImportInstance", @@ -2510,7 +2510,7 @@ }, "input":{"shape":"ImportInstanceRequest"}, "output":{"shape":"ImportInstanceResult"}, - "documentation":"

Creates an import instance task using metadata from the specified disk image. ImportInstance only supports single-volume VMs. To import multi-volume VMs, use ImportImage. For more information, see Importing a Virtual Machine Using the Amazon EC2 CLI.

For information about the import manifest referenced by this API action, see VM Import Manifest.

" + "documentation":"

Creates an import instance task using metadata from the specified disk image. ImportInstance only supports single-volume VMs. To import multi-volume VMs, use ImportImage. For more information, see Importing a Virtual Machine Using the Amazon EC2 CLI.

For information about the import manifest referenced by this API action, see VM Import Manifest.

" }, "ImportKeyPair":{ "name":"ImportKeyPair", @@ -2520,7 +2520,7 @@ }, "input":{"shape":"ImportKeyPairRequest"}, "output":{"shape":"ImportKeyPairResult"}, - "documentation":"

Imports the public key from an RSA key pair that you created with a third-party tool. Compare this with CreateKeyPair, in which AWS creates the key pair and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, you create the key pair and give AWS just the public key. The private key is never transferred between you and AWS.

For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Imports the public key from an RSA key pair that you created with a third-party tool. Compare this with CreateKeyPair, in which AWS creates the key pair and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, you create the key pair and give AWS just the public key. The private key is never transferred between you and AWS.

For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

" }, "ImportSnapshot":{ "name":"ImportSnapshot", @@ -2540,7 +2540,7 @@ }, "input":{"shape":"ImportVolumeRequest"}, "output":{"shape":"ImportVolumeResult"}, - "documentation":"

Creates an import volume task using metadata from the specified disk image. For more information, see Importing Disks to Amazon EBS.

For information about the import manifest referenced by this API action, see VM Import Manifest.

" + "documentation":"

Creates an import volume task using metadata from the specified disk image. For more information, see Importing Disks to Amazon EBS.

For information about the import manifest referenced by this API action, see VM Import Manifest.

" }, "ModifyCapacityReservation":{ "name":"ModifyCapacityReservation", @@ -2599,7 +2599,7 @@ "requestUri":"/" }, "input":{"shape":"ModifyIdFormatRequest"}, - "documentation":"

Modifies the ID format for the specified resource on a per-region basis. You can specify that resources should receive longer IDs (17-character IDs) when they are created.

This request can only be used to modify longer ID settings for resource types that are within the opt-in period. Resources currently in their opt-in period include: bundle | conversion-task | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | route-table | route-table-association | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway.

This setting applies to the IAM user who makes the request; it does not apply to the entire AWS account. By default, an IAM user defaults to the same settings as the root user. If you're using this action as the root user, then these settings apply to the entire account, unless an IAM user explicitly overrides these settings for themselves. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

Resources created with longer IDs are visible to all IAM roles and users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

" + "documentation":"

Modifies the ID format for the specified resource on a per-region basis. You can specify that resources should receive longer IDs (17-character IDs) when they are created.

This request can only be used to modify longer ID settings for resource types that are within the opt-in period. Resources currently in their opt-in period include: bundle | conversion-task | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | route-table | route-table-association | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway.

This setting applies to the IAM user who makes the request; it does not apply to the entire AWS account. By default, an IAM user defaults to the same settings as the root user. If you're using this action as the root user, then these settings apply to the entire account, unless an IAM user explicitly overrides these settings for themselves. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

Resources created with longer IDs are visible to all IAM roles and users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

" }, "ModifyIdentityIdFormat":{ "name":"ModifyIdentityIdFormat", @@ -2608,7 +2608,7 @@ "requestUri":"/" }, "input":{"shape":"ModifyIdentityIdFormatRequest"}, - "documentation":"

Modifies the ID format of a resource for a specified IAM user, IAM role, or the root user for an account; or all IAM users, IAM roles, and the root user for an account. You can specify that resources should receive longer IDs (17-character IDs) when they are created.

This request can only be used to modify longer ID settings for resource types that are within the opt-in period. Resources currently in their opt-in period include: bundle | conversion-task | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | route-table | route-table-association | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway.

For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

This setting applies to the principal specified in the request; it does not apply to the principal that makes the request.

Resources created with longer IDs are visible to all IAM roles and users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

" + "documentation":"

Modifies the ID format of a resource for a specified IAM user, IAM role, or the root user for an account; or all IAM users, IAM roles, and the root user for an account. You can specify that resources should receive longer IDs (17-character IDs) when they are created.

This request can only be used to modify longer ID settings for resource types that are within the opt-in period. Resources currently in their opt-in period include: bundle | conversion-task | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | route-table | route-table-association | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway.

For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

This setting applies to the principal specified in the request; it does not apply to the principal that makes the request.

Resources created with longer IDs are visible to all IAM roles and users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

" }, "ModifyImageAttribute":{ "name":"ModifyImageAttribute", @@ -2626,7 +2626,7 @@ "requestUri":"/" }, "input":{"shape":"ModifyInstanceAttributeRequest"}, - "documentation":"

Modifies the specified attribute of the specified instance. You can specify only one attribute at a time.

Note: Using this action to change the security groups associated with an elastic network interface (ENI) attached to an instance in a VPC can result in an error if the instance has more than one ENI. To change the security groups associated with an ENI attached to an instance that has multiple ENIs, we recommend that you use the ModifyNetworkInterfaceAttribute action.

To modify some attributes, the instance must be stopped. For more information, see Modifying Attributes of a Stopped Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Modifies the specified attribute of the specified instance. You can specify only one attribute at a time.

Note: Using this action to change the security groups associated with an elastic network interface (ENI) attached to an instance in a VPC can result in an error if the instance has more than one ENI. To change the security groups associated with an ENI attached to an instance that has multiple ENIs, we recommend that you use the ModifyNetworkInterfaceAttribute action.

To modify some attributes, the instance must be stopped. For more information, see Modifying Attributes of a Stopped Instance in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifyInstanceCapacityReservationAttributes":{ "name":"ModifyInstanceCapacityReservationAttributes", @@ -2646,7 +2646,7 @@ }, "input":{"shape":"ModifyInstanceCreditSpecificationRequest"}, "output":{"shape":"ModifyInstanceCreditSpecificationResult"}, - "documentation":"

Modifies the credit option for CPU usage on a running or stopped T2 or T3 instance. The credit options are standard and unlimited.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Modifies the credit option for CPU usage on a running or stopped T2 or T3 instance. The credit options are standard and unlimited.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifyInstancePlacement":{ "name":"ModifyInstancePlacement", @@ -2656,7 +2656,7 @@ }, "input":{"shape":"ModifyInstancePlacementRequest"}, "output":{"shape":"ModifyInstancePlacementResult"}, - "documentation":"

Modifies the placement attributes for a specified instance. You can do the following:

At least one attribute for affinity, host ID, tenancy, or placement group name must be specified in the request. Affinity and tenancy can be modified in the same request.

To modify the host ID, tenancy, placement group, or partition for an instance, the instance must be in the stopped state.

" + "documentation":"

Modifies the placement attributes for a specified instance. You can do the following:

At least one attribute for affinity, host ID, tenancy, or placement group name must be specified in the request. Affinity and tenancy can be modified in the same request.

To modify the host ID, tenancy, placement group, or partition for an instance, the instance must be in the stopped state.

" }, "ModifyLaunchTemplate":{ "name":"ModifyLaunchTemplate", @@ -2685,7 +2685,7 @@ }, "input":{"shape":"ModifyReservedInstancesRequest"}, "output":{"shape":"ModifyReservedInstancesResult"}, - "documentation":"

Modifies the Availability Zone, instance count, instance type, or network platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type.

For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Modifies the Availability Zone, instance count, instance type, or network platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type.

For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifySnapshotAttribute":{ "name":"ModifySnapshotAttribute", @@ -2694,7 +2694,7 @@ "requestUri":"/" }, "input":{"shape":"ModifySnapshotAttributeRequest"}, - "documentation":"

Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

Encrypted snapshots and snapshots with AWS Marketplace product codes cannot be made public. Snapshots encrypted with your default CMK cannot be shared with other accounts.

For more information about modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

Encrypted snapshots and snapshots with AWS Marketplace product codes cannot be made public. Snapshots encrypted with your default CMK cannot be shared with other accounts.

For more information about modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifySpotFleetRequest":{ "name":"ModifySpotFleetRequest", @@ -2733,7 +2733,7 @@ }, "input":{"shape":"ModifyVolumeRequest"}, "output":{"shape":"ModifyVolumeResult"}, - "documentation":"

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you may be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux File System. For information about extending a Windows file system, see Extending a Windows File System.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using the DescribeVolumesModifications API. For information about tracking status changes using either method, see Monitoring Volume Modifications.

With previous-generation instance types, resizing an EBS volume may require detaching and reattaching the volume or stopping and restarting the instance. For more information, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux and Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.

" + "documentation":"

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you may be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux File System. For information about extending a Windows file system, see Extending a Windows File System.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using the DescribeVolumesModifications API. For information about tracking status changes using either method, see Monitoring Volume Modifications.

With previous-generation instance types, resizing an EBS volume may require detaching and reattaching the volume or stopping and restarting the instance. For more information, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux and Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.

" }, "ModifyVolumeAttribute":{ "name":"ModifyVolumeAttribute", @@ -2761,7 +2761,7 @@ }, "input":{"shape":"ModifyVpcEndpointRequest"}, "output":{"shape":"ModifyVpcEndpointResult"}, - "documentation":"

Modifies attributes of a specified VPC endpoint. The attributes that you can modify depend on the type of VPC endpoint (interface or gateway). For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Modifies attributes of a specified VPC endpoint. The attributes that you can modify depend on the type of VPC endpoint (interface or gateway). For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

" }, "ModifyVpcEndpointConnectionNotification":{ "name":"ModifyVpcEndpointConnectionNotification", @@ -2791,7 +2791,7 @@ }, "input":{"shape":"ModifyVpcEndpointServicePermissionsRequest"}, "output":{"shape":"ModifyVpcEndpointServicePermissionsResult"}, - "documentation":"

Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, IAM roles, and AWS accounts) to connect to your endpoint service.

If you grant permissions to all principals, the service is public. Any users who know the name of a public service can send a request to attach an endpoint. If the service does not require manual approval, attachments are automatically approved.

" + "documentation":"

Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, IAM roles, and AWS accounts) to connect to your endpoint service.

If you grant permissions to all principals, the service is public. Any users who know the name of a public service can send a request to attach an endpoint. If the service does not require manual approval, attachments are automatically approved.

" }, "ModifyVpcPeeringConnectionOptions":{ "name":"ModifyVpcPeeringConnectionOptions", @@ -2811,7 +2811,7 @@ }, "input":{"shape":"ModifyVpcTenancyRequest"}, "output":{"shape":"ModifyVpcTenancyResult"}, - "documentation":"

Modifies the instance tenancy attribute of the specified VPC. You can change the instance tenancy attribute of a VPC to default only. You cannot change the instance tenancy attribute to dedicated.

After you modify the tenancy of the VPC, any new instances that you launch into the VPC have a tenancy of default, unless you specify otherwise during launch. The tenancy of any existing instances in the VPC is not affected.

For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Modifies the instance tenancy attribute of the specified VPC. You can change the instance tenancy attribute of a VPC to default only. You cannot change the instance tenancy attribute to dedicated.

After you modify the tenancy of the VPC, any new instances that you launch into the VPC have a tenancy of default, unless you specify otherwise during launch. The tenancy of any existing instances in the VPC is not affected.

For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "MonitorInstances":{ "name":"MonitorInstances", @@ -2821,7 +2821,7 @@ }, "input":{"shape":"MonitorInstancesRequest"}, "output":{"shape":"MonitorInstancesResult"}, - "documentation":"

Enables detailed monitoring for a running instance. Otherwise, basic monitoring is enabled. For more information, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

To disable detailed monitoring, see .

" + "documentation":"

Enables detailed monitoring for a running instance. Otherwise, basic monitoring is enabled. For more information, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

To disable detailed monitoring, see .

" }, "MoveAddressToVpc":{ "name":"MoveAddressToVpc", @@ -2841,7 +2841,7 @@ }, "input":{"shape":"ProvisionByoipCidrRequest"}, "output":{"shape":"ProvisionByoipCidrResult"}, - "documentation":"

Provisions an address range for use with your AWS resources through bring your own IP addresses (BYOIP) and creates a corresponding address pool. After the address range is provisioned, it is ready to be advertised using AdvertiseByoipCidr.

AWS verifies that you own the address range and are authorized to advertise it. You must ensure that the address range is registered to you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

Provisioning an address range is an asynchronous operation, so the call returns immediately, but the address range is not ready to use until its status changes from pending-provision to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. To allocate an Elastic IP address from your address pool, use AllocateAddress with either the specific address from the address pool or the ID of the address pool.

" + "documentation":"

Provisions an address range for use with your AWS resources through bring your own IP addresses (BYOIP) and creates a corresponding address pool. After the address range is provisioned, it is ready to be advertised using AdvertiseByoipCidr.

AWS verifies that you own the address range and are authorized to advertise it. You must ensure that the address range is registered to you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

Provisioning an address range is an asynchronous operation, so the call returns immediately, but the address range is not ready to use until its status changes from pending-provision to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. To allocate an Elastic IP address from your address pool, use AllocateAddress with either the specific address from the address pool or the ID of the address pool.

" }, "PurchaseHostReservation":{ "name":"PurchaseHostReservation", @@ -2861,7 +2861,7 @@ }, "input":{"shape":"PurchaseReservedInstancesOfferingRequest"}, "output":{"shape":"PurchaseReservedInstancesOfferingResult"}, - "documentation":"

Purchases a Reserved Instance for use with your account. With Reserved Instances, you pay a lower hourly rate compared to On-Demand instance pricing.

Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings that match your specifications. After you've purchased a Reserved Instance, you can check for your new Reserved Instance with DescribeReservedInstances.

For more information, see Reserved Instances and Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Purchases a Reserved Instance for use with your account. With Reserved Instances, you pay a lower hourly rate compared to On-Demand instance pricing.

Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings that match your specifications. After you've purchased a Reserved Instance, you can check for your new Reserved Instance with DescribeReservedInstances.

For more information, see Reserved Instances and Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

" }, "PurchaseScheduledInstances":{ "name":"PurchaseScheduledInstances", @@ -2880,7 +2880,7 @@ "requestUri":"/" }, "input":{"shape":"RebootInstancesRequest"}, - "documentation":"

Requests a reboot of one or more instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

If an instance does not cleanly shut down within four minutes, Amazon EC2 performs a hard reboot.

For more information about troubleshooting, see Getting Console Output and Rebooting Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Requests a reboot of one or more instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

If an instance does not cleanly shut down within four minutes, Amazon EC2 performs a hard reboot.

For more information about troubleshooting, see Getting Console Output and Rebooting Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "RegisterImage":{ "name":"RegisterImage", @@ -2890,7 +2890,7 @@ }, "input":{"shape":"RegisterImageRequest"}, "output":{"shape":"RegisterImageResult"}, - "documentation":"

Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using the block device mapping. For more information, see Launching a Linux Instance from a Backup in the Amazon Elastic Compute Cloud User Guide.

You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to verify the subscription status for package updates. Creating an AMI from an EBS snapshot does not maintain this billing code, and instances launched from such an AMI are not able to connect to package update infrastructure. If you purchase a Reserved Instance offering for one of these Linux distributions and launch instances using an AMI that does not contain the required billing code, your Reserved Instance is not applied to these instances.

To create an AMI for operating systems that require a billing code, see CreateImage.

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

" + "documentation":"

Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using the block device mapping. For more information, see Launching a Linux Instance from a Backup in the Amazon Elastic Compute Cloud User Guide.

You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to verify the subscription status for package updates. Creating an AMI from an EBS snapshot does not maintain this billing code, and instances launched from such an AMI are not able to connect to package update infrastructure. If you purchase a Reserved Instance offering for one of these Linux distributions and launch instances using an AMI that does not contain the required billing code, your Reserved Instance is not applied to these instances.

To create an AMI for operating systems that require a billing code, see CreateImage.

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

" }, "RejectTransitGatewayVpcAttachment":{ "name":"RejectTransitGatewayVpcAttachment", @@ -2959,7 +2959,7 @@ }, "input":{"shape":"ReplaceNetworkAclAssociationRequest"}, "output":{"shape":"ReplaceNetworkAclAssociationResult"}, - "documentation":"

Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

This is an idempotent operation.

" + "documentation":"

Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

This is an idempotent operation.

" }, "ReplaceNetworkAclEntry":{ "name":"ReplaceNetworkAclEntry", @@ -2968,7 +2968,7 @@ "requestUri":"/" }, "input":{"shape":"ReplaceNetworkAclEntryRequest"}, - "documentation":"

Replaces an entry (rule) in a network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Replaces an entry (rule) in a network ACL. For more information, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

" }, "ReplaceRoute":{ "name":"ReplaceRoute", @@ -2977,7 +2977,7 @@ "requestUri":"/" }, "input":{"shape":"ReplaceRouteRequest"}, - "documentation":"

Replaces an existing route within a route table in a VPC. You must provide only one of the following: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Replaces an existing route within a route table in a VPC. You must provide only one of the following: internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only internet gateway.

For more information, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "ReplaceRouteTableAssociation":{ "name":"ReplaceRouteTableAssociation", @@ -2987,7 +2987,7 @@ }, "input":{"shape":"ReplaceRouteTableAssociationRequest"}, "output":{"shape":"ReplaceRouteTableAssociationResult"}, - "documentation":"

Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

" + "documentation":"

Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

" }, "ReplaceTransitGatewayRoute":{ "name":"ReplaceTransitGatewayRoute", @@ -3016,7 +3016,7 @@ }, "input":{"shape":"RequestSpotFleetRequest"}, "output":{"shape":"RequestSpotFleetResponse"}, - "documentation":"

Creates a Spot Fleet request.

The Spot Fleet request specifies the total target capacity and the On-Demand target capacity. Amazon EC2 calculates the difference between the total capacity and On-Demand capacity, and launches the difference as Spot capacity.

You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

By default, the Spot Fleet requests Spot Instances in the Spot pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

Alternatively, you can specify that the Spot Fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot Instances in your Spot Fleet are in different Spot pools, you can improve the availability of your fleet.

You can specify tags for the Spot Instances. You cannot tag other resource types in a Spot Fleet request because only the instance resource type is supported.

For more information, see Spot Fleet Requests in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Creates a Spot Fleet request.

The Spot Fleet request specifies the total target capacity and the On-Demand target capacity. Amazon EC2 calculates the difference between the total capacity and On-Demand capacity, and launches the difference as Spot capacity.

You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

By default, the Spot Fleet requests Spot Instances in the Spot pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

Alternatively, you can specify that the Spot Fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot Instances in your Spot Fleet are in different Spot pools, you can improve the availability of your fleet.

You can specify tags for the Spot Instances. You cannot tag other resource types in a Spot Fleet request because only the instance resource type is supported.

For more information, see Spot Fleet Requests in the Amazon EC2 User Guide for Linux Instances.

" }, "RequestSpotInstances":{ "name":"RequestSpotInstances", @@ -3026,7 +3026,7 @@ }, "input":{"shape":"RequestSpotInstancesRequest"}, "output":{"shape":"RequestSpotInstancesResult"}, - "documentation":"

Creates a Spot Instance request.

For more information, see Spot Instance Requests in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Creates a Spot Instance request.

For more information, see Spot Instance Requests in the Amazon EC2 User Guide for Linux Instances.

" }, "ResetFpgaImageAttribute":{ "name":"ResetFpgaImageAttribute", @@ -3054,7 +3054,7 @@ "requestUri":"/" }, "input":{"shape":"ResetInstanceAttributeRequest"}, - "documentation":"

Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, the instance can be either running or stopped.

The sourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, the instance can be either running or stopped.

The sourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

" }, "ResetNetworkInterfaceAttribute":{ "name":"ResetNetworkInterfaceAttribute", @@ -3072,7 +3072,7 @@ "requestUri":"/" }, "input":{"shape":"ResetSnapshotAttributeRequest"}, - "documentation":"

Resets permission settings for the specified snapshot.

For more information about modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Resets permission settings for the specified snapshot.

For more information about modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

" }, "RestoreAddressToClassic":{ "name":"RestoreAddressToClassic", @@ -3120,7 +3120,7 @@ }, "input":{"shape":"RunInstancesRequest"}, "output":{"shape":"Reservation"}, - "documentation":"

Launches the specified number of instances using an AMI for which you have permissions.

You can specify a number of options, or leave the default options. The following rules apply:

You can create a launch template, which is a resource that contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify the launch template instead of specifying the launch parameters.

To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances.

An instance is ready for you to use when it's in the running state. You can check the state of your instance using DescribeInstances. You can tag instances and EBS volumes during launch, after launch, or both. For more information, see CreateTags and Tagging Your Amazon EC2 Resources.

Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

For troubleshooting, see What To Do If An Instance Immediately Terminates, and Troubleshooting Connecting to Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Launches the specified number of instances using an AMI for which you have permissions.

You can specify a number of options, or leave the default options. The following rules apply:

You can create a launch template, which is a resource that contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify the launch template instead of specifying the launch parameters.

To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances.

An instance is ready for you to use when it's in the running state. You can check the state of your instance using DescribeInstances. You can tag instances and EBS volumes during launch, after launch, or both. For more information, see CreateTags and Tagging Your Amazon EC2 Resources.

Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

For troubleshooting, see What To Do If An Instance Immediately Terminates, and Troubleshooting Connecting to Your Instance in the Amazon Elastic Compute Cloud User Guide.

" }, "RunScheduledInstances":{ "name":"RunScheduledInstances", @@ -3130,7 +3130,7 @@ }, "input":{"shape":"RunScheduledInstancesRequest"}, "output":{"shape":"RunScheduledInstancesResult"}, - "documentation":"

Launches the specified Scheduled Instances.

Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances.

You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, you can launch it again after a few minutes. For more information, see Scheduled Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Launches the specified Scheduled Instances.

Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances.

You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, you can launch it again after a few minutes. For more information, see Scheduled Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "SearchTransitGatewayRoutes":{ "name":"SearchTransitGatewayRoutes", @@ -3150,7 +3150,7 @@ }, "input":{"shape":"StartInstancesRequest"}, "output":{"shape":"StartInstancesResult"}, - "documentation":"

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "StopInstances":{ "name":"StopInstances", @@ -3160,7 +3160,7 @@ }, "input":{"shape":"StopInstancesRequest"}, "output":{"shape":"StopInstancesResult"}, - "documentation":"

Stops an Amazon EBS-backed instance.

You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

You can't start, stop, or hibernate Spot Instances, and you can't stop or hibernate instance store-backed instances. For information about using hibernation for Spot Instances, see Hibernating Interrupted Spot Instances in the Amazon Elastic Compute Cloud User Guide.

When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.

Stopping and hibernating an instance is different from rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshooting Stopping Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Stops an Amazon EBS-backed instance.

You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

You can't start, stop, or hibernate Spot Instances, and you can't stop or hibernate instance store-backed instances. For information about using hibernation for Spot Instances, see Hibernating Interrupted Spot Instances in the Amazon Elastic Compute Cloud User Guide.

When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.

Stopping and hibernating an instance is different from rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshooting Stopping Your Instance in the Amazon Elastic Compute Cloud User Guide.

" }, "TerminateClientVpnConnections":{ "name":"TerminateClientVpnConnections", @@ -3180,7 +3180,7 @@ }, "input":{"shape":"TerminateInstancesRequest"}, "output":{"shape":"TerminateInstancesResult"}, - "documentation":"

Shuts down one or more instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

If you specify multiple instances and the request fails (for example, because of a single incorrect instance ID), none of the instances are terminated.

Terminated instances remain visible after termination (for approximately one hour).

By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

For more information about troubleshooting, see Troubleshooting Terminating Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Shuts down one or more instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

If you specify multiple instances and the request fails (for example, because of a single incorrect instance ID), none of the instances are terminated.

Terminated instances remain visible after termination (for approximately one hour).

By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

For more information about troubleshooting, see Troubleshooting Terminating Your Instance in the Amazon Elastic Compute Cloud User Guide.

" }, "UnassignIpv6Addresses":{ "name":"UnassignIpv6Addresses", @@ -3209,7 +3209,7 @@ }, "input":{"shape":"UnmonitorInstancesRequest"}, "output":{"shape":"UnmonitorInstancesResult"}, - "documentation":"

Disables detailed monitoring for a running instance. For more information, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Disables detailed monitoring for a running instance. For more information, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "UpdateSecurityGroupRuleDescriptionsEgress":{ "name":"UpdateSecurityGroupRuleDescriptionsEgress", @@ -3522,7 +3522,7 @@ "members":{ "Cidr":{ "shape":"String", - "documentation":"

The IPv4 address range, in CIDR notation.

" + "documentation":"

The IPv4 address range, in CIDR notation. This must be the exact range that you provisioned. You can't advertise only a portion of the provisioned range.

" }, "DryRun":{ "shape":"Boolean", @@ -3614,7 +3614,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"clientToken" }, "InstanceType":{ @@ -4500,7 +4500,7 @@ }, "SourceSecurityGroupOwnerId":{ "shape":"String", - "documentation":"

[EC2-Classic] The AWS account ID for the source security group, if the source security group is in a different account. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific IP protocol and port range, use a set of IP permissions instead.

" + "documentation":"

[nondefault VPC] The AWS account ID for the source security group, if the source security group is in a different account. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific IP protocol and port range, use a set of IP permissions instead.

" }, "ToPort":{ "shape":"Integer", @@ -5275,7 +5275,10 @@ "Windows with SQL Server", "Windows with SQL Server Enterprise", "Windows with SQL Server Standard", - "Windows with SQL Server Web" + "Windows with SQL Server Web", + "Linux with SQL Server Standard", + "Linux with SQL Server Web", + "Linux with SQL Server Enterprise" ] }, "CapacityReservationPreference":{ @@ -5304,7 +5307,7 @@ "documentation":"

Information about the target Capacity Reservation.

" } }, - "documentation":"

Describes an instance's Capacity Reservation targeting option. You can specify only one option at a time. Use the CapacityReservationPreference parameter to configure the instance to run as an On-Demand Instance or to run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone). Use the CapacityReservationTarget parameter to explicitly target a specific Capacity Reservation.

" + "documentation":"

Describes an instance's Capacity Reservation targeting option. You can specify only one parameter at a time. If you specify CapacityReservationPreference and CapacityReservationTarget, the request fails.

Use the CapacityReservationPreference parameter to configure the instance to run as an On-Demand Instance or to run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone). Use the CapacityReservationTarget parameter to explicitly target a specific Capacity Reservation.

" }, "CapacityReservationSpecificationResponse":{ "type":"structure", @@ -5759,12 +5762,12 @@ }, "DeletionTime":{ "shape":"String", - "documentation":"

The date and time the Client VPN endpoint was deleted, if applicable. Information about deleted Client VPN endpoints is retained for 24 hours, unless a new Client VPN is created with the same name.

", + "documentation":"

The date and time the Client VPN endpoint was deleted, if applicable.

", "locationName":"deletionTime" }, "DnsName":{ "shape":"String", - "documentation":"

The DNS name to be used by clients when establishing a connection.

", + "documentation":"

The DNS name to be used by clients when connecting to the Client VPN endpoint.

", "locationName":"dnsName" }, "ClientCidrBlock":{ @@ -5772,9 +5775,14 @@ "documentation":"

The IPv4 address range, in CIDR notation, from which client IP addresses are assigned.

", "locationName":"clientCidrBlock" }, + "DnsServers":{ + "shape":"ValueStringList", + "documentation":"

Information about the DNS servers to be used for DNS resolution.

", + "locationName":"dnsServer" + }, "SplitTunnel":{ "shape":"Boolean", - "documentation":"

Indicates whether VPN split tunneling is supported.

", + "documentation":"

Indicates whether VPN split tunneling is supported.

", "locationName":"splitTunnel" }, "VpnProtocol":{ @@ -5790,6 +5798,8 @@ "AssociatedTargetNetworks":{ "shape":"AssociatedTargetNetworkSet", "documentation":"

Information about the associated target networks. A target network is a subnet in a VPC.

", + "deprecated":true, + "deprecatedMessage":"This property is deprecated. To view the target networks associated with a Client VPN endpoint, call DescribeClientVpnTargetNetworks and inspect the clientVpnTargetNetworks response element.", "locationName":"associatedTargetNetwork" }, "ServerCertificateArn":{ @@ -5806,6 +5816,11 @@ "shape":"ConnectionLogResponseOptions", "documentation":"

Information about the client connection logging options for the Client VPN endpoint.

", "locationName":"connectionLogOptions" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags assigned to the Client VPN endpoint.

", + "locationName":"tagSet" } }, "documentation":"

Describes a Client VPN endpoint.

" @@ -6137,7 +6152,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" } } }, @@ -6161,7 +6176,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

" }, "Description":{ "shape":"String", @@ -6169,7 +6184,7 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the destination snapshots of the copied image should be encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted copy of an encrypted snapshot. The default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Specifies whether the destination snapshots of the copied image should be encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted copy of an encrypted snapshot. The default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"encrypted" }, "KmsKeyId":{ @@ -6226,7 +6241,7 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the destination snapshot should be encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot use it to create an unencrypted copy of an encrypted snapshot. Your default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Specifies whether the destination snapshot should be encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot use it to create an unencrypted copy of an encrypted snapshot. Your default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"encrypted" }, "KmsKeyId":{ @@ -6236,7 +6251,7 @@ }, "PresignedUrl":{ "shape":"String", - "documentation":"

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query Requests.

The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

", + "documentation":"

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query Requests.

The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

", "locationName":"presignedUrl" }, "SourceRegion":{ @@ -6307,11 +6322,11 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

Constraint: Maximum 64 ASCII characters.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

Constraint: Maximum 64 ASCII characters.

" }, "InstanceType":{ "shape":"String", - "documentation":"

The instance type for which to reserve capacity. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The instance type for which to reserve capacity. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

" }, "InstancePlatform":{ "shape":"CapacityReservationInstancePlatform", @@ -6413,8 +6428,13 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", "idempotencyToken":true + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the Client VPN endpoint during creation.

", + "locationName":"TagSpecification" } } }, @@ -6593,7 +6613,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" }, "DryRun":{ "shape":"Boolean", @@ -6635,12 +6655,12 @@ }, "ErrorCode":{ "shape":"String", - "documentation":"

The error code that indicates why the instance could not be launched. For more information about error codes, see Error Codes.

", + "documentation":"

The error code that indicates why the instance could not be launched. For more information about error codes, see Error Codes.

", "locationName":"errorCode" }, "ErrorMessage":{ "shape":"String", - "documentation":"

The error message that describes why the instance could not be launched. For more information about error messages, see Error Codes.

", + "documentation":"

The error message that describes why the instance could not be launched. For more information about error messages, see Error Codes.

", "locationName":"errorMessage" } }, @@ -6704,7 +6724,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" }, "SpotOptions":{ "shape":"SpotOptionsRequest", @@ -6732,7 +6752,7 @@ }, "Type":{ "shape":"FleetType", - "documentation":"

The type of the request. By default, the EC2 Fleet places an asynchronous request for your desired capacity, and maintains it by replenishing interrupted Spot Instances (maintain). A value of instant places a synchronous one-time request, and returns errors for any instances that could not be launched. A value of request places an asynchronous one-time request without maintaining capacity or submitting requests in alternative capacity pools if capacity is unavailable. For more information, see EC2 Fleet Request Types in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The type of the request. By default, the EC2 Fleet places an asynchronous request for your desired capacity, and maintains it by replenishing interrupted Spot Instances (maintain). A value of instant places a synchronous one-time request, and returns errors for any instances that could not be launched. A value of request places an asynchronous one-time request without maintaining capacity or submitting requests in alternative capacity pools if capacity is unavailable. For more information, see EC2 Fleet Request Types in the Amazon Elastic Compute Cloud User Guide.

" }, "ValidFrom":{ "shape":"DateTime", @@ -6748,7 +6768,7 @@ }, "TagSpecifications":{ "shape":"TagSpecificationList", - "documentation":"

The key-value pair for tagging the EC2 Fleet request on creation. The value for ResourceType must be fleet, otherwise the fleet request fails. To tag instances at launch, specify the tags in the launch template. For information about tagging after launch, see Tagging Your Resources.

", + "documentation":"

The key-value pair for tagging the EC2 Fleet request on creation. The value for ResourceType must be fleet, otherwise the fleet request fails. To tag instances at launch, specify the tags in the launch template. For information about tagging after launch, see Tagging Your Resources.

", "locationName":"TagSpecification" } } @@ -6787,7 +6807,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" }, "DeliverLogsPermissionArn":{ "shape":"String", @@ -6866,7 +6886,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" } } }, @@ -7022,7 +7042,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraint: Maximum 128 ASCII characters.

" }, "LaunchTemplateName":{ "shape":"LaunchTemplateName", @@ -7058,7 +7078,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraint: Maximum 128 ASCII characters.

" }, "LaunchTemplateId":{ "shape":"String", @@ -7105,7 +7125,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

Constraint: Maximum 64 ASCII characters.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

Constraint: Maximum 64 ASCII characters.

" }, "SubnetId":{ "shape":"String", @@ -7298,7 +7318,7 @@ }, "SecondaryPrivateIpAddressCount":{ "shape":"Integer", - "documentation":"

The number of secondary private IPv4 addresses to assign to a network interface. When you specify a number of secondary IPv4 addresses, Amazon EC2 selects these IP addresses within the subnet's IPv4 CIDR range. You can't specify this option and specify more than one private IP address using privateIpAddresses.

The number of IP addresses you can assign to a network interface varies by instance type. For more information, see IP Addresses Per ENI Per Instance Type in the Amazon Virtual Private Cloud User Guide.

", + "documentation":"

The number of secondary private IPv4 addresses to assign to a network interface. When you specify a number of secondary IPv4 addresses, Amazon EC2 selects these IP addresses within the subnet's IPv4 CIDR range. You can't specify this option and specify more than one private IP address using privateIpAddresses.

The number of IP addresses you can assign to a network interface varies by instance type. For more information, see IP Addresses Per ENI Per Instance Type in the Amazon Virtual Private Cloud User Guide.

", "locationName":"secondaryPrivateIpAddressCount" }, "SubnetId":{ @@ -7355,7 +7375,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

", + "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

", "locationName":"clientToken" }, "InstanceCount":{ @@ -7850,12 +7870,12 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or vice versa. If your AMI uses encrypted volumes, you can only launch it on supported instance types. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or vice versa. If your AMI uses encrypted volumes, you can only launch it on supported instance types. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"encrypted" }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Range is 100 to 64,000 IOPS for volumes in most regions. Maximum IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

This parameter is valid only for Provisioned IOPS SSD (io1) volumes.

" + "documentation":"

The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Range is 100 to 64,000 IOPS for volumes in most regions. Maximum IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

This parameter is valid only for Provisioned IOPS SSD (io1) volumes.

" }, "KmsKeyId":{ "shape":"String", @@ -7863,11 +7883,11 @@ }, "Size":{ "shape":"Integer", - "documentation":"

The size of the volume, in GiBs.

Constraints: 1-16,384 for gp2, 4-16,384 for io1, 500-16,384 for st1, 500-16,384 for sc1, and 1-1,024 for standard. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

" + "documentation":"

The size of the volume, in GiBs.

Constraints: 1-16,384 for gp2, 4-16,384 for io1, 500-16,384 for st1, 500-16,384 for sc1, and 1-1,024 for standard. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

At least one of Size or SnapshotId is required.

" }, "SnapshotId":{ "shape":"String", - "documentation":"

The snapshot from which to create the volume.

" + "documentation":"

The snapshot from which to create the volume.

At least one of Size or SnapshotId is required.

" }, "VolumeType":{ "shape":"VolumeType", @@ -7915,7 +7935,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" } } }, @@ -7978,7 +7998,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" }, "PrivateDnsEnabled":{ "shape":"Boolean", @@ -8022,7 +8042,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" } } }, @@ -9367,7 +9387,7 @@ }, "PublicIps":{ "shape":"PublicIpStringList", - "documentation":"

[EC2-Classic] One or more Elastic IP addresses.

Default: Describes all your Elastic IP addresses.

", + "documentation":"

One or more Elastic IP addresses.

Default: Describes all your Elastic IP addresses.

", "locationName":"PublicIp" }, "AllocationIds":{ @@ -10034,12 +10054,12 @@ }, "ErrorCode":{ "shape":"String", - "documentation":"

The error code that indicates why the instance could not be launched. For more information about error codes, see Error Codes.

", + "documentation":"

The error code that indicates why the instance could not be launched. For more information about error codes, see Error Codes.

", "locationName":"errorCode" }, "ErrorMessage":{ "shape":"String", - "documentation":"

The error message that describes why the instance could not be launched. For more information about error messages, see ee Error Codes.

", + "documentation":"

The error message that describes why the instance could not be launched. For more information about error messages, see Error Codes.

", "locationName":"errorMessage" } }, @@ -10597,7 +10617,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "ImageIds":{ @@ -10834,7 +10854,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "InstanceIds":{ @@ -11548,7 +11568,7 @@ }, "InstanceType":{ "shape":"InstanceType", - "documentation":"

The instance type that the reservation will cover (for example, m1.small). For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The instance type that the reservation will cover (for example, m1.small). For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

" }, "MaxDuration":{ "shape":"Long", @@ -12170,7 +12190,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -12182,6 +12202,14 @@ "shape":"SpotInstanceRequestIdList", "documentation":"

One or more Spot Instance request IDs.

", "locationName":"SpotInstanceRequestId" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to request the next set of results. This value is null when there are no more results to return.

" + }, + "MaxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of results to return in a single call. Specify a value between 5 and 1000. To retrieve the remaining results, make another call with the returned NextToken value.

" } }, "documentation":"

Contains the parameters for DescribeSpotInstanceRequests.

" @@ -12193,6 +12221,11 @@ "shape":"SpotInstanceRequestList", "documentation":"

One or more Spot Instance requests.

", "locationName":"spotInstanceRequestSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next set of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" } }, "documentation":"

Contains the output of DescribeSpotInstanceRequests.

" @@ -12380,7 +12413,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. The possible values are:

", + "documentation":"

One or more filters. The possible values are:

", "locationName":"Filter" }, "MaxResults":{ @@ -12421,7 +12454,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. The possible values are:

", + "documentation":"

One or more filters. The possible values are:

", "locationName":"Filter" }, "MaxResults":{ @@ -12462,7 +12495,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. The possible values are:

", + "documentation":"

One or more filters. The possible values are:

", "locationName":"Filter" }, "MaxResults":{ @@ -12503,7 +12536,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. The possible values are:

", + "documentation":"

One or more filters. The possible values are:

", "locationName":"Filter" }, "MaxResults":{ @@ -13074,6 +13107,11 @@ }, "documentation":"

Contains the output of DescribeVpcEndpoints.

" }, + "DescribeVpcPeeringConnectionsMaxResults":{ + "type":"integer", + "max":1000, + "min":5 + }, "DescribeVpcPeeringConnectionsRequest":{ "type":"structure", "members":{ @@ -13091,6 +13129,14 @@ "shape":"ValueStringList", "documentation":"

One or more VPC peering connection IDs.

Default: Describes all your VPC peering connections.

", "locationName":"VpcPeeringConnectionId" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to request the next page of results. (You received this token from a prior call.)

" + }, + "MaxResults":{ + "shape":"DescribeVpcPeeringConnectionsMaxResults", + "documentation":"

The maximum number of results to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

" } } }, @@ -13101,6 +13147,11 @@ "shape":"VpcPeeringConnectionList", "documentation":"

Information about the VPC peering connections.

", "locationName":"vpcPeeringConnectionSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" } } }, @@ -13732,7 +13783,7 @@ }, "ImportManifestUrl":{ "shape":"String", - "documentation":"

A presigned URL for the import manifest stored in Amazon S3. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

For information about the import manifest referenced by this API action, see VM Import Manifest.

", + "documentation":"

A presigned URL for the import manifest stored in Amazon S3. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

For information about the import manifest referenced by this API action, see VM Import Manifest.

", "locationName":"importManifestUrl" }, "Size":{ @@ -13763,7 +13814,7 @@ }, "ImportManifestUrl":{ "shape":"String", - "documentation":"

A presigned URL for the import manifest stored in Amazon S3 and presented here as an Amazon S3 presigned URL. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

For information about the import manifest referenced by this API action, see VM Import Manifest.

", + "documentation":"

A presigned URL for the import manifest stored in Amazon S3 and presented here as an Amazon S3 presigned URL. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

For information about the import manifest referenced by this API action, see VM Import Manifest.

", "locationName":"importManifestUrl" } }, @@ -13859,7 +13910,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000IOPS for io1 volumes in most Regions. Maximum io1IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", + "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000 IOPS for io1 volumes in most Regions. Maximum io1 IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", "locationName":"iops" }, "SnapshotId":{ @@ -13884,7 +13935,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK under which the EBS volume is encrypted.

This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

" + "documentation":"

Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK under which the EBS volume is encrypted.

This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

" } }, "documentation":"

Describes a block device for an EBS volume.

" @@ -14524,7 +14575,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. The possible values are:

", + "documentation":"

One or more filters. The possible values are:

", "locationName":"Filter" }, "S3Bucket":{ @@ -14603,7 +14654,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraints: Maximum 64 ASCII characters

", + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraints: Maximum 64 ASCII characters

", "locationName":"clientToken" }, "ExcessCapacityTerminationPolicy":{ @@ -15445,7 +15496,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. The possible values are:

", + "documentation":"

One or more filters. The possible values are:

", "locationName":"Filter" }, "MaxResults":{ @@ -15579,7 +15630,7 @@ "locationName":"configured" } }, - "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. Hibernation is currently supported only for Amazon Linux. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. Hibernation is currently supported only for Amazon Linux. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" }, "HibernationOptionsRequest":{ "type":"structure", @@ -15589,7 +15640,7 @@ "documentation":"

If you set this parameter to true, your instance is enabled for hibernation.

Default: false

" } }, - "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. Hibernation is currently supported only for Amazon Linux. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. Hibernation is currently supported only for Amazon Linux. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" }, "HistoryRecord":{ "type":"structure", @@ -15667,7 +15718,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"clientToken" }, "HostId":{ @@ -16355,7 +16406,7 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the destination AMI of the imported image should be encrypted. The default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Specifies whether the destination AMI of the imported image should be encrypted. The default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "Hypervisor":{ "shape":"String", @@ -16367,7 +16418,7 @@ }, "LicenseType":{ "shape":"String", - "documentation":"

The license type to be used for the Amazon Machine Image (AMI) after importing.

Note: You may only use BYOL if you have existing licenses with rights to use these licenses in a third party cloud like AWS. For more information, see Prerequisites in the VM Import/Export User Guide.

Valid values: AWS | BYOL

" + "documentation":"

The license type to be used for the Amazon Machine Image (AMI) after importing.

Note: You may only use BYOL if you have existing licenses with rights to use these licenses in a third party cloud like AWS. For more information, see Prerequisites in the VM Import/Export User Guide.

Valid values include:

Default value: Auto

" }, "Platform":{ "shape":"String", @@ -16559,7 +16610,7 @@ }, "InstanceType":{ "shape":"InstanceType", - "documentation":"

The instance type. For more information about the instance types that you can import, see Instance Types in the VM Import/Export User Guide.

", + "documentation":"

The instance type. For more information about the instance types that you can import, see Instance Types in the VM Import/Export User Guide.

", "locationName":"instanceType" }, "Monitoring":{ @@ -16771,7 +16822,7 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the destination snapshot of the imported image should be encrypted. The default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Specifies whether the destination snapshot of the imported image should be encrypted. The default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "KmsKeyId":{ "shape":"String", @@ -17093,7 +17144,7 @@ }, "SourceDestCheck":{ "shape":"Boolean", - "documentation":"

Specifies whether to enable an instance launched in a VPC to perform NAT. This controls whether source/destination checking is enabled on the instance. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

", + "documentation":"

Specifies whether to enable an instance launched in a VPC to perform NAT. This controls whether source/destination checking is enabled on the instance. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

", "locationName":"sourceDestCheck" }, "SpotInstanceRequestId":{ @@ -18020,9 +18071,7 @@ "r5.xlarge", "r5.2xlarge", "r5.4xlarge", - "r5.8xlarge", "r5.12xlarge", - "r5.16xlarge", "r5.24xlarge", "r5.metal", "r5a.large", @@ -18035,9 +18084,7 @@ "r5d.xlarge", "r5d.2xlarge", "r5d.4xlarge", - "r5d.8xlarge", "r5d.12xlarge", - "r5d.16xlarge", "r5d.24xlarge", "r5d.metal", "x1.16xlarge", @@ -18120,6 +18167,7 @@ "m5.4xlarge", "m5.12xlarge", "m5.24xlarge", + "m5.metal", "m5a.large", "m5a.xlarge", "m5a.2xlarge", @@ -18132,6 +18180,7 @@ "m5d.4xlarge", "m5d.12xlarge", "m5d.24xlarge", + "m5d.metal", "h1.2xlarge", "h1.4xlarge", "h1.8xlarge", @@ -18142,6 +18191,7 @@ "z1d.3xlarge", "z1d.6xlarge", "z1d.12xlarge", + "z1d.metal", "u-6tb1.metal", "u-9tb1.metal", "u-12tb1.metal", @@ -18872,7 +18922,7 @@ "documentation":"

If you set this parameter to true, the instance is enabled for hibernation.

Default: false

" } }, - "documentation":"

Indicates whether the instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. Hibernation is currently supported only for Amazon Linux.

" + "documentation":"

Indicates whether the instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. Hibernation is currently supported only for Amazon Linux.

" }, "LaunchTemplateIamInstanceProfileSpecification":{ "type":"structure", @@ -19883,7 +19933,7 @@ }, "BlockDeviceMappings":{ "shape":"InstanceBlockDeviceMappingSpecificationList", - "documentation":"

Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated.

To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Updating the Block Device Mapping when Launching an Instance in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated.

To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Updating the Block Device Mapping when Launching an Instance in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"blockDeviceMapping" }, "DisableApiTermination":{ @@ -19923,17 +19973,17 @@ }, "InstanceType":{ "shape":"AttributeValue", - "documentation":"

Changes the instance type to the specified value. For more information, see Instance Types. If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.

", + "documentation":"

Changes the instance type to the specified value. For more information, see Instance Types. If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.

", "locationName":"instanceType" }, "Kernel":{ "shape":"AttributeValue", - "documentation":"

Changes the instance's kernel to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

", + "documentation":"

Changes the instance's kernel to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

", "locationName":"kernel" }, "Ramdisk":{ "shape":"AttributeValue", - "documentation":"

Changes the instance's RAM disk to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

", + "documentation":"

Changes the instance's RAM disk to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

", "locationName":"ramdisk" }, "SriovNetSupport":{ @@ -19994,7 +20044,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

" + "documentation":"

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

" }, "InstanceCreditSpecifications":{ "shape":"InstanceCreditSpecificationListRequest", @@ -20071,7 +20121,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraint: Maximum 128 ASCII characters.

" }, "LaunchTemplateId":{ "shape":"String", @@ -20129,7 +20179,7 @@ }, "SourceDestCheck":{ "shape":"AttributeBooleanValue", - "documentation":"

Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

", + "documentation":"

Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

", "locationName":"sourceDestCheck" } }, @@ -20149,7 +20199,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive token you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive token you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", "locationName":"clientToken" }, "TargetConfigurations":{ @@ -20345,7 +20395,7 @@ }, "Size":{ "shape":"Integer", - "documentation":"

The target size of the volume, in GiB. The target volume size must be greater than or equal to than the existing size of the volume. For information about available EBS volume sizes, see Amazon EBS Volume Types.

Default: If no size is specified, the existing size is retained.

" + "documentation":"

The target size of the volume, in GiB. The target volume size must be greater than or equal to the existing size of the volume. For information about available EBS volume sizes, see Amazon EBS Volume Types.

Default: If no size is specified, the existing size is retained.

" }, "VolumeType":{ "shape":"VolumeType", @@ -20353,7 +20403,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The target IOPS rate of the volume.

This is only valid for Provisioned IOPS SSD (io1) volumes. For more information, see Provisioned IOPS SSD (io1) Volumes.

Default: If no IOPS value is specified, the existing value is retained.

" + "documentation":"

The target IOPS rate of the volume.

This is only valid for Provisioned IOPS SSD (io1) volumes. For more information, see Provisioned IOPS SSD (io1) Volumes.

Default: If no IOPS value is specified, the existing value is retained.

" } } }, @@ -20765,7 +20815,7 @@ }, "ProvisionedBandwidth":{ "shape":"ProvisionedBandwidth", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", "locationName":"provisionedBandwidth" }, "State":{ @@ -21404,6 +21454,11 @@ "documentation":"

Indicates that the fleet uses a single instance type to launch all On-Demand Instances in the fleet.

", "locationName":"singleInstanceType" }, + "SingleAvailabilityZone":{ + "shape":"Boolean", + "documentation":"

Indicates that the fleet launches all On-Demand Instances into a single Availability Zone.

", + "locationName":"singleAvailabilityZone" + }, "MinTargetCapacity":{ "shape":"Integer", "documentation":"

The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

", @@ -21423,6 +21478,10 @@ "shape":"Boolean", "documentation":"

Indicates that the fleet uses a single instance type to launch all On-Demand Instances in the fleet.

" }, + "SingleAvailabilityZone":{ + "shape":"Boolean", + "documentation":"

Indicates that the fleet launches all On-Demand Instances into a single Availability Zone.

" + }, "MinTargetCapacity":{ "shape":"Integer", "documentation":"

The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

" @@ -21957,31 +22016,31 @@ "members":{ "ProvisionTime":{ "shape":"DateTime", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", "locationName":"provisionTime" }, "Provisioned":{ "shape":"String", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", "locationName":"provisioned" }, "RequestTime":{ "shape":"DateTime", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", "locationName":"requestTime" }, "Requested":{ "shape":"String", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", "locationName":"requested" }, "Status":{ "shape":"String", - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", "locationName":"status" } }, - "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

" + "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

" }, "PublicIpStringList":{ "type":"list", @@ -22116,7 +22175,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

" }, "CurrencyCode":{ "shape":"CurrencyCodeValues", @@ -22141,7 +22200,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"clientToken" }, "CurrencyCode":{ @@ -22237,7 +22296,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -22915,7 +22974,7 @@ "members":{ "KernelId":{ "shape":"String", - "documentation":"

The ID of the kernel.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User Provided Kernels in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The ID of the kernel.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User Provided Kernels in the Amazon Elastic Compute Cloud User Guide.

" }, "EbsOptimized":{ "shape":"Boolean", @@ -22941,7 +23000,7 @@ }, "InstanceType":{ "shape":"InstanceType", - "documentation":"

The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

" }, "KeyName":{ "shape":"String", @@ -22957,7 +23016,7 @@ }, "RamDiskId":{ "shape":"String", - "documentation":"

The ID of the RAM disk.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User Provided Kernels in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The ID of the RAM disk.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User Provided Kernels in the Amazon Elastic Compute Cloud User Guide.

" }, "DisableApiTermination":{ "shape":"Boolean", @@ -22969,7 +23028,7 @@ }, "UserData":{ "shape":"String", - "documentation":"

The Base64-encoded user data to make available to the instance. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows).

" + "documentation":"

The Base64-encoded user data to make available to the instance. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows).

" }, "TagSpecifications":{ "shape":"LaunchTemplateTagSpecificationRequestList", @@ -23006,15 +23065,15 @@ }, "CpuOptions":{ "shape":"LaunchTemplateCpuOptionsRequest", - "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

" }, "CapacityReservationSpecification":{ "shape":"LaunchTemplateCapacityReservationSpecificationRequest", - "documentation":"

The Capacity Reservation targeting option.

" + "documentation":"

The Capacity Reservation targeting option. If you do not specify this parameter, the instance's Capacity Reservation preference defaults to open, which enables it to run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).

" }, "HibernationOptions":{ "shape":"LaunchTemplateHibernationOptionsRequest", - "documentation":"

Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. Hibernation is currently supported only for Amazon Linux. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. Hibernation is currently supported only for Amazon Linux. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" }, "LicenseSpecifications":{ "shape":"LaunchTemplateLicenseSpecificationListRequest", @@ -23067,7 +23126,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon EC2 User Guide for Linux Instances.

", "locationName":"clientToken" }, "DryRun":{ @@ -23442,7 +23501,7 @@ }, "InstanceCount":{ "shape":"Integer", - "documentation":"

The number of modified Reserved Instances.

", + "documentation":"

The number of modified Reserved Instances.

This is a required field for a request.

", "locationName":"instanceCount" }, "InstanceType":{ @@ -23500,7 +23559,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

", "locationName":"clientToken" }, "CreateDate":{ @@ -23563,7 +23622,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

", "locationName":"clientToken" }, "CreateDate":{ @@ -23887,6 +23946,7 @@ "ResourceType":{ "type":"string", "enum":[ + "client-vpn-endpoint", "customer-gateway", "dedicated-host", "dhcp-options", @@ -24056,7 +24116,7 @@ }, "CpuOptions":{ "shape":"LaunchTemplateCpuOptions", - "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"cpuOptions" }, "CapacityReservationSpecification":{ @@ -24066,7 +24126,7 @@ }, "HibernationOptions":{ "shape":"LaunchTemplateHibernationOptions", - "documentation":"

Indicates whether an instance is configured for hibernation. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Indicates whether an instance is configured for hibernation. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"hibernationOptions" }, "LicenseSpecifications":{ @@ -24459,20 +24519,20 @@ }, "InstanceType":{ "shape":"InstanceType", - "documentation":"

The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

Default: m1.small

" + "documentation":"

The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

Default: m1.small

" }, "Ipv6AddressCount":{ "shape":"Integer", - "documentation":"

[EC2-VPC] A number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. You cannot specify this option and the option to assign specific IPv6 addresses in the same request. You can specify this option if you've specified a minimum number of instances to launch.

" + "documentation":"

[EC2-VPC] A number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. You cannot specify this option and the option to assign specific IPv6 addresses in the same request. You can specify this option if you've specified a minimum number of instances to launch.

You cannot specify this option and the network interfaces option in the same request.

" }, "Ipv6Addresses":{ "shape":"InstanceIpv6AddressList", - "documentation":"

[EC2-VPC] Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a minimum number of instances to launch.

", + "documentation":"

[EC2-VPC] Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a minimum number of instances to launch.

You cannot specify this option and the network interfaces option in the same request.

", "locationName":"Ipv6Address" }, "KernelId":{ "shape":"String", - "documentation":"

The ID of the kernel.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The ID of the kernel.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

" }, "KeyName":{ "shape":"String", @@ -24496,25 +24556,25 @@ }, "RamdiskId":{ "shape":"String", - "documentation":"

The ID of the RAM disk.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The ID of the RAM disk.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

" }, "SecurityGroupIds":{ "shape":"SecurityGroupIdStringList", - "documentation":"

One or more security group IDs. You can create a security group using CreateSecurityGroup.

Default: Amazon EC2 uses the default security group.

", + "documentation":"

One or more security group IDs. You can create a security group using CreateSecurityGroup.

Default: Amazon EC2 uses the default security group.

You cannot specify this option and the network interfaces option in the same request.

", "locationName":"SecurityGroupId" }, "SecurityGroups":{ "shape":"SecurityGroupStringList", - "documentation":"

[EC2-Classic, default VPC] One or more security group names. For a nondefault VPC, you must use security group IDs instead.

Default: Amazon EC2 uses the default security group.

", + "documentation":"

[EC2-Classic, default VPC] One or more security group names. For a nondefault VPC, you must use security group IDs instead.

You cannot specify this option and the network interfaces option in the same request.

Default: Amazon EC2 uses the default security group.

", "locationName":"SecurityGroup" }, "SubnetId":{ "shape":"String", - "documentation":"

[EC2-VPC] The ID of the subnet to launch the instance into.

" + "documentation":"

[EC2-VPC] The ID of the subnet to launch the instance into.

You cannot specify this option and the network interfaces option in the same request.

" }, "UserData":{ "shape":"String", - "documentation":"

The user data to make available to the instance. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows). If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text.

" + "documentation":"

The user data to make available to the instance. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows). If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text.

" }, "AdditionalInfo":{ "shape":"String", @@ -24523,7 +24583,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraints: Maximum 64 ASCII characters

", + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraints: Maximum 64 ASCII characters

", "locationName":"clientToken" }, "DisableApiTermination":{ @@ -24553,12 +24613,12 @@ }, "NetworkInterfaces":{ "shape":"InstanceNetworkInterfaceSpecificationList", - "documentation":"

One or more network interfaces.

", + "documentation":"

One or more network interfaces.

You cannot specify this option and the network interfaces option in the same request.

", "locationName":"networkInterface" }, "PrivateIpAddress":{ "shape":"String", - "documentation":"

[EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4 address range of the subnet.

Only one private IP address can be designated as primary. You can't specify this option if you've specified the option to designate a private IP address as the primary IP address in a network interface specification. You cannot specify this option if you're launching more than one instance in the request.

", + "documentation":"

[EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4 address range of the subnet.

Only one private IP address can be designated as primary. You can't specify this option if you've specified the option to designate a private IP address as the primary IP address in a network interface specification. You cannot specify this option if you're launching more than one instance in the request.

You cannot specify this option and the network interfaces option in the same request.

", "locationName":"privateIpAddress" }, "ElasticGpuSpecification":{ @@ -24585,19 +24645,19 @@ }, "CreditSpecification":{ "shape":"CreditSpecificationRequest", - "documentation":"

The credit option for CPU usage of the instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification. For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

Default: standard (T2 instances) or unlimited (T3 instances)

" + "documentation":"

The credit option for CPU usage of the instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification. For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

Default: standard (T2 instances) or unlimited (T3 instances)

" }, "CpuOptions":{ "shape":"CpuOptionsRequest", - "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

" }, "CapacityReservationSpecification":{ "shape":"CapacityReservationSpecification", - "documentation":"

Information about the Capacity Reservation targeting option.

" + "documentation":"

Information about the Capacity Reservation targeting option. If you do not specify this parameter, the instance's Capacity Reservation preference defaults to open, which enables it to run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).

" }, "HibernationOptions":{ "shape":"HibernationOptionsRequest", - "documentation":"

Indicates whether an instance is enabled for hibernation. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether an instance is enabled for hibernation. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" }, "LicenseSpecifications":{ "shape":"LicenseSpecificationListRequest", @@ -24615,7 +24675,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

", + "documentation":"

Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

", "idempotencyToken":true }, "DryRun":{ @@ -24653,7 +24713,7 @@ "members":{ "AWSAccessKeyId":{ "shape":"String", - "documentation":"

The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance in Best Practices for Managing AWS Access Keys.

" + "documentation":"

The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance in Best Practices for Managing AWS Access Keys.

" }, "Bucket":{ "shape":"String", @@ -24951,7 +25011,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about gp2 baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

Condition: This parameter is required for requests to create io1volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

" + "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about gp2 baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

Condition: This parameter is required for requests to create io1volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

" }, "SnapshotId":{ "shape":"String", @@ -25187,7 +25247,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. The possible values are:

", + "documentation":"

One or more filters. The possible values are:

", "locationName":"Filter" }, "MaxResults":{ @@ -25666,7 +25726,7 @@ }, "Format":{ "shape":"String", - "documentation":"

The format of the disk image being imported.

Valid values: VHD | VMDK | OVA

" + "documentation":"

The format of the disk image being imported.

Valid values: VHD | VMDK

" }, "Url":{ "shape":"String", @@ -25957,7 +26017,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of your listings. This helps to avoid duplicate listings. For more information, see Ensuring Idempotency.

", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of your listings. This helps to avoid duplicate listings. For more information, see Ensuring Idempotency.

", "locationName":"clientToken" }, "ExcessCapacityTerminationPolicy":{ @@ -26012,7 +26072,7 @@ }, "Type":{ "shape":"FleetType", - "documentation":"

The type of request. Indicates whether the Spot Fleet only requests the target capacity or also attempts to maintain it. When this value is request, the Spot Fleet only places the required requests. It does not attempt to replenish Spot Instances if capacity is diminished, nor does it submit requests in alternative Spot pools if capacity is not available. To maintain a certain target capacity, the Spot Fleet places the required requests to meet capacity and automatically replenishes any interrupted instances. Default: maintain.

", + "documentation":"

The type of request. Indicates whether the Spot Fleet only requests the target capacity or also attempts to maintain it. When this value is request, the Spot Fleet only places the required requests. It does not attempt to replenish Spot Instances if capacity is diminished, nor does it submit requests in alternative Spot pools if capacity is not available. When this value is maintain, the Spot Fleet maintains the target capacity. The Spot Fleet places the required requests to meet capacity and automatically replenishes any interrupted instances. Default: maintain. instant is listed but is not used by Spot Fleet.

", "locationName":"type" }, "ValidFrom":{ @@ -26151,7 +26211,7 @@ }, "State":{ "shape":"SpotInstanceState", - "documentation":"

The state of the Spot Instance request. Spot status information helps track your Spot Instance requests. For more information, see Spot Status in the Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

The state of the Spot Instance request. Spot status information helps track your Spot Instance requests. For more information, see Spot Status in the Amazon EC2 User Guide for Linux Instances.

", "locationName":"state" }, "Status":{ @@ -26232,7 +26292,7 @@ "members":{ "Code":{ "shape":"String", - "documentation":"

The status code. For a list of status codes, see Spot Status Codes in the Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

The status code. For a list of status codes, see Spot Status Codes in the Amazon EC2 User Guide for Linux Instances.

", "locationName":"code" }, "Message":{ @@ -26304,6 +26364,11 @@ "documentation":"

Indicates that the fleet uses a single instance type to launch all Spot Instances in the fleet.

", "locationName":"singleInstanceType" }, + "SingleAvailabilityZone":{ + "shape":"Boolean", + "documentation":"

Indicates that the fleet launches all Spot Instances into a single Availability Zone.

", + "locationName":"singleAvailabilityZone" + }, "MinTargetCapacity":{ "shape":"Integer", "documentation":"

The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

", @@ -26331,6 +26396,10 @@ "shape":"Boolean", "documentation":"

Indicates that the fleet uses a single instance type to launch all Spot Instances in the fleet.

" }, + "SingleAvailabilityZone":{ + "shape":"Boolean", + "documentation":"

Indicates that the fleet launches all Spot Instances into a single Availability Zone.

" + }, "MinTargetCapacity":{ "shape":"Integer", "documentation":"

The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

" @@ -26575,7 +26644,7 @@ }, "Hibernate":{ "shape":"Boolean", - "documentation":"

Hibernates the instance if the instance was enabled for hibernation at launch. If the instance cannot hibernate successfully, a normal shutdown occurs. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

Default: false

" + "documentation":"

Hibernates the instance if the instance was enabled for hibernation at launch. If the instance cannot hibernate successfully, a normal shutdown occurs. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

Default: false

" }, "DryRun":{ "shape":"Boolean", @@ -28000,7 +28069,7 @@ "locationName":"message" } }, - "documentation":"

Information about the error that occurred. For more information about errors, see Error Codes.

" + "documentation":"

Information about the error that occurred. For more information about errors, see Error Codes.

" }, "UnsuccessfulItemList":{ "type":"list", @@ -28303,7 +28372,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS SSD volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose SSD volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000IOPS for io1 volumes in most regions. Maximum io1IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", + "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS SSD volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose SSD volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000IOPS for io1 volumes in most regions. Maximum io1IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", "locationName":"iops" }, "Tags":{ @@ -29173,7 +29242,7 @@ }, "Category":{ "shape":"String", - "documentation":"

The category of the VPN connection. A value of VPN indicates an AWS VPN connection. A value of VPN-Classic indicates an AWS Classic VPN connection. For more information, see AWS Managed VPN Categories in the Amazon Virtual Private Cloud User Guide.

", + "documentation":"

The category of the VPN connection. A value of VPN indicates an AWS VPN connection. A value of VPN-Classic indicates an AWS Classic VPN connection.

", "locationName":"category" }, "State":{ diff --git a/botocore/data/ecr/2015-09-21/service-2.json b/botocore/data/ecr/2015-09-21/service-2.json index 85a9a90b..419c1801 100644 --- a/botocore/data/ecr/2015-09-21/service-2.json +++ b/botocore/data/ecr/2015-09-21/service-2.json @@ -2,13 +2,14 @@ "version":"2.0", "metadata":{ "apiVersion":"2015-09-21", - "endpointPrefix":"ecr", + "endpointPrefix":"api.ecr", "jsonVersion":"1.1", "protocol":"json", "serviceAbbreviation":"Amazon ECR", "serviceFullName":"Amazon EC2 Container Registry", "serviceId":"ECR", "signatureVersion":"v4", + "signingName":"ecr", "targetPrefix":"AmazonEC2ContainerRegistry_V20150921", "uid":"ecr-2015-09-21" }, diff --git a/botocore/data/ecs/2014-11-13/paginators-1.json b/botocore/data/ecs/2014-11-13/paginators-1.json index 43ae3dec..6aa47513 100644 --- a/botocore/data/ecs/2014-11-13/paginators-1.json +++ b/botocore/data/ecs/2014-11-13/paginators-1.json @@ -35,6 +35,18 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "serviceArns" + }, + "ListAccountSettings": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "settings" + }, + "ListAttributes": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "attributes" } } } diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index f9e25c94..9bdec2fd 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -46,7 +46,7 @@ {"shape":"PlatformTaskDefinitionIncompatibilityException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below desiredCount, Amazon ECS spawns another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state; tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and they are reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using the CODE_DEPLOY deployment controller and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are only used to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values are not used, although they are currently visible when describing your service.

Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

" + "documentation":"

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below desiredCount, Amazon ECS spawns another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state; tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and they are reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using the CODE_DEPLOY deployment controller and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are only used to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values are not used, although they are currently visible when describing your service.

Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

" }, "DeleteAccountSetting":{ "name":"DeleteAccountSetting", @@ -295,7 +295,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ClusterNotFoundException"} ], - "documentation":"

Returns a list of container instances in a specified cluster. You can filter the results of a ListContainerInstances operation with cluster query language statements inside the filter parameter. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Returns a list of container instances in a specified cluster. You can filter the results of a ListContainerInstances operation with cluster query language statements inside the filter parameter. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" }, "ListServices":{ "name":"ListServices", @@ -389,7 +389,22 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Modifies the ARN and resource ID format of a resource for a specified IAM user, IAM role, or the root user for an account. You can specify whether the new ARN and resource ID format are enabled for new resources that are created. Enabling this setting is required to use new Amazon ECS features such as resource tagging.

" + "documentation":"

Modifies the ARN and resource ID format of a resource type for a specified IAM user, IAM role, or the root user for an account. If the account setting for the root user is changed, it sets the default setting for all of the IAM users and roles for which no individual account setting has been set. The opt-in and opt-out account setting can be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource will be defined by the opt-in status of the IAM user or role that created the resource. Enabling this setting is required to use new Amazon ECS features such as resource tagging. For more information, see Amazon Resource Names (ARNs) and IDs in the Amazon Elastic Container Service Developer Guide.

" + }, + "PutAccountSettingDefault":{ + "name":"PutAccountSettingDefault", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutAccountSettingDefaultRequest"}, + "output":{"shape":"PutAccountSettingDefaultResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Modifies the ARN and resource ID format of a resource type for all IAM users on an account for which no individual account setting has been set. Enabling this setting is required to use new Amazon ECS features such as resource tagging.

" }, "PutAttributes":{ "name":"PutAttributes", @@ -405,7 +420,7 @@ {"shape":"AttributeLimitExceededException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Create or update an attribute on an Amazon ECS resource. If the attribute does not exist, it is created. If the attribute exists, its value is replaced with the specified value. To delete an attribute, use DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Create or update an attribute on an Amazon ECS resource. If the attribute does not exist, it is created. If the attribute exists, its value is replaced with the specified value. To delete an attribute, use DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

" }, "RegisterContainerInstance":{ "name":"RegisterContainerInstance", @@ -435,7 +450,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

You can specify an IAM role for your task with the taskRoleArn parameter. When you specify an IAM role for a task, its containers can then use the latest versions of the AWS CLI or SDKs to make API requests to the AWS services that are specified in the IAM policy associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

You can specify a Docker networking mode for the containers in your task definition with the networkMode parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

You can specify an IAM role for your task with the taskRoleArn parameter. When you specify an IAM role for a task, its containers can then use the latest versions of the AWS CLI or SDKs to make API requests to the AWS services that are specified in the IAM policy associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

You can specify a Docker networking mode for the containers in your task definition with the networkMode parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

" }, "RunTask":{ "name":"RunTask", @@ -456,7 +471,7 @@ {"shape":"AccessDeniedException"}, {"shape":"BlockedException"} ], - "documentation":"

Starts a new task using the specified task definition.

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

The Amazon ECS API follows an eventual consistency model, due to the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.

To manage eventual consistency, you can do the following:

" + "documentation":"

Starts a new task using the specified task definition.

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

The Amazon ECS API follows an eventual consistency model, due to the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.

To manage eventual consistency, you can do the following:

" }, "StartTask":{ "name":"StartTask", @@ -472,7 +487,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ClusterNotFoundException"} ], - "documentation":"

Starts a new task from the specified task definition on the specified container instance or instances.

Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Starts a new task from the specified task definition on the specified container instance or instances.

Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

" }, "StopTask":{ "name":"StopTask", @@ -488,7 +503,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ClusterNotFoundException"} ], - "documentation":"

Stops a running task. Any tags associated with the task will be deleted.

When StopTask is called on a task, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM value and a default 30-second timeout, after which the SIGKILL value is sent and the containers are forcibly stopped. If the container handles the SIGTERM value gracefully and exits within 30 seconds from receiving it, no SIGKILL value is sent.

The default 30-second timeout can be configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Stops a running task. Any tags associated with the task will be deleted.

When StopTask is called on a task, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM value and a default 30-second timeout, after which the SIGKILL value is sent and the containers are forcibly stopped. If the container handles the SIGTERM value gracefully and exits within 30 seconds from receiving it, no SIGKILL value is sent.

The default 30-second timeout can be configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "SubmitContainerStateChange":{ "name":"SubmitContainerStateChange", @@ -571,7 +586,7 @@ {"shape":"NoUpdateAvailableException"}, {"shape":"MissingVersionException"} ], - "documentation":"

Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent does not interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.

UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux with the ecs-init service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent does not interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.

UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux with the ecs-init service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide.

" }, "UpdateContainerInstancesState":{ "name":"UpdateContainerInstancesState", @@ -710,7 +725,7 @@ "documentation":"

The ID of the target. You can specify the short form ID for a resource or the full Amazon Resource Name (ARN).

" } }, - "documentation":"

An attribute is a name-value pair associated with an Amazon ECS object. Attributes enable you to extend the Amazon ECS data model by adding custom metadata to your resources. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

An attribute is a name-value pair associated with an Amazon ECS object. Attributes enable you to extend the Amazon ECS data model by adding custom metadata to your resources. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

" }, "AttributeLimitExceededException":{ "type":"structure", @@ -729,11 +744,11 @@ "members":{ "subnets":{ "shape":"StringList", - "documentation":"

The subnets associated with the task or service. There is a limit of 16 subnets able to be specified per AwsVpcConfiguration.

All specified subnets must be from the same VPC.

" + "documentation":"

The subnets associated with the task or service. There is a limit of 16 subnets that can be specified per AwsVpcConfiguration.

All specified subnets must be from the same VPC.

" }, "securityGroups":{ "shape":"StringList", - "documentation":"

The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. There is a limit of five security groups able to be specified per AwsVpcConfiguration.

All specified security groups must be from the same VPC.

" + "documentation":"

The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. There is a limit of 5 security groups that can be specified per AwsVpcConfiguration.

All specified security groups must be from the same VPC.

" }, "assignPublicIp":{ "shape":"AssignPublicIp", @@ -907,6 +922,22 @@ "healthStatus":{ "shape":"HealthStatus", "documentation":"

The health status of the container. If health checks are not configured for this container in its task definition, then it reports the health status as UNKNOWN.

" + }, + "cpu":{ + "shape":"String", + "documentation":"

The number of CPU units set for the container. The value will be 0 if no value was specified in the container definition when the task definition was registered.

" + }, + "memory":{ + "shape":"String", + "documentation":"

The hard limit (in MiB) of memory set for the container.

" + }, + "memoryReservation":{ + "shape":"String", + "documentation":"

The soft limit (in MiB) of memory set for the container.

" + }, + "gpuIds":{ + "shape":"GpuIds", + "documentation":"

The IDs of each GPU assigned to the container.

" } }, "documentation":"

A Docker container that is part of a task.

" @@ -948,7 +979,7 @@ }, "essential":{ "shape":"BoxedBoolean", - "documentation":"

If the essential parameter of a container is marked as true, and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the essential parameter of a container is marked as false, then its failure does not affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.

All tasks must have at least one essential container. If you have an application that is composed of multiple containers, you should group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

If the essential parameter of a container is marked as true, and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the essential parameter of a container is marked as false, then its failure does not affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.

All tasks must have at least one essential container. If you have an application that is composed of multiple containers, you should group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide.

" }, "entryPoint":{ "shape":"StringList", @@ -956,7 +987,7 @@ }, "command":{ "shape":"StringList", - "documentation":"

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd.

" + "documentation":"

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each argument should be a separate string in the array.

" }, "environment":{ "shape":"EnvironmentVariables", @@ -976,7 +1007,7 @@ }, "secrets":{ "shape":"SecretList", - "documentation":"

The secrets to pass to the container.

" + "documentation":"

The secrets to pass to the container. For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" }, "hostname":{ "shape":"String", @@ -1016,7 +1047,7 @@ }, "dockerSecurityOptions":{ "shape":"StringList", - "documentation":"

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers.

" }, "interactive":{ "shape":"BoxedBoolean", @@ -1036,7 +1067,7 @@ }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

If you are using the Fargate launch type, the only supported value is awslogs.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

If you are using the Fargate launch type, the only supported value is awslogs.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "healthCheck":{ "shape":"HealthCheck", @@ -1045,6 +1076,10 @@ "systemControls":{ "shape":"SystemControls", "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

It is not recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network modes. For tasks that use the awsvpc network mode, the container that is started last determines which systemControls parameters take effect. For tasks that use the host network mode, it changes the container instance's namespaced kernel parameters as well as the containers.

" + }, + "resourceRequirements":{ + "shape":"ResourceRequirements", + "documentation":"

The type and amount of a resource to assign to a container. The only supported resource is a GPU.

" } }, "documentation":"

Container definitions are used in task definitions to describe the different containers that are launched as part of a task.

" @@ -1082,7 +1117,7 @@ }, "status":{ "shape":"String", - "documentation":"

The status of the container instance. The valid values are ACTIVE, INACTIVE, or DRAINING. ACTIVE indicates that the container instance can accept tasks. DRAINING indicates that new tasks are not placed on the container instance and any service tasks running on the container instance are removed if possible. For more information, see Container Instance Draining in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The status of the container instance. The valid values are ACTIVE, INACTIVE, or DRAINING. ACTIVE indicates that the container instance can accept tasks. DRAINING indicates that new tasks are not placed on the container instance and any service tasks running on the container instance are removed if possible. For more information, see Container Instance Draining in the Amazon Elastic Container Service Developer Guide.

" }, "agentConnected":{ "shape":"Boolean", @@ -1164,6 +1199,10 @@ "memoryReservation":{ "shape":"BoxedInteger", "documentation":"

The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. You must also specify a container name.

" + }, + "resourceRequirements":{ + "shape":"ResourceRequirements", + "documentation":"

The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU.

" } }, "documentation":"

The overrides that should be sent to a container.

" @@ -1265,15 +1304,15 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type on which to run your service. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The launch type on which to run your service. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "role":{ "shape":"String", - "documentation":"

The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition does not use the awsvpc network mode. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.

If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. The service-linked role is required if your task definition uses the awsvpc network mode, in which case you should not specify a role here. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly Names and Paths in the IAM User Guide.

" + "documentation":"

The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition does not use the awsvpc network mode. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.

If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. The service-linked role is required if your task definition uses the awsvpc network mode, in which case you should not specify a role here. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly Names and Paths in the IAM User Guide.

" }, "deploymentConfiguration":{ "shape":"DeploymentConfiguration", @@ -1309,11 +1348,11 @@ }, "enableECSManagedTags":{ "shape":"Boolean", - "documentation":"

Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" }, "propagateTags":{ "shape":"PropagateTags", - "documentation":"

Specifies whether to propagate the tags from the task definition or the service to the tasks. If no value is specified, the tags are not propagated. Tags can only be propagated to the tasks within the service during service creation. To add tags to a task after service creation, use the TagResource API action.

" + "documentation":"

Specifies whether to propagate the tags from the task definition or the service to the tasks in the service. If no value is specified, the tags are not propagated. Tags can only be propagated to the tasks within the service during service creation. To add tags to a task after service creation, use the TagResource API action.

" } } }, @@ -1455,11 +1494,11 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type the tasks in the service are using. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The launch type the tasks in the service are using. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", @@ -1819,6 +1858,10 @@ "type":"list", "member":{"shape":"Failure"} }, + "GpuIds":{ + "type":"list", + "member":{"shape":"String"} + }, "HealthCheck":{ "type":"structure", "required":["command"], @@ -2083,7 +2126,7 @@ }, "filter":{ "shape":"String", - "documentation":"

You can filter the results of a ListContainerInstances operation with cluster query language statements. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

You can filter the results of a ListContainerInstances operation with cluster query language statements. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" }, "nextToken":{ "shape":"String", @@ -2327,7 +2370,7 @@ "members":{ "logDriver":{ "shape":"LogDriver", - "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default. If you are using the Fargate launch type, the only supported value is awslogs. For more information about using the awslogs driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" + "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default. If you are using the Fargate launch type, the only supported value is awslogs. For more information about using the awslogs driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" }, "options":{ "shape":"LogConfigurationOptionsMap", @@ -2473,10 +2516,10 @@ }, "expression":{ "shape":"String", - "documentation":"

A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

An object representing a constraint on task placement. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

An object representing a constraint on task placement. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

" }, "PlacementConstraintType":{ "type":"string", @@ -2505,7 +2548,7 @@ "documentation":"

The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used.

" } }, - "documentation":"

The task placement strategy for a task or service. For more information, see Task Placement Strategies in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The task placement strategy for a task or service. For more information, see Task Placement Strategies in the Amazon Elastic Container Service Developer Guide.

" }, "PlacementStrategyType":{ "type":"string", @@ -2515,6 +2558,32 @@ "binpack" ] }, + "PlatformDevice":{ + "type":"structure", + "required":[ + "id", + "type" + ], + "members":{ + "id":{ + "shape":"String", + "documentation":"

The ID for the GPU(s) on the container instance. The available GPU IDs can also be obtained on the container instance in the /var/lib/ecs/gpu/nvidia_gpu_info.json file.

" + }, + "type":{ + "shape":"PlatformDeviceType", + "documentation":"

The type of device that is available on the container instance. The only supported value is GPU.

" + } + }, + "documentation":"

The devices that are available on the container instance. The only supported device type is a GPU.

" + }, + "PlatformDeviceType":{ + "type":"string", + "enum":["GPU"] + }, + "PlatformDevices":{ + "type":"list", + "member":{"shape":"PlatformDevice"} + }, "PlatformTaskDefinitionIncompatibilityException":{ "type":"structure", "members":{ @@ -2538,7 +2607,7 @@ }, "hostPort":{ "shape":"BoxedInteger", - "documentation":"

The port number on the container instance to reserve for your container.

If you are using containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort.

If you are using containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.

The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.

The default ephemeral port range from 49153 through 65535 is always used for Docker versions before 1.6.0.

The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678 and 51679. Any host port that was previously specified in a running task is also reserved while the task is running (after a task stops, the host port is released). The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance may have up to 100 reserved ports at a time, including the default reserved ports. Aautomatically assigned ports do not count toward the 100 reserved ports limit.

" + "documentation":"

The port number on the container instance to reserve for your container.

If you are using containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort.

If you are using containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.

The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.

The default ephemeral port range from 49153 through 65535 is always used for Docker versions before 1.6.0.

The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running (after a task stops, the host port is released). The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time, including the default reserved ports. Automatically assigned ports don't count toward the 100 reserved ports limit.

" }, "protocol":{ "shape":"TransportProtocol", @@ -2558,6 +2627,29 @@ "SERVICE" ] }, + "PutAccountSettingDefaultRequest":{ + "type":"structure", + "required":[ + "name", + "value" + ], + "members":{ + "name":{ + "shape":"SettingName", + "documentation":"

The resource type to enable the new format for. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks are affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances are affected.

" + }, + "value":{ + "shape":"String", + "documentation":"

The account setting value for the specified principal ARN. Accepted values are enabled and disabled.

" + } + } + }, + "PutAccountSettingDefaultResponse":{ + "type":"structure", + "members":{ + "setting":{"shape":"Setting"} + } + }, "PutAccountSettingRequest":{ "type":"structure", "required":[ @@ -2575,7 +2667,7 @@ }, "principalArn":{ "shape":"String", - "documentation":"

The ARN of the principal, which can be an IAM user, IAM role, or the root user. If you specify the root user, it modifies the ARN and resource ID format for all IAM users, IAM roles, and the root user of the account unless an IAM user or role explicitly overrides these settings for themselves. If this field is omitted, the setting are changed only for the authenticated user.

" + "documentation":"

The ARN of the principal, which can be an IAM user, IAM role, or the root user. If you specify the root user, it modifies the ARN and resource ID format for all IAM users, IAM roles, and the root user of the account unless an IAM user or role explicitly overrides these settings for themselves. If this field is omitted, the settings are changed only for the authenticated user.

" } } }, @@ -2642,6 +2734,10 @@ "shape":"Attributes", "documentation":"

The container instance attributes that this container instance supports.

" }, + "platformDevices":{ + "shape":"PlatformDevices", + "documentation":"

The devices that are available on the container instance. The only supported device type is a GPU.

" + }, "tags":{ "shape":"Tags", "documentation":"

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" @@ -2670,7 +2766,7 @@ }, "taskRoleArn":{ "shape":"String", - "documentation":"

The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" }, "executionRoleArn":{ "shape":"String", @@ -2783,6 +2879,32 @@ "documentation":"

The specified resource could not be found.

", "exception":true }, + "ResourceRequirement":{ + "type":"structure", + "required":[ + "value", + "type" + ], + "members":{ + "value":{ + "shape":"String", + "documentation":"

The number of physical GPUs the Amazon ECS container agent will reserve for the container. The number of GPUs reserved for all containers in a task should not exceed the number of available GPUs on the container instance the task is launched on.

" + }, + "type":{ + "shape":"ResourceType", + "documentation":"

The type of resource to assign to a container. The only supported value is GPU.

" + } + }, + "documentation":"

The type and amount of a resource to assign to a container. The only supported resource is a GPU. For more information, see Working with GPUs on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" + }, + "ResourceRequirements":{ + "type":"list", + "member":{"shape":"ResourceRequirement"} + }, + "ResourceType":{ + "type":"string", + "enum":["GPU"] + }, "Resources":{ "type":"list", "member":{"shape":"Resource"} @@ -2825,11 +2947,11 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type on which to run your task. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The launch type on which to run your task. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version the task should run. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version the task should run. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", @@ -2841,11 +2963,11 @@ }, "enableECSManagedTags":{ "shape":"Boolean", - "documentation":"

Specifies whether to enable Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Specifies whether to enable Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" }, "propagateTags":{ "shape":"PropagateTags", - "documentation":"

Specifies whether to propagate the tags from the task definition or the service to the task. If no value is specified, the tags are not propagated.

" + "documentation":"

Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags are not propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.

An error will be received if you specify the SERVICE option when running a task.

" } } }, @@ -2907,10 +3029,10 @@ }, "valueFrom":{ "shape":"String", - "documentation":"

The secret to expose to the container. Supported values are either the full ARN or the name of the parameter in the AWS Systems Manager Parameter Store.

" + "documentation":"

The secret to expose to the container. If your task is using the EC2 launch type, then supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store. If your task is using the Fargate launch type, then the only supported value is the full ARN of the parameter in the AWS Systems Manager Parameter Store.

If the AWS Systems Manager Parameter Store parameter exists in the same Region as the task you are launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

" } }, - "documentation":"

An object representing the secret to expose to your container.

" + "documentation":"

An object representing the secret to expose to your container. For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" }, "SecretList":{ "type":"list", @@ -2966,11 +3088,11 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type on which your service is running. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The launch type on which your service is running. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "taskDefinition":{ "shape":"String", @@ -3034,7 +3156,7 @@ }, "enableECSManagedTags":{ "shape":"Boolean", - "documentation":"

Specifies whether to enable Amazon ECS managed tags for the tasks in the service. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Specifies whether to enable Amazon ECS managed tags for the tasks in the service. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" }, "propagateTags":{ "shape":"PropagateTags", @@ -3202,7 +3324,7 @@ }, "enableECSManagedTags":{ "shape":"Boolean", - "documentation":"

Specifies whether to enable Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Specifies whether to enable Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" }, "propagateTags":{ "shape":"PropagateTags", @@ -3544,11 +3666,11 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type on which your task is running. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The launch type on which your task is running. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which your task is running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version on which your task is running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "attachments":{ "shape":"Attachments", @@ -3574,7 +3696,7 @@ }, "containerDefinitions":{ "shape":"ContainerDefinitions", - "documentation":"

A list of container definitions in JSON format that describe the different containers that make up your task. For more information about container definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

A list of container definitions in JSON format that describe the different containers that make up your task. For more information about container definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

" }, "family":{ "shape":"String", @@ -3582,7 +3704,7 @@ }, "taskRoleArn":{ "shape":"String", - "documentation":"

The ARN of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The ARN of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" }, "executionRoleArn":{ "shape":"String", @@ -3598,7 +3720,7 @@ }, "volumes":{ "shape":"VolumeList", - "documentation":"

The list of volumes in a task.

If you are using the Fargate launch type, the host and sourcePath parameters are not supported.

For more information about volume definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The list of volumes in a task.

If you are using the Fargate launch type, the host and sourcePath parameters are not supported.

For more information about volume definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

" }, "status":{ "shape":"TaskDefinitionStatus", @@ -3614,7 +3736,7 @@ }, "compatibilities":{ "shape":"CompatibilityList", - "documentation":"

The launch type to use with your task. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The launch type to use with your task. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" }, "requiresCompatibilities":{ "shape":"CompatibilityList", @@ -3664,10 +3786,10 @@ }, "expression":{ "shape":"String", - "documentation":"

A cluster query language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

A cluster query language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

An object representing a constraint on task placement in the task definition.

If you are using the Fargate launch type, task placement constraints are not supported.

For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

An object representing a constraint on task placement in the task definition.

If you are using the Fargate launch type, task placement constraints are not supported.

For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

" }, "TaskDefinitionPlacementConstraintType":{ "type":"string", @@ -3759,11 +3881,11 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type the tasks in the task set are using. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The launch type the tasks in the task set are using. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which the tasks in the task set are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version on which the tasks in the task set are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", @@ -4008,7 +4130,7 @@ }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" }, "forceNewDeployment":{ "shape":"Boolean", diff --git a/botocore/data/efs/2015-02-01/service-2.json b/botocore/data/efs/2015-02-01/service-2.json index 3166d333..08b817d6 100644 --- a/botocore/data/efs/2015-02-01/service-2.json +++ b/botocore/data/efs/2015-02-01/service-2.json @@ -28,7 +28,7 @@ {"shape":"InsufficientThroughputCapacity"}, {"shape":"ThroughputLimitExceeded"} ], - "documentation":"

Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's AWS account with the specified creation token, this operation does the following:

Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

For basic use cases, you can use a randomly generated UUID for the creation token.

The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

This operation also takes an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS: Performance Modes.

After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on an EC2 instances in your VPC via the mount target. For more information, see Amazon EFS: How it Works.

This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

" + "documentation":"

Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's AWS account with the specified creation token, this operation does the following:

Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

For basic use cases, you can use a randomly generated UUID for the creation token.

The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

This operation also takes an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS: Performance Modes.

After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on EC2 instances in your VPC by using the mount target. For more information, see Amazon EFS: How it Works.

This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

" }, "CreateMountTarget":{ "name":"CreateMountTarget", @@ -53,7 +53,7 @@ {"shape":"SecurityGroupNotFound"}, {"shape":"UnsupportedAvailabilityZone"} ], - "documentation":"

Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target.

You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. For more information, see Amazon EFS: How it Works.

In the request, you also specify a file system ID for which you are creating the mount target and the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

In the request, you also provide a subnet ID, which determines the following:

After creating the mount target, Amazon EFS returns a response that includes a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system via the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

If the request satisfies the requirements, Amazon EFS does the following:

The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating, you can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

We recommend you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you won't be able to access your file system through that mount target.

This operation requires permissions for the following action on the file system:

This operation also requires permissions for the following Amazon EC2 actions:

" + "documentation":"

Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.

You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. For more information, see Amazon EFS: How it Works.

In the request, you also specify a file system ID for which you are creating the mount target and the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

In the request, you also provide a subnet ID, which determines the following:

After creating the mount target, Amazon EFS returns a response that includes a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system by using the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

If the request satisfies the requirements, Amazon EFS does the following:

The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating, you can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

We recommend that you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you can't access your file system through that mount target.

This operation requires permissions for the following action on the file system:

This operation also requires permissions for the following Amazon EC2 actions:

" }, "CreateTags":{ "name":"CreateTags", @@ -100,7 +100,7 @@ {"shape":"DependencyTimeout"}, {"shape":"MountTargetNotFound"} ], - "documentation":"

Deletes the specified mount target.

This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target.

This operation requires permissions for the following action on the file system:

The DeleteMountTarget call returns while the mount target state is still deleting. You can check the mount target deletion by calling the DescribeMountTargets operation, which returns a list of mount target descriptions for the given file system.

The operation also requires permissions for the following Amazon EC2 action on the mount target's network interface:

" + "documentation":"

Deletes the specified mount target.

This operation forcibly breaks any mounts of the file system by using the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes might be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC by using another mount target.

This operation requires permissions for the following action on the file system:

The DeleteMountTarget call returns while the mount target state is still deleting. You can check the mount target deletion by calling the DescribeMountTargets operation, which returns a list of mount target descriptions for the given file system.

The operation also requires permissions for the following Amazon EC2 action on the mount target's network interface:

" }, "DeleteTags":{ "name":"DeleteTags", @@ -115,7 +115,7 @@ {"shape":"InternalServerError"}, {"shape":"FileSystemNotFound"} ], - "documentation":"

Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that does not exist, Amazon EFS ignores it and doesn't cause an error. For more information about tags and related restrictions, see Tag Restrictions in the AWS Billing and Cost Management User Guide.

This operation requires permissions for the elasticfilesystem:DeleteTags action.

" + "documentation":"

Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't cause an error. For more information about tags and related restrictions, see Tag Restrictions in the AWS Billing and Cost Management User Guide.

This operation requires permissions for the elasticfilesystem:DeleteTags action.

" }, "DescribeFileSystems":{ "name":"DescribeFileSystems", @@ -131,7 +131,23 @@ {"shape":"InternalServerError"}, {"shape":"FileSystemNotFound"} ], - "documentation":"

Returns the description of a specific Amazon EFS file system if either the file system CreationToken or the FileSystemId is provided. Otherwise, it returns descriptions of all file systems owned by the caller's AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all file system descriptions, you can optionally specify the MaxItems parameter to limit the number of descriptions in a response. If more file system descriptions remain, Amazon EFS returns a NextMarker, an opaque token, in the response. In this case, you should send a subsequent request with the Marker request parameter set to the value of NextMarker.

To retrieve a list of your file system descriptions, this operation is used in an iterative process, where DescribeFileSystems is called first without the Marker and then the operation continues to call it with the Marker parameter set to the value of the NextMarker from the previous response until the response has no NextMarker.

The implementation may return fewer than MaxItems file system descriptions while still including a NextMarker value.

The order of file systems returned in the response of one DescribeFileSystems call and the order of file systems returned across the responses of a multi-call iteration is unspecified.

This operation requires permissions for the elasticfilesystem:DescribeFileSystems action.

" + "documentation":"

Returns the description of a specific Amazon EFS file system if either the file system CreationToken or the FileSystemId is provided. Otherwise, it returns descriptions of all file systems owned by the caller's AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all file system descriptions, you can optionally specify the MaxItems parameter to limit the number of descriptions in a response. Currently, this number is automatically set to 10. If more file system descriptions remain, Amazon EFS returns a NextMarker, an opaque token, in the response. In this case, you should send a subsequent request with the Marker request parameter set to the value of NextMarker.

To retrieve a list of your file system descriptions, this operation is used in an iterative process, where DescribeFileSystems is called first without the Marker and then the operation continues to call it with the Marker parameter set to the value of the NextMarker from the previous response until the response has no NextMarker.

The order of file systems returned in the response of one DescribeFileSystems call and the order of file systems returned across the responses of a multi-call iteration is unspecified.

This operation requires permissions for the elasticfilesystem:DescribeFileSystems action.

" + }, + "DescribeLifecycleConfiguration":{ + "name":"DescribeLifecycleConfiguration", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/file-systems/{FileSystemId}/lifecycle-configuration", + "responseCode":200 + }, + "input":{"shape":"DescribeLifecycleConfigurationRequest"}, + "output":{"shape":"LifecycleConfigurationDescription"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"} + ], + "documentation":"

Returns the current LifecycleConfiguration object for the specified Amazon EFS file system. EFS lifecycle management uses the LifecycleConfiguration object to identify which files to move to the EFS Infrequent Access (IA) storage class. For a file system without a LifecycleConfiguration object, the call returns an empty array in the response.

This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration operation.

" }, "DescribeMountTargetSecurityGroups":{ "name":"DescribeMountTargetSecurityGroups", @@ -181,7 +197,7 @@ {"shape":"InternalServerError"}, {"shape":"FileSystemNotFound"} ], - "documentation":"

Returns the tags associated with a file system. The order of tags returned in the response of one DescribeTags call and the order of tags returned across the responses of a multi-call iteration (when using pagination) is unspecified.

This operation requires permissions for the elasticfilesystem:DescribeTags action.

" + "documentation":"

Returns the tags associated with a file system. The order of tags returned in the response of one DescribeTags call and the order of tags returned across the responses of a multiple-call iteration (when using pagination) is unspecified.

This operation requires permissions for the elasticfilesystem:DescribeTags action.

" }, "ModifyMountTargetSecurityGroups":{ "name":"ModifyMountTargetSecurityGroups", @@ -201,6 +217,23 @@ ], "documentation":"

Modifies the set of security groups in effect for a mount target.

When you create a mount target, Amazon EFS also creates a new network interface. For more information, see CreateMountTarget. This operation replaces the security groups in effect for the network interface associated with a mount target, with the SecurityGroups provided in the request. This operation requires that the network interface of the mount target has been created and the lifecycle state of the mount target is not deleted.

The operation requires permissions for the following actions:

" }, + "PutLifecycleConfiguration":{ + "name":"PutLifecycleConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/2015-02-01/file-systems/{FileSystemId}/lifecycle-configuration", + "responseCode":200 + }, + "input":{"shape":"PutLifecycleConfigurationRequest"}, + "output":{"shape":"LifecycleConfigurationDescription"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"IncorrectFileSystemLifeCycleState"} + ], + "documentation":"

Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A LifecycleConfiguration applies to all files in a file system.

Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management.

You can enable lifecycle management only for EFS file systems created after the release of EFS infrequent access.

In the request, specify the following:

This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation.

To apply a LifecycleConfiguration object to an encrypted file system, you need the same AWS Key Management Service (AWS KMS) permissions as when you created the encrypted file system.

" + }, "UpdateFileSystem":{ "name":"UpdateFileSystem", "http":{ @@ -241,19 +274,19 @@ "members":{ "CreationToken":{ "shape":"CreationToken", - "documentation":"

String of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent creation.

" + "documentation":"

A string of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent creation.

" }, "PerformanceMode":{ "shape":"PerformanceMode", - "documentation":"

The PerformanceMode of the file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. This can't be changed after the file system has been created.

" + "documentation":"

The performance mode of the file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created.

" }, "Encrypted":{ "shape":"Encrypted", - "documentation":"

A Boolean value that, if true, creates an encrypted file system. When creating an encrypted file system, you have the option of specifying a CreateFileSystemRequest$KmsKeyId for an existing AWS Key Management Service (AWS KMS) customer master key (CMK). If you don't specify a CMK, then the default CMK for Amazon EFS, /aws/elasticfilesystem, is used to protect the encrypted file system.

" + "documentation":"

A Boolean value that, if true, creates an encrypted file system. When creating an encrypted file system, you have the option of specifying CreateFileSystemRequest$KmsKeyId for an existing AWS Key Management Service (AWS KMS) customer master key (CMK). If you don't specify a CMK, then the default CMK for Amazon EFS, /aws/elasticfilesystem, is used to protect the encrypted file system.

" }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The ID of the AWS KMS CMK to be used to protect the encrypted file system. This parameter is only required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. This ID can be in one of the following formats:

If KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter must be set to true.

" + "documentation":"

The ID of the AWS KMS CMK to be used to protect the encrypted file system. This parameter is only required if you want to use a nondefault CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. This ID can be in one of the following formats:

If KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter must be set to true.

" }, "ThroughputMode":{ "shape":"ThroughputMode", @@ -261,7 +294,11 @@ }, "ProvisionedThroughputInMibps":{ "shape":"ProvisionedThroughputInMibps", - "documentation":"

The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. The limit on throughput is 1024 MiB/s. You can get these limits increased by contacting AWS Support. For more information, see Amazon EFS Limits That You Can Increase in the Amazon EFS User Guide.

" + "documentation":"

The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. The limit on throughput is 1024 MiB/s. You can get these limits increased by contacting AWS Support. For more information, see Amazon EFS Limits That You Can Increase in the Amazon EFS User Guide.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

A value that specifies to create one or more tags associated with the file system. Each tag is a user-defined key-value pair. Name your file system on creation by including a \"Key\":\"Name\",\"Value\":\"{value}\" key-value pair.

" } } }, @@ -274,11 +311,11 @@ "members":{ "FileSystemId":{ "shape":"FileSystemId", - "documentation":"

ID of the file system for which to create the mount target.

" + "documentation":"

The ID of the file system for which to create the mount target.

" }, "SubnetId":{ "shape":"SubnetId", - "documentation":"

ID of the subnet to add the mount target in.

" + "documentation":"

The ID of the subnet to add the mount target in.

" }, "IpAddress":{ "shape":"IpAddress", @@ -300,13 +337,13 @@ "members":{ "FileSystemId":{ "shape":"FileSystemId", - "documentation":"

ID of the file system whose tags you want to modify (String). This operation modifies the tags only, not the file system.

", + "documentation":"

The ID of the file system whose tags you want to modify (String). This operation modifies the tags only, not the file system.

", "location":"uri", "locationName":"FileSystemId" }, "Tags":{ "shape":"Tags", - "documentation":"

Array of Tag objects to add. Each Tag object is a key-value pair.

" + "documentation":"

An array of Tag objects to add. Each Tag object is a key-value pair.

" } }, "documentation":"

" @@ -322,7 +359,7 @@ "members":{ "FileSystemId":{ "shape":"FileSystemId", - "documentation":"

ID of the file system you want to delete.

", + "documentation":"

The ID of the file system you want to delete.

", "location":"uri", "locationName":"FileSystemId" } @@ -335,7 +372,7 @@ "members":{ "MountTargetId":{ "shape":"MountTargetId", - "documentation":"

ID of the mount target to delete (String).

", + "documentation":"

The ID of the mount target to delete (String).

", "location":"uri", "locationName":"MountTargetId" } @@ -351,13 +388,13 @@ "members":{ "FileSystemId":{ "shape":"FileSystemId", - "documentation":"

ID of the file system whose tags you want to delete (String).

", + "documentation":"

The ID of the file system whose tags you want to delete (String).

", "location":"uri", "locationName":"FileSystemId" }, "TagKeys":{ "shape":"TagKeys", - "documentation":"

List of tag keys to delete.

" + "documentation":"

A list of tag keys to delete.

" } }, "documentation":"

" @@ -378,7 +415,7 @@ "members":{ "MaxItems":{ "shape":"MaxItems", - "documentation":"

(Optional) Specifies the maximum number of file systems to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon EFS returns is the minimum of the MaxItems parameter specified in the request and the service's internal maximum number of items per page.

", + "documentation":"

(Optional) Specifies the maximum number of file systems to return in the response (integer). Currently, this number is automatically set to 10.

", "location":"querystring", "locationName":"MaxItems" }, @@ -412,7 +449,7 @@ }, "FileSystems":{ "shape":"FileSystemDescriptions", - "documentation":"

Array of file system descriptions.

" + "documentation":"

An array of file system descriptions.

" }, "NextMarker":{ "shape":"Marker", @@ -420,13 +457,25 @@ } } }, + "DescribeLifecycleConfigurationRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The ID of the file system whose LifecycleConfiguration object you want to retrieve (String).

", + "location":"uri", + "locationName":"FileSystemId" + } + } + }, "DescribeMountTargetSecurityGroupsRequest":{ "type":"structure", "required":["MountTargetId"], "members":{ "MountTargetId":{ "shape":"MountTargetId", - "documentation":"

ID of the mount target whose security groups you want to retrieve.

", + "documentation":"

The ID of the mount target whose security groups you want to retrieve.

", "location":"uri", "locationName":"MountTargetId" } @@ -439,7 +488,7 @@ "members":{ "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

Array of security groups.

" + "documentation":"

An array of security groups.

" } } }, @@ -448,7 +497,7 @@ "members":{ "MaxItems":{ "shape":"MaxItems", - "documentation":"

(Optional) Maximum number of mount targets to return in the response. It must be an integer with a value greater than zero.

", + "documentation":"

(Optional) Maximum number of mount targets to return in the response. Currently, this number is automatically set to 10.

", "location":"querystring", "locationName":"MaxItems" }, @@ -497,19 +546,19 @@ "members":{ "MaxItems":{ "shape":"MaxItems", - "documentation":"

(Optional) Maximum number of file system tags to return in the response. It must be an integer with a value greater than zero.

", + "documentation":"

(Optional) The maximum number of file system tags to return in the response. Currently, this number is automatically set to 10.

", "location":"querystring", "locationName":"MaxItems" }, "Marker":{ "shape":"Marker", - "documentation":"

(Optional) Opaque pagination token returned from a previous DescribeTags operation (String). If present, it specifies to continue the list from where the previous call left off.

", + "documentation":"

(Optional) An opaque pagination token returned from a previous DescribeTags operation (String). If present, it specifies to continue the list from where the previous call left off.

", "location":"querystring", "locationName":"Marker" }, "FileSystemId":{ "shape":"FileSystemId", - "documentation":"

ID of the file system whose tag set you want to retrieve.

", + "documentation":"

The ID of the file system whose tag set you want to retrieve.

", "location":"uri", "locationName":"FileSystemId" } @@ -566,44 +615,45 @@ "LifeCycleState", "NumberOfMountTargets", "SizeInBytes", - "PerformanceMode" + "PerformanceMode", + "Tags" ], "members":{ "OwnerId":{ "shape":"AwsAccountId", - "documentation":"

AWS account that created the file system. If the file system was created by an IAM user, the parent account to which the user belongs is the owner.

" + "documentation":"

The AWS account that created the file system. If the file system was created by an IAM user, the parent account to which the user belongs is the owner.

" }, "CreationToken":{ "shape":"CreationToken", - "documentation":"

Opaque string specified in the request.

" + "documentation":"

The opaque string specified in the request.

" }, "FileSystemId":{ "shape":"FileSystemId", - "documentation":"

ID of the file system, assigned by Amazon EFS.

" + "documentation":"

The ID of the file system, assigned by Amazon EFS.

" }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time that the file system was created, in seconds (since 1970-01-01T00:00:00Z).

" + "documentation":"

The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z).

" }, "LifeCycleState":{ "shape":"LifeCycleState", - "documentation":"

Lifecycle phase of the file system.

" + "documentation":"

The lifecycle phase of the file system.

" }, "Name":{ "shape":"TagValue", - "documentation":"

You can add tags to a file system, including a Name tag. For more information, see CreateTags. If the file system has a Name tag, Amazon EFS returns the value in this field.

" + "documentation":"

You can add tags to a file system, including a Name tag. For more information, see CreateFileSystem. If the file system has a Name tag, Amazon EFS returns the value in this field.

" }, "NumberOfMountTargets":{ "shape":"MountTargetCount", - "documentation":"

Current number of mount targets that the file system has. For more information, see CreateMountTarget.

" + "documentation":"

The current number of mount targets that the file system has. For more information, see CreateMountTarget.

" }, "SizeInBytes":{ "shape":"FileSystemSize", - "documentation":"

Latest known metered size (in bytes) of data stored in the file system, in its Value field, and the time at which that size was determined in its Timestamp field. The Timestamp value is the integer number of seconds since 1970-01-01T00:00:00Z. The SizeInBytes value doesn't represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, SizeInBytes represents actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not the exact size that the file system was at any point in time.

" + "documentation":"

The latest known metered size (in bytes) of data stored in the file system, in its Value field, and the time at which that size was determined in its Timestamp field. The Timestamp value is the integer number of seconds since 1970-01-01T00:00:00Z. The SizeInBytes value doesn't represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, SizeInBytes represents actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not the exact size that the file system was at any point in time.

" }, "PerformanceMode":{ "shape":"PerformanceMode", - "documentation":"

The PerformanceMode of the file system.

" + "documentation":"

The performance mode of the file system.

" }, "Encrypted":{ "shape":"Encrypted", @@ -619,10 +669,14 @@ }, "ProvisionedThroughputInMibps":{ "shape":"ProvisionedThroughputInMibps", - "documentation":"

The throughput, measured in MiB/s, that you want to provision for a file system. The limit on throughput is 1024 MiB/s. You can get these limits increased by contacting AWS Support. For more information, see Amazon EFS Limits That You Can Increase in the Amazon EFS User Guide.

" + "documentation":"

The throughput, measured in MiB/s, that you want to provision for a file system. The limit on throughput is 1024 MiB/s. You can get these limits increased by contacting AWS Support. For more information, see Amazon EFS Limits That You Can Increase in the Amazon EFS User Guide.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags associated with the file system, presented as an array of Tag objects.

" } }, - "documentation":"

Description of the file system.

" + "documentation":"

A description of the file system.

" }, "FileSystemDescriptions":{ "type":"list", @@ -662,20 +716,32 @@ "error":{"httpStatusCode":404}, "exception":true }, + "FileSystemNullableSizeValue":{ + "type":"long", + "min":0 + }, "FileSystemSize":{ "type":"structure", "required":["Value"], "members":{ "Value":{ "shape":"FileSystemSizeValue", - "documentation":"

Latest known metered size (in bytes) of data stored in the file system.

" + "documentation":"

The latest known metered size (in bytes) of data stored in the file system.

" }, "Timestamp":{ "shape":"Timestamp", - "documentation":"

Time at which the size of data, returned in the Value field, was determined. The value is the integer number of seconds since 1970-01-01T00:00:00Z.

" + "documentation":"

The time at which the size of data, returned in the Value field, was determined. The value is the integer number of seconds since 1970-01-01T00:00:00Z.

" + }, + "ValueInIA":{ + "shape":"FileSystemNullableSizeValue", + "documentation":"

The latest known metered size (in bytes) of data stored in the Infrequent Access storage class.

" + }, + "ValueInStandard":{ + "shape":"FileSystemNullableSizeValue", + "documentation":"

The latest known metered size (in bytes) of data stored in the Standard storage class.

" } }, - "documentation":"

Latest known metered size (in bytes) of data stored in the file system, in its Value field, and the time at which that size was determined in its Timestamp field. Note that the value does not represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, the value will represent the actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not necessarily the exact size the file system was at any instant in time.

" + "documentation":"

The latest known metered size (in bytes) of data stored in the file system, in its Value field, and the time at which that size was determined in its Timestamp field. The value doesn't represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, the value represents the actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not necessarily the exact size the file system was at any instant in time.

" }, "FileSystemSizeValue":{ "type":"long", @@ -752,6 +818,29 @@ "deleted" ] }, + "LifecycleConfigurationDescription":{ + "type":"structure", + "members":{ + "LifecyclePolicies":{ + "shape":"LifecyclePolicies", + "documentation":"

An array of lifecycle management policies. Currently, EFS supports a maximum of one policy per file system.

" + } + } + }, + "LifecyclePolicies":{ + "type":"list", + "member":{"shape":"LifecyclePolicy"} + }, + "LifecyclePolicy":{ + "type":"structure", + "members":{ + "TransitionToIA":{ + "shape":"TransitionToIARules", + "documentation":"

A value that indicates how long it takes to transition files to the IA storage class. Currently, the only valid value is AFTER_30_DAYS.

AFTER_30_DAYS indicates files that have not been read from or written to for 30 days are transitioned from the Standard storage class to the IA storage class. Metadata operations such as listing the contents of a directory don't count as a file access event.

" + } + }, + "documentation":"

Describes a policy used by EFS lifecycle management to transition files to the Infrequent Access (IA) storage class.

" + }, "Marker":{"type":"string"}, "MaxItems":{ "type":"integer", @@ -763,13 +852,13 @@ "members":{ "MountTargetId":{ "shape":"MountTargetId", - "documentation":"

ID of the mount target whose security groups you want to modify.

", + "documentation":"

The ID of the mount target whose security groups you want to modify.

", "location":"uri", "locationName":"MountTargetId" }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

Array of up to five VPC security group IDs.

" + "documentation":"

An array of up to five VPC security group IDs.

" } }, "documentation":"

" @@ -808,11 +897,11 @@ }, "FileSystemId":{ "shape":"FileSystemId", - "documentation":"

ID of the file system for which the mount target is intended.

" + "documentation":"

The ID of the file system for which the mount target is intended.

" }, "SubnetId":{ "shape":"SubnetId", - "documentation":"

ID of the mount target's subnet.

" + "documentation":"

The ID of the mount target's subnet.

" }, "LifeCycleState":{ "shape":"LifeCycleState", @@ -820,11 +909,11 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

Address at which the file system may be mounted via the mount target.

" + "documentation":"

Address at which the file system can be mounted by using the mount target.

" }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

ID of the network interface that Amazon EFS created when it created the mount target.

" + "documentation":"

The ID of the network interface that Amazon EFS created when it created the mount target.

" } }, "documentation":"

Provides a description of a mount target.

" @@ -853,7 +942,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The calling account has reached the limit for elastic network interfaces for the specific AWS Region. The client should try to delete some elastic network interfaces or get the account limit raised. For more information, see Amazon VPC Limits in the Amazon VPC User Guide (see the Network interfaces per VPC entry in the table).

", + "documentation":"

The calling account has reached the limit for elastic network interfaces for the specific AWS Region. The client should try to delete some elastic network interfaces or get the account limit raised. For more information, see Amazon VPC Limits in the Amazon VPC User Guide (see the Network interfaces per VPC entry in the table).

", "error":{"httpStatusCode":409}, "exception":true }, @@ -879,6 +968,25 @@ "type":"double", "min":0.0 }, + "PutLifecycleConfigurationRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "LifecyclePolicies" + ], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The ID of the file system for which you are creating the LifecycleConfiguration object (String).

", + "location":"uri", + "locationName":"FileSystemId" + }, + "LifecyclePolicies":{ + "shape":"LifecyclePolicies", + "documentation":"

An array of LifecyclePolicy objects that define the file system's LifecycleConfiguration object. A LifecycleConfiguration object tells lifecycle management when to transition files from the Standard storage class to the Infrequent Access storage class.

" + } + } + }, "SecurityGroup":{"type":"string"}, "SecurityGroupLimitExceeded":{ "type":"structure", @@ -928,14 +1036,14 @@ "members":{ "Key":{ "shape":"TagKey", - "documentation":"

Tag key (String). The key can't start with aws:.

" + "documentation":"

The tag key (String). The key can't start with aws:.

" }, "Value":{ "shape":"TagValue", - "documentation":"

Value of the tag key.

" + "documentation":"

The value of the tag key.

" } }, - "documentation":"

A tag is a key-value pair. Allowed characters: letters, whitespace, and numbers, representable in UTF-8, and the following characters: + - = . _ : /

" + "documentation":"

A tag is a key-value pair. Allowed characters are letters, white space, and numbers that can be represented in UTF-8, and the following characters: + - = . _ : /

" }, "TagKey":{ "type":"string", @@ -984,6 +1092,10 @@ "error":{"httpStatusCode":429}, "exception":true }, + "TransitionToIARules":{ + "type":"string", + "enum":["AFTER_30_DAYS"] + }, "UnsupportedAvailabilityZone":{ "type":"structure", "required":["ErrorCode"], @@ -1016,5 +1128,5 @@ } } }, - "documentation":"Amazon Elastic File System

Amazon Elastic File System (Amazon EFS) provides simple, scalable file storage for use with Amazon EC2 instances in the AWS Cloud. With Amazon EFS, storage capacity is elastic, growing and shrinking automatically as you add and remove files, so your applications have the storage they need, when they need it. For more information, see the User Guide.

" + "documentation":"Amazon Elastic File System

Amazon Elastic File System (Amazon EFS) provides simple, scalable file storage for use with Amazon EC2 instances in the AWS Cloud. With Amazon EFS, storage capacity is elastic, growing and shrinking automatically as you add and remove files, so your applications have the storage they need, when they need it. For more information, see the User Guide.

" } diff --git a/botocore/data/eks/2017-11-01/paginators-1.json b/botocore/data/eks/2017-11-01/paginators-1.json index ea142457..10570b9d 100644 --- a/botocore/data/eks/2017-11-01/paginators-1.json +++ b/botocore/data/eks/2017-11-01/paginators-1.json @@ -1,3 +1,16 @@ { - "pagination": {} + "pagination": { + "ListClusters": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "clusters" + }, + "ListUpdates": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "updateIds" + } + } } diff --git a/botocore/data/elasticbeanstalk/2010-12-01/paginators-1.json b/botocore/data/elasticbeanstalk/2010-12-01/paginators-1.json index 350cce4d..4f53c866 100644 --- a/botocore/data/elasticbeanstalk/2010-12-01/paginators-1.json +++ b/botocore/data/elasticbeanstalk/2010-12-01/paginators-1.json @@ -5,6 +5,30 @@ "output_token": "NextToken", "limit_key": "MaxRecords", "result_key": "Events" + }, + "DescribeApplicationVersions": { + "input_token": "NextToken", + "limit_key": "MaxRecords", + "output_token": "NextToken", + "result_key": "ApplicationVersions" + }, + "DescribeEnvironmentManagedActionHistory": { + "input_token": "NextToken", + "limit_key": "MaxItems", + "output_token": "NextToken", + "result_key": "ManagedActionHistoryItems" + }, + "DescribeEnvironments": { + "input_token": "NextToken", + "limit_key": "MaxRecords", + "output_token": "NextToken", + "result_key": "Environments" + }, + "ListPlatformVersions": { + "input_token": "NextToken", + "limit_key": "MaxRecords", + "output_token": "NextToken", + "result_key": "PlatformSummaryList" } } } diff --git a/botocore/data/elb/2012-06-01/paginators-1.json b/botocore/data/elb/2012-06-01/paginators-1.json index 444a77a5..b3bd3301 100644 --- a/botocore/data/elb/2012-06-01/paginators-1.json +++ b/botocore/data/elb/2012-06-01/paginators-1.json @@ -5,6 +5,12 @@ "output_token": "NextMarker", "result_key": "LoadBalancerDescriptions", 
"limit_key": "PageSize" + }, + "DescribeAccountLimits": { + "input_token": "Marker", + "limit_key": "PageSize", + "output_token": "NextMarker", + "result_key": "Limits" } } } diff --git a/botocore/data/elbv2/2015-12-01/paginators-1.json b/botocore/data/elbv2/2015-12-01/paginators-1.json index 138d1763..4521f5c2 100644 --- a/botocore/data/elbv2/2015-12-01/paginators-1.json +++ b/botocore/data/elbv2/2015-12-01/paginators-1.json @@ -17,6 +17,30 @@ "output_token": "NextMarker", "limit_key": "PageSize", "result_key": "Listeners" + }, + "DescribeAccountLimits": { + "input_token": "Marker", + "limit_key": "PageSize", + "output_token": "NextMarker", + "result_key": "Limits" + }, + "DescribeListenerCertificates": { + "input_token": "Marker", + "limit_key": "PageSize", + "output_token": "NextMarker", + "result_key": "Certificates" + }, + "DescribeRules": { + "input_token": "Marker", + "limit_key": "PageSize", + "output_token": "NextMarker", + "result_key": "Rules" + }, + "DescribeSSLPolicies": { + "input_token": "Marker", + "limit_key": "PageSize", + "output_token": "NextMarker", + "result_key": "SslPolicies" } } } diff --git a/botocore/data/elbv2/2015-12-01/service-2.json b/botocore/data/elbv2/2015-12-01/service-2.json index ef0a1aa9..e6619d29 100644 --- a/botocore/data/elbv2/2015-12-01/service-2.json +++ b/botocore/data/elbv2/2015-12-01/service-2.json @@ -28,7 +28,7 @@ {"shape":"TooManyCertificatesException"}, {"shape":"CertificateNotFoundException"} ], - "documentation":"

Adds the specified certificate to the specified secure listener.

If the certificate was already added, the call is successful but the certificate is not added again.

To list the certificates for your listener, use DescribeListenerCertificates. To remove certificates from your listener, use RemoveListenerCertificates. To specify the default SSL server certificate, use ModifyListener.

" + "documentation":"

Adds the specified certificate to the specified HTTPS listener.

If the certificate was already added, the call is successful but the certificate is not added again.

To list the certificates for your listener, use DescribeListenerCertificates. To remove certificates from your listener, use RemoveListenerCertificates. To specify the default SSL server certificate, use ModifyListener.

" }, "AddTags":{ "name":"AddTags", @@ -77,7 +77,7 @@ {"shape":"TooManyActionsException"}, {"shape":"InvalidLoadBalancerActionException"} ], - "documentation":"

Creates a listener for the specified Application Load Balancer or Network Load Balancer.

To update a listener, use ModifyListener. When you are finished with a listener, you can delete it using DeleteListener. If you are finished with both the listener and the load balancer, you can delete them both using DeleteLoadBalancer.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple listeners with the same settings, each call succeeds.

For more information, see Listeners for Your Application Load Balancers in the Application Load Balancers Guide and Listeners for Your Network Load Balancers in the Network Load Balancers Guide.

" + "documentation":"

Creates a listener for the specified Application Load Balancer or Network Load Balancer.

To update a listener, use ModifyListener. When you are finished with a listener, you can delete it using DeleteListener. If you are finished with both the listener and the load balancer, you can delete them both using DeleteLoadBalancer.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple listeners with the same settings, each call succeeds.

For more information, see Listeners for Your Application Load Balancers in the Application Load Balancers Guide and Listeners for Your Network Load Balancers in the Network Load Balancers Guide.

" }, "CreateLoadBalancer":{ "name":"CreateLoadBalancer", @@ -105,7 +105,7 @@ {"shape":"AvailabilityZoneNotSupportedException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

Creates an Application Load Balancer or a Network Load Balancer.

When you create a load balancer, you can specify security groups, public subnets, IP address type, and tags. Otherwise, you could do so later using SetSecurityGroups, SetSubnets, SetIpAddressType, and AddTags.

To create listeners for your load balancer, use CreateListener. To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer.

For limit information, see Limits for Your Application Load Balancer in the Application Load Balancers Guide and Limits for Your Network Load Balancer in the Network Load Balancers Guide.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple load balancers with the same settings, each call succeeds.

For more information, see Application Load Balancers in the Application Load Balancers Guide and Network Load Balancers in the Network Load Balancers Guide.

" + "documentation":"

Creates an Application Load Balancer or a Network Load Balancer.

When you create a load balancer, you can specify security groups, public subnets, IP address type, and tags. Otherwise, you could do so later using SetSecurityGroups, SetSubnets, SetIpAddressType, and AddTags.

To create listeners for your load balancer, use CreateListener. To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer.

For limit information, see Limits for Your Application Load Balancer in the Application Load Balancers Guide and Limits for Your Network Load Balancer in the Network Load Balancers Guide.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple load balancers with the same settings, each call succeeds.

For more information, see Application Load Balancers in the Application Load Balancers Guide and Network Load Balancers in the Network Load Balancers Guide.

" }, "CreateRule":{ "name":"CreateRule", @@ -133,7 +133,7 @@ {"shape":"TooManyActionsException"}, {"shape":"InvalidLoadBalancerActionException"} ], - "documentation":"

Creates a rule for the specified listener. The listener must be associated with an Application Load Balancer.

Rules are evaluated in priority order, from the lowest value to the highest value. When the conditions for a rule are met, its actions are performed. If the conditions for no rules are met, the actions for the default rule are performed. For more information, see Listener Rules in the Application Load Balancers Guide.

To view your current rules, use DescribeRules. To update a rule, use ModifyRule. To set the priorities of your rules, use SetRulePriorities. To delete a rule, use DeleteRule.

" + "documentation":"

Creates a rule for the specified listener. The listener must be associated with an Application Load Balancer.

Rules are evaluated in priority order, from the lowest value to the highest value. When the conditions for a rule are met, its actions are performed. If the conditions for no rules are met, the actions for the default rule are performed. For more information, see Listener Rules in the Application Load Balancers Guide.

To view your current rules, use DescribeRules. To update a rule, use ModifyRule. To set the priorities of your rules, use SetRulePriorities. To delete a rule, use DeleteRule.

" }, "CreateTargetGroup":{ "name":"CreateTargetGroup", @@ -151,7 +151,7 @@ {"shape":"TooManyTargetGroupsException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Creates a target group.

To register targets with the target group, use RegisterTargets. To update the health check settings for the target group, use ModifyTargetGroup. To monitor the health of targets in the target group, use DescribeTargetHealth.

To route traffic to the targets in a target group, specify the target group in an action using CreateListener or CreateRule.

To delete a target group, use DeleteTargetGroup.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple target groups with the same settings, each call succeeds.

For more information, see Target Groups for Your Application Load Balancers in the Application Load Balancers Guide or Target Groups for Your Network Load Balancers in the Network Load Balancers Guide.

" + "documentation":"

Creates a target group.

To register targets with the target group, use RegisterTargets. To update the health check settings for the target group, use ModifyTargetGroup. To monitor the health of targets in the target group, use DescribeTargetHealth.

To route traffic to the targets in a target group, specify the target group in an action using CreateListener or CreateRule.

To delete a target group, use DeleteTargetGroup.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple target groups with the same settings, each call succeeds.

For more information, see Target Groups for Your Application Load Balancers in the Application Load Balancers Guide or Target Groups for Your Network Load Balancers in the Network Load Balancers Guide.

" }, "DeleteListener":{ "name":"DeleteListener", @@ -248,7 +248,7 @@ "shape":"DescribeAccountLimitsOutput", "resultWrapper":"DescribeAccountLimitsResult" }, - "documentation":"

Describes the current Elastic Load Balancing resource limits for your AWS account.

For more information, see Limits for Your Application Load Balancers in the Application Load Balancer Guide or Limits for Your Network Load Balancers in the Network Load Balancers Guide.

" + "documentation":"

Describes the current Elastic Load Balancing resource limits for your AWS account.

For more information, see Limits for Your Application Load Balancers in the Application Load Balancer Guide or Limits for Your Network Load Balancers in the Network Load Balancers Guide.

" }, "DescribeListenerCertificates":{ "name":"DescribeListenerCertificates", @@ -264,7 +264,7 @@ "errors":[ {"shape":"ListenerNotFoundException"} ], - "documentation":"

Describes the certificates for the specified secure listener.

" + "documentation":"

Describes the certificates for the specified HTTPS listener.

" }, "DescribeListeners":{ "name":"DescribeListeners", @@ -298,7 +298,7 @@ "errors":[ {"shape":"LoadBalancerNotFoundException"} ], - "documentation":"

Describes the attributes for the specified Application Load Balancer or Network Load Balancer.

For more information, see Load Balancer Attributes in the Application Load Balancers Guide or Load Balancer Attributes in the Network Load Balancers Guide.

" + "documentation":"

Describes the attributes for the specified Application Load Balancer or Network Load Balancer.

For more information, see Load Balancer Attributes in the Application Load Balancers Guide or Load Balancer Attributes in the Network Load Balancers Guide.

" }, "DescribeLoadBalancers":{ "name":"DescribeLoadBalancers", @@ -348,7 +348,7 @@ "errors":[ {"shape":"SSLPolicyNotFoundException"} ], - "documentation":"

Describes the specified policies or all policies used for SSL negotiation.

For more information, see Security Policies in the Application Load Balancers Guide.

" + "documentation":"

Describes the specified policies or all policies used for SSL negotiation.

For more information, see Security Policies in the Application Load Balancers Guide.

" }, "DescribeTags":{ "name":"DescribeTags", @@ -383,7 +383,7 @@ "errors":[ {"shape":"TargetGroupNotFoundException"} ], - "documentation":"

Describes the attributes for the specified target group.

For more information, see Target Group Attributes in the Application Load Balancers Guide or Target Group Attributes in the Network Load Balancers Guide.

" + "documentation":"

Describes the attributes for the specified target group.

For more information, see Target Group Attributes in the Application Load Balancers Guide or Target Group Attributes in the Network Load Balancers Guide.

" }, "DescribeTargetGroups":{ "name":"DescribeTargetGroups", @@ -448,7 +448,7 @@ {"shape":"TooManyActionsException"}, {"shape":"InvalidLoadBalancerActionException"} ], - "documentation":"

Modifies the specified properties of the specified listener.

Any properties that you do not specify retain their current values. However, changing the protocol from HTTPS to HTTP removes the security policy and SSL certificate properties. If you change the protocol from HTTP to HTTPS, you must add the security policy and server certificate.

" + "documentation":"

Modifies the specified properties of the specified listener.

Any properties that you do not specify retain their current values. However, changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the security policy and server certificate properties. If you change the protocol from HTTP to HTTPS, or from TCP to TLS, you must add the security policy and server certificate properties.

" }, "ModifyLoadBalancerAttributes":{ "name":"ModifyLoadBalancerAttributes", @@ -560,7 +560,7 @@ {"shape":"ListenerNotFoundException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

Removes the specified certificate from the specified secure listener.

You can't remove the default certificate for a listener. To replace the default certificate, call ModifyListener.

To list the certificates for your listener, use DescribeListenerCertificates.

" + "documentation":"

Removes the specified certificate from the specified HTTPS listener.

You can't remove the default certificate for a listener. To replace the default certificate, call ModifyListener.

To list the certificates for your listener, use DescribeListenerCertificates.

" }, "RemoveTags":{ "name":"RemoveTags", @@ -673,11 +673,11 @@ }, "AuthenticateOidcConfig":{ "shape":"AuthenticateOidcActionConfig", - "documentation":"

[HTTPS listener] Information about an identity provider that is compliant with OpenID Connect (OIDC). Specify only when Type is authenticate-oidc.

" + "documentation":"

[HTTPS listeners] Information about an identity provider that is compliant with OpenID Connect (OIDC). Specify only when Type is authenticate-oidc.

" }, "AuthenticateCognitoConfig":{ "shape":"AuthenticateCognitoActionConfig", - "documentation":"

[HTTPS listener] Information for using Amazon Cognito to authenticate users. Specify only when Type is authenticate-cognito.

" + "documentation":"

[HTTPS listeners] Information for using Amazon Cognito to authenticate users. Specify only when Type is authenticate-cognito.

" }, "Order":{ "shape":"ActionOrder", @@ -863,8 +863,7 @@ "AuthorizationEndpoint", "TokenEndpoint", "UserInfoEndpoint", - "ClientId", - "ClientSecret" + "ClientId" ], "members":{ "Issuer":{ @@ -889,7 +888,7 @@ }, "ClientSecret":{ "shape":"AuthenticateOidcActionClientSecret", - "documentation":"

The OAuth 2.0 client secret.

" + "documentation":"

The OAuth 2.0 client secret. This parameter is required if you are creating a rule. If you are modifying a rule, you can omit this parameter if you set UseExistingClientSecret to true.

" }, "SessionCookieName":{ "shape":"AuthenticateOidcActionSessionCookieName", @@ -910,6 +909,10 @@ "OnUnauthenticatedRequest":{ "shape":"AuthenticateOidcActionConditionalBehaviorEnum", "documentation":"

The behavior if the user is not authenticated. The following are possible values:

" + }, + "UseExistingClientSecret":{ + "shape":"AuthenticateOidcActionUseExistingClientSecret", + "documentation":"

Indicates whether to use the existing client secret when modifying a rule. If you are creating a rule, you can omit this parameter or set it to false.

" } }, "documentation":"

Request parameters when using an identity provider (IdP) that is compliant with OpenID Connect (OIDC) to authenticate users.

" @@ -919,6 +922,7 @@ "AuthenticateOidcActionSessionCookieName":{"type":"string"}, "AuthenticateOidcActionSessionTimeout":{"type":"long"}, "AuthenticateOidcActionTokenEndpoint":{"type":"string"}, + "AuthenticateOidcActionUseExistingClientSecret":{"type":"boolean"}, "AuthenticateOidcActionUserInfoEndpoint":{"type":"string"}, "AvailabilityZone":{ "type":"structure", @@ -1025,7 +1029,7 @@ }, "Protocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocol is TCP.

" + "documentation":"

The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP and TLS.

" }, "Port":{ "shape":"Port", @@ -1033,15 +1037,15 @@ }, "SslPolicy":{ "shape":"SslPolicyName", - "documentation":"

[HTTPS listeners] The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.

" + "documentation":"

[HTTPS and TLS listeners] The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.

" }, "Certificates":{ "shape":"CertificateList", - "documentation":"

[HTTPS listeners] The default SSL server certificate. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

To create a certificate list, use AddListenerCertificates.

" + "documentation":"

[HTTPS and TLS listeners] The default SSL server certificate. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

To create a certificate list, use AddListenerCertificates.

" }, "DefaultActions":{ "shape":"Actions", - "documentation":"

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

[HTTPS listener] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listener] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" + "documentation":"

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP or TLS for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" } } }, @@ -1124,7 +1128,7 @@ }, "Actions":{ "shape":"Actions", - "documentation":"

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

[HTTPS listener] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listener] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" + "documentation":"

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP or TLS for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" } } }, @@ -1147,7 +1151,7 @@ }, "Protocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol to use for routing traffic to the targets. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocol is TCP. If the target is a Lambda function, this parameter does not apply.

" + "documentation":"

The protocol to use for routing traffic to the targets. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP and TLS. If the target is a Lambda function, this parameter does not apply.

" }, "Port":{ "shape":"Port", @@ -1159,7 +1163,7 @@ }, "HealthCheckProtocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported only if the protocol of the target group is TCP. For Application Load Balancers, the default is HTTP. For Network Load Balancers, the default is TCP.

" + "documentation":"

The protocol the load balancer uses when performing health checks on targets. For Application Load Balancers, the default is HTTP. For Network Load Balancers, the default is TCP. The TCP protocol is supported for health checks only if the protocol of the target group is TCP or TLS. The TLS protocol is not supported for health checks.

" }, "HealthCheckPort":{ "shape":"HealthCheckPort", @@ -1847,7 +1851,7 @@ }, "Certificates":{ "shape":"CertificateList", - "documentation":"

The SSL server certificate. You must provide a certificate if the protocol is HTTPS.

" + "documentation":"

The SSL server certificate. You must provide a certificate if the protocol is HTTPS or TLS.

" }, "SslPolicy":{ "shape":"SslPolicyName", @@ -1963,7 +1967,7 @@ "members":{ "Key":{ "shape":"LoadBalancerAttributeKey", - "documentation":"

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false.

The following attributes are supported by only Application Load Balancers:

  • access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false.

  • access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.

  • access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.

  • idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.

  • routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value is true or false. The default is true.

The following attributes are supported by only Network Load Balancers:

  • load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The value is true or false. The default is false.

" + "documentation":"

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false.

  • access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.

  • access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.

  • deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false.

The following attributes are supported by only Application Load Balancers:

  • idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.

  • routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value is true or false. The default is true.

The following attributes are supported by only Network Load Balancers:

  • load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The value is true or false. The default is false.

" }, "Value":{ "shape":"LoadBalancerAttributeValue", @@ -2071,19 +2075,19 @@ }, "Protocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol for connections from clients to the load balancer. Application Load Balancers support HTTP and HTTPS and Network Load Balancers support TCP.

" + "documentation":"

The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP and TLS protocols.

" }, "SslPolicy":{ "shape":"SslPolicyName", - "documentation":"

[HTTPS listeners] The security policy that defines which protocols and ciphers are supported. For more information, see Security Policies in the Application Load Balancers Guide.

" + "documentation":"

[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported. For more information, see Security Policies in the Application Load Balancers Guide.

" }, "Certificates":{ "shape":"CertificateList", - "documentation":"

[HTTPS listeners] The default SSL server certificate. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

To create a certificate list, use AddListenerCertificates.

" + "documentation":"

[HTTPS and TLS listeners] The default SSL server certificate. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

To create a certificate list, use AddListenerCertificates.

" }, "DefaultActions":{ "shape":"Actions", - "documentation":"

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

[HTTPS listener] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listener] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" + "documentation":"

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP or TLS for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" } } }, @@ -2136,7 +2140,7 @@ }, "Actions":{ "shape":"Actions", - "documentation":"

The actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

[HTTPS listener] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listener] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" + "documentation":"

The actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP or TLS for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" } } }, @@ -2185,7 +2189,7 @@ }, "HealthCheckProtocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported only if the protocol of the target group is TCP.

If the protocol of the target group is TCP, you can't modify this setting.

" + "documentation":"

The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported for health checks only if the protocol of the target group is TCP or TLS. The TLS protocol is not supported for health checks.

If the protocol of the target group is TCP, you can't modify this setting.

" }, "HealthCheckPort":{ "shape":"HealthCheckPort", @@ -2275,7 +2279,8 @@ "enum":[ "HTTP", "HTTPS", - "TCP" + "TCP", + "TLS" ] }, "RedirectActionConfig":{ @@ -3101,5 +3106,5 @@ "VpcId":{"type":"string"}, "ZoneName":{"type":"string"} }, - "documentation":"Elastic Load Balancing

A load balancer distributes incoming traffic across targets, such as your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer. You configure a target group with a protocol and port number for connections from the load balancer to the targets, and with health check settings to be used when checking the health status of the targets.

Elastic Load Balancing supports the following types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers.

An Application Load Balancer makes routing and load balancing decisions at the application layer (HTTP/HTTPS). A Network Load Balancer makes routing and load balancing decisions at the transport layer (TCP). Both Application Load Balancers and Network Load Balancers can route requests to one or more ports on each EC2 instance or container instance in your virtual private cloud (VPC).

A Classic Load Balancer makes routing and load balancing decisions either at the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS), and supports either EC2-Classic or a VPC. For more information, see the Elastic Load Balancing User Guide.

This reference covers the 2015-12-01 API, which supports Application Load Balancers and Network Load Balancers. The 2012-06-01 API supports Classic Load Balancers.

To get started, complete the following tasks:

  1. Create a load balancer using CreateLoadBalancer.

  2. Create a target group using CreateTargetGroup.

  3. Register targets for the target group using RegisterTargets.

  4. Create one or more listeners for your load balancer using CreateListener.

To delete a load balancer and its related resources, complete the following tasks:

  1. Delete the load balancer using DeleteLoadBalancer.

  2. Delete the target group using DeleteTargetGroup.

All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds.

" + "documentation":"Elastic Load Balancing

A load balancer distributes incoming traffic across targets, such as your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer. You configure a target group with a protocol and port number for connections from the load balancer to the targets, and with health check settings to be used when checking the health status of the targets.

Elastic Load Balancing supports the following types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers.

An Application Load Balancer makes routing and load balancing decisions at the application layer (HTTP/HTTPS). A Network Load Balancer makes routing and load balancing decisions at the transport layer (TCP/TLS). Both Application Load Balancers and Network Load Balancers can route requests to one or more ports on each EC2 instance or container instance in your virtual private cloud (VPC).

A Classic Load Balancer makes routing and load balancing decisions either at the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS), and supports either EC2-Classic or a VPC. For more information, see the Elastic Load Balancing User Guide.

This reference covers the 2015-12-01 API, which supports Application Load Balancers and Network Load Balancers. The 2012-06-01 API supports Classic Load Balancers.

To get started, complete the following tasks:

  1. Create a load balancer using CreateLoadBalancer.

  2. Create a target group using CreateTargetGroup.

  3. Register targets for the target group using RegisterTargets.

  4. Create one or more listeners for your load balancer using CreateListener.

To delete a load balancer and its related resources, complete the following tasks:

  1. Delete the load balancer using DeleteLoadBalancer.

  2. Delete the target group using DeleteTargetGroup.

All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds.

" } diff --git a/botocore/data/emr/2009-03-31/paginators-1.json b/botocore/data/emr/2009-03-31/paginators-1.json index 67ccf448..023eb7cc 100644 --- a/botocore/data/emr/2009-03-31/paginators-1.json +++ b/botocore/data/emr/2009-03-31/paginators-1.json @@ -29,6 +29,11 @@ "input_token": "Marker", "output_token": "Marker", "result_key": "InstanceFleets" + }, + "ListSecurityConfigurations": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "SecurityConfigurations" } } } diff --git a/botocore/data/emr/2009-03-31/service-2.json b/botocore/data/emr/2009-03-31/service-2.json index 69a49569..3337fe40 100644 --- a/botocore/data/emr/2009-03-31/service-2.json +++ b/botocore/data/emr/2009-03-31/service-2.json @@ -10,7 +10,6 @@ "serviceId":"EMR", "signatureVersion":"v4", "targetPrefix":"ElasticMapReduce", - "timestampFormat":"unixTimestamp", "uid":"elasticmapreduce-2009-03-31" }, "operations":{ @@ -525,7 +524,7 @@ "documentation":"

This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.

" } }, - "documentation":"

An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action argument. For more information, see Using the MapR Distribution for Hadoop. Currently supported values are:

  • \"mapr-m3\" - launch the cluster using MapR M3 Edition.

  • \"mapr-m5\" - launch the cluster using MapR M5 Edition.

  • \"mapr\" with the user arguments specifying \"--edition,m3\" or \"--edition,m5\" - launch the cluster using MapR M3 or M5 Edition, respectively.

In Amazon EMR releases 4.x and later, the only accepted parameter is the application name. To pass arguments to applications, you supply a configuration for each application.

" + "documentation":"

With Amazon EMR release version 4.0 and later, the only accepted parameter is the application name. To pass arguments to applications, you use configuration classifications specified using configuration JSON objects. For more information, see Configuring Applications.

With earlier Amazon EMR releases, the application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action argument.

" }, "ApplicationList":{ "type":"list", @@ -1286,7 +1285,7 @@ }, "EmrManagedSlaveSecurityGroup":{ "shape":"String", - "documentation":"

The identifier of the Amazon EC2 security group for the slave nodes.

" + "documentation":"

The identifier of the Amazon EC2 security group for the core and task nodes.

" }, "ServiceAccessSecurityGroup":{ "shape":"String", @@ -1298,7 +1297,7 @@ }, "AdditionalSlaveSecurityGroups":{ "shape":"StringList", - "documentation":"

A list of additional Amazon EC2 security group IDs for the slave nodes.

" + "documentation":"

A list of additional Amazon EC2 security group IDs for the core and task nodes.

" } }, "documentation":"

Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.

" @@ -2290,7 +2289,7 @@ }, "SlaveInstanceType":{ "shape":"InstanceType", - "documentation":"

The EC2 instance type of the slave nodes.

" + "documentation":"

The EC2 instance type of the core and task nodes.

" }, "InstanceCount":{ "shape":"Integer", @@ -2338,7 +2337,7 @@ }, "EmrManagedSlaveSecurityGroup":{ "shape":"XmlStringMaxLen256", - "documentation":"

The identifier of the Amazon EC2 security group for the slave nodes.

" + "documentation":"

The identifier of the Amazon EC2 security group for the core and task nodes.

" }, "ServiceAccessSecurityGroup":{ "shape":"XmlStringMaxLen256", @@ -2350,7 +2349,7 @@ }, "AdditionalSlaveSecurityGroups":{ "shape":"SecurityGroupsList", - "documentation":"

A list of additional Amazon EC2 security group IDs for the slave nodes.

" + "documentation":"

A list of additional Amazon EC2 security group IDs for the core and task nodes.

" } }, "documentation":"

A description of the Amazon EC2 instance on which the cluster (job flow) runs. A valid JobFlowInstancesConfig must contain either InstanceGroups or InstanceFleets, which is the recommended configuration. They cannot be used together. You may also have MasterInstanceType, SlaveInstanceType, and InstanceCount (all three must be present), but we don't recommend this configuration.

" @@ -2377,11 +2376,11 @@ }, "SlaveInstanceType":{ "shape":"InstanceType", - "documentation":"

The Amazon EC2 slave node instance type.

" + "documentation":"

The Amazon EC2 core and task node instance type.

" }, "InstanceCount":{ "shape":"Integer", - "documentation":"

The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and slave node. If the value is greater than 1, one instance is the master node and all others are slave nodes.

" + "documentation":"

The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and core and task node. If the value is greater than 1, one instance is the master node and all others are core and task nodes.

" }, "InstanceGroups":{ "shape":"InstanceGroupDetailList", @@ -2916,7 +2915,7 @@ }, "Applications":{ "shape":"ApplicationList", - "documentation":"

For Amazon EMR releases 4.0 and later. A list of applications for the cluster. Valid values are: \"Hadoop\", \"Hive\", \"Mahout\", \"Pig\", and \"Spark.\" They are case insensitive.

" + "documentation":"

Applies to Amazon EMR releases 4.0 and later. A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster. For a list of applications available for each Amazon EMR release version, see the Amazon EMR Release Guide.

" }, "Configurations":{ "shape":"ConfigurationList", @@ -3180,7 +3179,7 @@ }, "TimeoutAction":{ "shape":"SpotProvisioningTimeoutAction", - "documentation":"

The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired. Spot instances are not uprovisioned within the Spot provisioining timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.

" + "documentation":"

The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when all Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.

" }, "BlockDurationMinutes":{ "shape":"WholeNumber", @@ -3223,7 +3222,7 @@ }, "ActionOnFailure":{ "shape":"ActionOnFailure", - "documentation":"

This specifies what action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE.

" + "documentation":"

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

" }, "Status":{ "shape":"StepStatus", @@ -3245,7 +3244,7 @@ }, "ActionOnFailure":{ "shape":"ActionOnFailure", - "documentation":"

The action to take if the step fails.

" + "documentation":"

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

" }, "HadoopJarStep":{ "shape":"HadoopJarStepConfig", @@ -3400,7 +3399,7 @@ }, "ActionOnFailure":{ "shape":"ActionOnFailure", - "documentation":"

This specifies what action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE.

" + "documentation":"

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

" }, "Status":{ "shape":"StepStatus", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 1a22d50e..07b9e12c 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -102,13 +102,114 @@ "us-west-2" : { } } }, + "api.ecr" : { + "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "api.ecr.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "api.ecr.ap-northeast-2.amazonaws.com" + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "api.ecr.ap-south-1.amazonaws.com" + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "api.ecr.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "api.ecr.ap-southeast-2.amazonaws.com" + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "api.ecr.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "api.ecr.eu-central-1.amazonaws.com" + }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "api.ecr.eu-north-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "api.ecr.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "api.ecr.eu-west-2.amazonaws.com" + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "api.ecr.eu-west-3.amazonaws.com" + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "api.ecr.sa-east-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : 
"api.ecr.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "api.ecr.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "api.ecr.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "api.ecr.us-west-2.amazonaws.com" + } + } + }, "api.mediatailor" : { "endpoints" : { "ap-northeast-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-west-1" : { }, - "us-east-1" : { } + "us-east-1" : { }, + "us-west-2" : { } } }, "api.pricing" : { @@ -134,9 +235,33 @@ "eu-west-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "api-fips.sagemaker.us-east-1.amazonaws.com" + }, "us-east-2" : { }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "api-fips.sagemaker.us-east-2.amazonaws.com" + }, "us-west-1" : { }, - "us-west-2" : { } + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "api-fips.sagemaker.us-west-1.amazonaws.com" + }, + "us-west-2" : { }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "api-fips.sagemaker.us-west-2.amazonaws.com" + } } }, "apigateway" : { @@ -195,6 +320,10 @@ }, "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-west-2" : { } @@ -285,6 +414,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -662,7 +792,10 @@ "protocols" : [ "https" ] }, "endpoints" : { + "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ 
-724,12 +857,27 @@ "us-west-2" : { } } }, + "datasync" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "dax" : { "endpoints" : { "ap-northeast-1" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -777,6 +925,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -787,6 +936,34 @@ "us-west-2" : { } } }, + "docdb" : { + "endpoints" : { + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "rds.eu-west-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "rds.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "rds.us-east-2.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "rds.us-west-2.amazonaws.com" + } + } + }, "ds" : { "endpoints" : { "ap-northeast-1" : { }, @@ -858,26 +1035,6 @@ "us-west-2" : { } } }, - "ecr" : { - "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } - } - }, "ecs" : { "endpoints" : { "ap-northeast-1" : { }, @@ -952,6 +1109,7 @@ "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -1051,6 +1209,12 @@ 
"eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "es-fips.us-west-1.amazonaws.com" + }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1111,6 +1275,14 @@ "us-west-2" : { } } }, + "fsx" : { + "endpoints" : { + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "gamelift" : { "endpoints" : { "ap-northeast-1" : { }, @@ -1163,6 +1335,7 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -1321,6 +1494,12 @@ }, "kms" : { "endpoints" : { + "ProdFips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "kms-fips.ca-central-1.amazonaws.com" + }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -1359,6 +1538,21 @@ "us-west-2" : { } } }, + "license-manager" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "lightsail" : { "endpoints" : { "ap-northeast-1" : { }, @@ -1536,6 +1730,20 @@ "us-west-2" : { } } }, + "mq" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "mturk-requester" : { "endpoints" : { "sandbox" : { @@ -1547,12 +1755,24 @@ }, "neptune" : { "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "rds.ap-northeast-1.amazonaws.com" + }, "ap-southeast-1" : { "credentialScope" : { "region" : "ap-southeast-1" }, "hostname" : "rds.ap-southeast-1.amazonaws.com" }, + "ap-southeast-2" : { + 
"credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "rds.ap-southeast-2.amazonaws.com" + }, "eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -1642,6 +1862,7 @@ } }, "endpoints" : { + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-west-2" : { } @@ -1656,6 +1877,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1711,6 +1933,8 @@ "rekognition" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-2" : { }, "eu-west-1" : { }, "us-east-1" : { }, @@ -1762,6 +1986,27 @@ "us-east-1" : { } } }, + "route53resolver" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "runtime.lex" : { "defaults" : { "credentialScope" : { @@ -2057,6 +2302,25 @@ } } }, + "securityhub" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "serverlessrepo" : { "defaults" : { "protocols" : [ "https" ] @@ -2115,6 +2379,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2487,6 +2752,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, 
"ca-central-1" : { }, @@ -2628,6 +2894,22 @@ } }, "services" : { + "api.ecr" : { + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "api.ecr.cn-north-1.amazonaws.com.cn" + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "api.ecr.cn-northwest-1.amazonaws.com.cn" + } + } + }, "apigateway" : { "endpoints" : { "cn-north-1" : { }, @@ -2738,12 +3020,6 @@ "cn-northwest-1" : { } } }, - "ecr" : { - "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } - } - }, "ecs" : { "endpoints" : { "cn-north-1" : { }, @@ -2792,6 +3068,17 @@ "cn-northwest-1" : { } } }, + "firehose" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "gamelift" : { + "endpoints" : { + "cn-north-1" : { } + } + }, "glacier" : { "defaults" : { "protocols" : [ "http", "https" ] @@ -2935,6 +3222,12 @@ "cn-northwest-1" : { } } }, + "states" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "storagegateway" : { "endpoints" : { "cn-north-1" : { } @@ -2996,6 +3289,22 @@ "us-gov-west-1" : { } } }, + "api.ecr" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "api.ecr.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "api.ecr.us-gov-west-1.amazonaws.com" + } + } + }, "api.sagemaker" : { "endpoints" : { "us-gov-west-1" : { } @@ -3013,6 +3322,11 @@ "us-gov-west-1" : { } } }, + "athena" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "autoscaling" : { "endpoints" : { "us-gov-east-1" : { }, @@ -3124,12 +3438,6 @@ "us-gov-west-1" : { } } }, - "ecr" : { - "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } - } - }, "ecs" : { "endpoints" : { "us-gov-east-1" : { }, @@ -3177,6 +3485,12 @@ }, "es" : { "endpoints" : { + "fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + 
"hostname" : "es-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -3187,6 +3501,11 @@ "us-gov-west-1" : { } } }, + "firehose" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "glacier" : { "endpoints" : { "us-gov-east-1" : { }, @@ -3195,6 +3514,11 @@ } } }, + "glue" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "guardduty" : { "defaults" : { "protocols" : [ "https" ] @@ -3240,6 +3564,12 @@ }, "kms" : { "endpoints" : { + "ProdFips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "kms-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -3256,6 +3586,11 @@ "us-gov-west-1" : { } } }, + "mediaconvert" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "metering.marketplace" : { "defaults" : { "credentialScope" : { @@ -3450,6 +3785,11 @@ "hostname" : "translate-fips.us-gov-west-1.amazonaws.com" } } + }, + "workspaces" : { + "endpoints" : { + "us-gov-west-1" : { } + } } } } ], diff --git a/botocore/data/es/2015-01-01/paginators-1.json b/botocore/data/es/2015-01-01/paginators-1.json index b6fefece..4c0f24e4 100644 --- a/botocore/data/es/2015-01-01/paginators-1.json +++ b/botocore/data/es/2015-01-01/paginators-1.json @@ -11,6 +11,24 @@ "output_token": "NextToken", "input_token": "NextToken", "limit_key": "MaxResults" + }, + "DescribeReservedElasticsearchInstanceOfferings": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ReservedElasticsearchInstanceOfferings" + }, + "DescribeReservedElasticsearchInstances": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ReservedElasticsearchInstances" + }, + "GetUpgradeHistory": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "UpgradeHistories" } } } diff --git a/botocore/data/es/2015-01-01/service-2.json 
b/botocore/data/es/2015-01-01/service-2.json index dc2568ff..8c42b356 100644 --- a/botocore/data/es/2015-01-01/service-2.json +++ b/botocore/data/es/2015-01-01/service-2.json @@ -982,6 +982,10 @@ "shape":"Boolean", "documentation":"

A boolean value to indicate whether zone awareness is enabled. See About Zone Awareness for more information.

" }, + "ZoneAwarenessConfig":{ + "shape":"ZoneAwarenessConfig", + "documentation":"

Specifies the zone awareness configuration for a domain when zone awareness is enabled.

" + }, "DedicatedMasterType":{ "shape":"ESPartitionInstanceType", "documentation":"

The instance type for a dedicated master node.

" @@ -2259,6 +2263,16 @@ "gp2", "io1" ] + }, + "ZoneAwarenessConfig":{ + "type":"structure", + "members":{ + "AvailabilityZoneCount":{ + "shape":"IntegerClass", + "documentation":"

An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. This should be equal to number of subnets if VPC endpoints is enabled

" + } + }, + "documentation":"

Specifies the zone awareness configuration for the domain cluster, such as the number of availability zones.

" } }, "documentation":"Amazon Elasticsearch Configuration Service

Use the Amazon Elasticsearch configuration API to create, configure, and manage Elasticsearch domains.

The endpoint for configuration service requests is region-specific: es.region.amazonaws.com. For example, es.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints.

" diff --git a/botocore/data/fms/2018-01-01/paginators-1.json b/botocore/data/fms/2018-01-01/paginators-1.json index ea142457..d72296c5 100644 --- a/botocore/data/fms/2018-01-01/paginators-1.json +++ b/botocore/data/fms/2018-01-01/paginators-1.json @@ -1,3 +1,22 @@ { - "pagination": {} + "pagination": { + "ListComplianceStatus": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PolicyComplianceStatusList" + }, + "ListMemberAccounts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "MemberAccounts" + }, + "ListPolicies": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PolicyList" + } + } } diff --git a/botocore/data/fms/2018-01-01/service-2.json b/botocore/data/fms/2018-01-01/service-2.json index 9022388e..dc849154 100644 --- a/botocore/data/fms/2018-01-01/service-2.json +++ b/botocore/data/fms/2018-01-01/service-2.json @@ -68,7 +68,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Disassociates the account that has been set as the AWS Firewall Manager administrator account. You will need to submit an AssociateAdminAccount request to set a new account as the AWS Firewall administrator.

" + "documentation":"

Disassociates the account that has been set as the AWS Firewall Manager administrator account. To set a different account as the administrator account, you must submit an AssociateAdminAccount request .

" }, "GetAdminAccount":{ "name":"GetAdminAccount", @@ -286,6 +286,10 @@ "PolicyId":{ "shape":"PolicyId", "documentation":"

The ID of the policy that you want to delete. PolicyId is returned by PutPolicy and by ListPolicies.

" + }, + "DeleteAllPolicyResources":{ + "shape":"Boolean", + "documentation":"

If True, the request will also delete all web ACLs in this policy. Associated resources will no longer be protected by web ACLs in this policy.

" } } }, @@ -499,7 +503,7 @@ }, "MaxResults":{ "shape":"PaginationMaxResults", - "documentation":"

Specifies the number of member account IDs that you want AWS Firewall Manager to return for this request. If you have more IDs than the number that you specify for MaxResults, the response includes a NextToken value that you can use to get another batch of member account IDs. The maximum value for MaxResults is 100.

" + "documentation":"

Specifies the number of member account IDs that you want AWS Firewall Manager to return for this request. If you have more IDs than the number that you specify for MaxResults, the response includes a NextToken value that you can use to get another batch of member account IDs.

" } } }, @@ -604,11 +608,11 @@ }, "IncludeMap":{ "shape":"CustomerPolicyScopeMap", - "documentation":"

Specifies the AWS account IDs to include in the policy. If IncludeMap is null, all accounts in the AWS Organization are included in the policy. If IncludeMap is not null, only values listed in IncludeMap will be included in the policy.

The key to the map is ACCOUNT. For example, a valid IncludeMap would be {“ACCOUNT” : [“accountID1”, “accountID2”]}.

" + "documentation":"

Specifies the AWS account IDs to include in the policy. If IncludeMap is null, all accounts in the organization in AWS Organizations are included in the policy. If IncludeMap is not null, only values listed in IncludeMap are included in the policy.

The key to the map is ACCOUNT. For example, a valid IncludeMap would be {“ACCOUNT” : [“accountID1”, “accountID2”]}.

" }, "ExcludeMap":{ "shape":"CustomerPolicyScopeMap", - "documentation":"

Specifies the AWS account IDs to exclude from the policy. The IncludeMap values are evaluated first, with all of the appropriate account IDs added to the policy. Then the accounts listed in ExcludeMap are removed, resulting in the final list of accounts to add to the policy.

The key to the map is ACCOUNT. For example, a valid ExcludeMap would be {“ACCOUNT” : [“accountID1”, “accountID2”]}.

" + "documentation":"

Specifies the AWS account IDs to exclude from the policy. The IncludeMap values are evaluated first, with all the appropriate account IDs added to the policy. Then the accounts listed in ExcludeMap are removed, resulting in the final list of accounts to add to the policy.

The key to the map is ACCOUNT. For example, a valid ExcludeMap would be {“ACCOUNT” : [“accountID1”, “accountID2”]}.

" } }, "documentation":"

An AWS Firewall Manager policy.

" @@ -642,7 +646,7 @@ }, "IssueInfoMap":{ "shape":"IssueInfoMap", - "documentation":"

Details about problems with dependent services, such as AWS WAF or AWS Config, that are causing a resource to be non-compliant. The details include the name of the dependent service and the error message recieved indicating the problem with the service.

" + "documentation":"

Details about problems with dependent services, such as AWS WAF or AWS Config, that are causing a resource to be non-compliant. The details include the name of the dependent service and the error message received that indicates the problem with the service.

" } }, "documentation":"

Describes the non-compliant resources in a member account for a specific AWS Firewall Manager policy. A maximum of 100 entries are displayed. If more than 100 resources are non-compliant, EvaluationLimitExceeded is set to True.

" @@ -676,7 +680,7 @@ }, "IssueInfoMap":{ "shape":"IssueInfoMap", - "documentation":"

Details about problems with dependent services, such as AWS WAF or AWS Config, that are causing a resource to be non-compliant. The details include the name of the dependent service and the error message recieved indicating the problem with the service.

" + "documentation":"

Details about problems with dependent services, such as AWS WAF or AWS Config, that are causing a resource to be non-compliant. The details include the name of the dependent service and the error message received that indicates the problem with the service.

" } }, "documentation":"

Indicates whether the account is compliant with the specified policy. An account is considered non-compliant if it includes resources that are not protected by the policy.

" diff --git a/botocore/data/fsx/2018-03-01/paginators-1.json b/botocore/data/fsx/2018-03-01/paginators-1.json index ea142457..863c57a3 100644 --- a/botocore/data/fsx/2018-03-01/paginators-1.json +++ b/botocore/data/fsx/2018-03-01/paginators-1.json @@ -1,3 +1,22 @@ { - "pagination": {} + "pagination": { + "DescribeBackups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Backups" + }, + "DescribeFileSystems": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "FileSystems" + }, + "ListTagsForResource": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Tags" + } + } } diff --git a/botocore/data/fsx/2018-03-01/service-2.json b/botocore/data/fsx/2018-03-01/service-2.json index c39670b3..0e9ba7aa 100644 --- a/botocore/data/fsx/2018-03-01/service-2.json +++ b/botocore/data/fsx/2018-03-01/service-2.json @@ -44,6 +44,7 @@ {"shape":"ActiveDirectoryError"}, {"shape":"IncompatibleParameterError"}, {"shape":"InvalidImportPath"}, + {"shape":"InvalidExportPath"}, {"shape":"InvalidNetworkSettings"}, {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"}, @@ -81,6 +82,7 @@ "output":{"shape":"DeleteBackupResponse"}, "errors":[ {"shape":"BadRequest"}, + {"shape":"BackupInProgress"}, {"shape":"BackupNotFound"}, {"shape":"BackupRestoring"}, {"shape":"IncompatibleParameterError"}, @@ -476,7 +478,11 @@ }, "ImportPath":{ "shape":"ArchivePath", - "documentation":"

(Optional) The path to the Amazon S3 bucket (and optional prefix) that you're using as the data repository for your FSx for Lustre file system, for example s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.

" + "documentation":"

(Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.

" + }, + "ExportPath":{ + "shape":"ArchivePath", + "documentation":"

(Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an ExportPath value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z.

The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you only specify a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file system to that export prefix in the Amazon S3 bucket.

" }, "ImportedFileChunkSize":{ "shape":"Megabytes", @@ -595,7 +601,7 @@ }, "ExportPath":{ "shape":"ArchivePath", - "documentation":"

The Amazon S3 commit path to use for storing new and changed Lustre file system files as part of the archive operation from the file system to Amazon S3. The value is s3://import-bucket/FSxLustre[creationtimestamp]. The timestamp is presented in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z. Files are archived to a different prefix in the Amazon S3 bucket, preventing input data from being overwritten.

" + "documentation":"

The export path to the Amazon S3 bucket (and prefix) that you are using to store new and changed Lustre file system files in S3.

" }, "ImportedFileChunkSize":{ "shape":"Megabytes", @@ -812,7 +818,7 @@ }, "NetworkInterfaceIds":{ "shape":"NetworkInterfaceIds", - "documentation":"

The IDs of the elastic network interface from which a specific file system is accessible. The elastic network interface is automatically created in the same VPC that the Amazon FSx file system was created in. For more information, see Elastic Network Interfaces in the Amazon EC2 User Guide.

For an Amazon FSx for Windows File Server file system, you can have one network interface Id. For an Amazon FSx for Lustre file system, you can have more than one.

" + "documentation":"

The IDs of the elastic network interface from which a specific file system is accessible. The elastic network interface is automatically created in the same VPC that the Amazon FSx file system was created in. For more information, see Elastic Network Interfaces in the Amazon EC2 User Guide.

For an Amazon FSx for Windows File Server file system, you can have one network interface Id. For an Amazon FSx for Lustre file system, you can have more than one.

" }, "DNSName":{ "shape":"DNSName", @@ -828,7 +834,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

The tags to associate with the file system. For more information, see Tagging Your Amazon EC2 Resources in the Amazon EC2 User Guide.

" + "documentation":"

The tags to associate with the file system. For more information, see Tagging Your Amazon EC2 Resources in the Amazon EC2 User Guide.

" }, "WindowsConfiguration":{ "shape":"WindowsFileSystemConfiguration", @@ -971,6 +977,14 @@ "exception":true, "fault":true }, + "InvalidExportPath":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The path provided for data repository export isn't valid.

", + "exception":true + }, "InvalidImportPath":{ "type":"structure", "members":{ @@ -991,7 +1005,7 @@ }, "KmsKeyId":{ "type":"string", - "documentation":"

The ID of your AWS Key Management Service (AWS KMS) key. This ID is used to encrypt the data in your file system at rest. For more information, see Encrypt in the AWS Key Management Service API Reference.

", + "documentation":"

The ID of your AWS Key Management Service (AWS KMS) key. This ID is used to encrypt the data in your file system at rest. For more information, see Encrypt in the AWS Key Management Service API Reference.

", "max":2048, "min":1, "pattern":"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[89aAbB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}|arn:aws[a-z-]{0,7}:kms:[a-z]{2}-[a-z-]{4,}-\\d+:\\d{12}:(key|alias)\\/([a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[89aAbB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}|[a-zA-Z0-9:\\/_-]+)|alias\\/[a-zA-Z0-9:\\/_-]+$" @@ -1066,7 +1080,7 @@ }, "NetworkInterfaceId":{ "type":"string", - "documentation":"

An elastic network interface ID. An elastic network interface is a logical networking component in a virtual private cloud (VPC) that represents a virtual network card. For more information, see Elastic Network Interfaces in the Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

An elastic network interface ID. An elastic network interface is a logical networking component in a virtual private cloud (VPC) that represents a virtual network card. For more information, see Elastic Network Interfaces in the Amazon EC2 User Guide for Linux Instances.

", "max":21, "min":12, "pattern":"^(eni-[0-9a-f]{8,})$" @@ -1110,7 +1124,7 @@ }, "ResourceARN":{ "type":"string", - "documentation":"

The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify AWS resources. We require an ARN when you need to specify a resource unambiguously across all of AWS. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", + "documentation":"

The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify AWS resources. We require an ARN when you need to specify a resource unambiguously across all of AWS. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", "max":512, "min":8, "pattern":"^arn:aws[a-z-]{0,7}:[A-Za-z0-9][A-za-z0-9_/.-]{0,62}:[A-za-z0-9_/.-]{0,63}:[A-za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.-]{0,127}$" @@ -1143,7 +1157,7 @@ }, "SecurityGroupId":{ "type":"string", - "documentation":"

The ID of your Amazon EC2 security group. This ID is used to control network access to the endpoint that Amazon FSx creates on your behalf in each subnet. For more information, see Amazon EC2 Security Groups for Linux Instances in the Amazon EC2 User Guide.

", + "documentation":"

The ID of your Amazon EC2 security group. This ID is used to control network access to the endpoint that Amazon FSx creates on your behalf in each subnet. For more information, see Amazon EC2 Security Groups for Linux Instances in the Amazon EC2 User Guide.

", "max":20, "min":11, "pattern":"^(sg-[0-9a-f]{8,})$" @@ -1180,11 +1194,11 @@ "StorageCapacity":{ "type":"integer", "documentation":"

The storage capacity for your Amazon FSx file system, in gibibytes.

", - "min":300 + "min":1 }, "SubnetId":{ "type":"string", - "documentation":"

The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). For more information, see VPC and Subnets in the Amazon VPC User Guide.

", + "documentation":"

The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). For more information, see VPC and Subnets in the Amazon VPC User Guide.

", "max":24, "min":15, "pattern":"^(subnet-[0-9a-f]{8,})$" @@ -1213,8 +1227,7 @@ "type":"string", "documentation":"

A string of 1 to 128 characters that specifies the key for a tag. Tag keys must be unique for the resource to which they are attached.

", "max":128, - "min":1, - "pattern":"^(^(?!aws:).[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "min":1 }, "TagKeys":{ "type":"list", @@ -1251,8 +1264,7 @@ "type":"string", "documentation":"

A string of 0 to 256 characters that specifies the value for a tag. Tag values can be null and don't have to be unique in a tag set.

", "max":256, - "min":0, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "min":0 }, "Tags":{ "type":"list", @@ -1343,7 +1355,7 @@ }, "VpcId":{ "type":"string", - "documentation":"

The ID of your virtual private cloud (VPC). For more information, see VPC and Subnets in the Amazon VPC User Guide.

", + "documentation":"

The ID of your virtual private cloud (VPC). For more information, see VPC and Subnets in the Amazon VPC User Guide.

", "max":21, "min":12, "pattern":"^(vpc-[0-9a-f]{8,})$" diff --git a/botocore/data/gamelift/2015-10-01/paginators-1.json b/botocore/data/gamelift/2015-10-01/paginators-1.json index ea142457..270e8408 100644 --- a/botocore/data/gamelift/2015-10-01/paginators-1.json +++ b/botocore/data/gamelift/2015-10-01/paginators-1.json @@ -1,3 +1,100 @@ { - "pagination": {} + "pagination": { + "DescribeFleetAttributes": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "FleetAttributes" + }, + "DescribeFleetCapacity": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "FleetCapacity" + }, + "DescribeFleetEvents": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Events" + }, + "DescribeFleetUtilization": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "FleetUtilization" + }, + "DescribeGameSessionDetails": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "GameSessionDetails" + }, + "DescribeGameSessionQueues": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "GameSessionQueues" + }, + "DescribeGameSessions": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "GameSessions" + }, + "DescribeInstances": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Instances" + }, + "DescribeMatchmakingConfigurations": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Configurations" + }, + "DescribeMatchmakingRuleSets": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "RuleSets" + }, + "DescribePlayerSessions": { + "input_token": "NextToken", + "limit_key": "Limit", + 
"output_token": "NextToken", + "result_key": "PlayerSessions" + }, + "DescribeScalingPolicies": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "ScalingPolicies" + }, + "ListAliases": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Aliases" + }, + "ListBuilds": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "Builds" + }, + "ListFleets": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "FleetIds" + }, + "SearchGameSessions": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "GameSessions" + } + } } diff --git a/botocore/data/gamelift/2015-10-01/service-2.json b/botocore/data/gamelift/2015-10-01/service-2.json index 4b399380..54bea907 100644 --- a/botocore/data/gamelift/2015-10-01/service-2.json +++ b/botocore/data/gamelift/2015-10-01/service-2.json @@ -26,7 +26,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where all players accepted the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to accept the match, the ticket status is set to FAILED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Matchmaking-related operations include:

" + "documentation":"

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where all players accepted the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to accept the match, the ticket status is set to FAILED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

" }, "CreateAlias":{ "name":"CreateAlias", @@ -43,7 +43,7 @@ {"shape":"InternalServiceException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates an alias for a fleet. In most situations, you can use an alias ID in place of a fleet ID. By using a fleet alias instead of a specific fleet ID, you can switch gameplay and players to a new fleet without changing your game client or other game components. For example, for games in production, using an alias allows you to seamlessly redirect your player base to a new game server update.

Amazon GameLift supports two types of routing strategies for aliases: simple and terminal. A simple alias points to an active fleet. A terminal alias is used to display messaging or link to a URL instead of routing players to an active fleet. For example, you might use a terminal alias when a game version is no longer supported and you want to direct players to an upgrade site.

To create a fleet alias, specify an alias name, routing strategy, and optional description. Each simple alias can point to only one fleet, but a fleet can have multiple aliases. If successful, a new alias record is returned, including an alias ID, which you can reference when creating a game session. You can reassign an alias to another fleet by calling UpdateAlias.

Alias-related operations include:

" + "documentation":"

Creates an alias for a fleet. In most situations, you can use an alias ID in place of a fleet ID. By using a fleet alias instead of a specific fleet ID, you can switch gameplay and players to a new fleet without changing your game client or other game components. For example, for games in production, using an alias allows you to seamlessly redirect your player base to a new game server update.

Amazon GameLift supports two types of routing strategies for aliases: simple and terminal. A simple alias points to an active fleet. A terminal alias is used to display messaging or link to a URL instead of routing players to an active fleet. For example, you might use a terminal alias when a game version is no longer supported and you want to direct players to an upgrade site.

To create a fleet alias, specify an alias name, routing strategy, and optional description. Each simple alias can point to only one fleet, but a fleet can have multiple aliases. If successful, a new alias record is returned, including an alias ID, which you can reference when creating a game session. You can reassign an alias to another fleet by calling UpdateAlias.

" }, "CreateBuild":{ "name":"CreateBuild", @@ -59,7 +59,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates a new Amazon GameLift build record for your game server binary files and points to the location of your game server build files in an Amazon Simple Storage Service (Amazon S3) location.

Game server binaries must be combined into a .zip file for use with Amazon GameLift. See Uploading Your Game for more information.

To create new builds quickly and easily, use the AWS CLI command upload-build . This helper command uploads your build and creates a new build record in one step, and automatically handles the necessary permissions. See Upload Build Files to Amazon GameLift for more help.

The CreateBuild operation should be used only when you need to manually upload your build files, as in the following scenarios:

  • Store a build file in an Amazon S3 bucket under your own AWS account. To use this option, you must first give Amazon GameLift access to that Amazon S3 bucket. See Create a Build with Files in Amazon S3 for detailed help. To create a new build record using files in your Amazon S3 bucket, call CreateBuild and specify a build name, operating system, and the storage location of your game build.

  • Upload a build file directly to Amazon GameLift's Amazon S3 account. To use this option, you first call CreateBuild with a build name and operating system. This action creates a new build record and returns an Amazon S3 storage location (bucket and key only) and temporary access credentials. Use the credentials to manually upload your build file to the storage location (see the Amazon S3 topic Uploading Objects). You can upload files to a location only once.

If successful, this operation creates a new build record with a unique build ID and places it in INITIALIZED status. You can use DescribeBuild to check the status of your build. A build must be in READY status before it can be used to create fleets.

Build-related operations include:

" + "documentation":"

Creates a new Amazon GameLift build record for your game server binary files and points to the location of your game server build files in an Amazon Simple Storage Service (Amazon S3) location.

Game server binaries must be combined into a .zip file for use with Amazon GameLift. See Uploading Your Game for more information.

To create new builds quickly and easily, use the AWS CLI command upload-build . This helper command uploads your build and creates a new build record in one step, and automatically handles the necessary permissions. See Upload Build Files to Amazon GameLift for more help.

The CreateBuild operation should be used only when you need to manually upload your build files, as in the following scenarios:

  • Store a build file in an Amazon S3 bucket under your own AWS account. To use this option, you must first give Amazon GameLift access to that Amazon S3 bucket. See Create a Build with Files in Amazon S3 for detailed help. To create a new build record using files in your Amazon S3 bucket, call CreateBuild and specify a build name, operating system, and the storage location of your game build.

  • Upload a build file directly to Amazon GameLift's Amazon S3 account. To use this option, you first call CreateBuild with a build name and operating system. This action creates a new build record and returns an Amazon S3 storage location (bucket and key only) and temporary access credentials. Use the credentials to manually upload your build file to the storage location (see the Amazon S3 topic Uploading Objects). You can upload files to a location only once.

If successful, this operation creates a new build record with a unique build ID and places it in INITIALIZED status. You can use DescribeBuild to check the status of your build. A build must be in READY status before it can be used to create fleets.

" }, "CreateFleet":{ "name":"CreateFleet", @@ -77,7 +77,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Creates a new fleet to run your game servers. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple server processes to host game sessions. You set up a fleet to use instances with certain hardware specifications (see Amazon EC2 Instance Types for more information), and deploy your game build to run on each instance.

To create a new fleet, you must specify the following: (1) a fleet name, (2) the build ID of a successfully uploaded game build, (3) an EC2 instance type, and (4) a run-time configuration, which describes the server processes to run on each instance in the fleet. If you don't specify a fleet type (on-demand or spot), the new fleet uses on-demand instances by default.

You can also configure the new fleet with the following settings:

  • Fleet description

  • Access permissions for inbound traffic

  • Fleet-wide game session protection

  • Resource usage limits

If you use Amazon CloudWatch for metrics, you can add the new fleet to a metric group. By adding multiple fleets to a metric group, you can view aggregated metrics for all the fleets in the group.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks. You can track the process of a fleet by checking the fleet status or by monitoring fleet creation events:

  • Creates a fleet record. Status: NEW.

  • Begins writing events to the fleet event log, which can be accessed in the Amazon GameLift console.

  • Sets the fleet's target capacity to 1 (desired instances), which triggers Amazon GameLift to start one new EC2 instance.

  • Downloads the game build to the new instance and installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING.

  • Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, Amazon GameLift staggers each launch by a few seconds. Status: ACTIVATING.

  • Sets the fleet's status to ACTIVE as soon as one server process is ready to host a game session.

Fleet-related operations include:

" + "documentation":"

Creates a new fleet to run your game servers. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple server processes to host game sessions. You set up a fleet to use instances with certain hardware specifications (see Amazon EC2 Instance Types), and deploy your game build to the fleet.

To create a new fleet, you must provide the following: (1) a fleet name, (2) an EC2 instance type, (3) the build ID for your game build, and (4) a run-time configuration, which specifies the server processes to run on each instance in the fleet. If fleet type is not set, the new fleet will use on-demand instances by default.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks. You can track the process of a fleet by checking the fleet status or by monitoring fleet creation events:

  • Creates a fleet record. Status: NEW.

  • Begins writing events to the fleet event log, which can be accessed in the Amazon GameLift console.

  • Sets the fleet's target capacity to 1 (desired instances), which triggers Amazon GameLift to start one new EC2 instance.

  • Downloads the game build to the new instance and installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING.

  • Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, Amazon GameLift staggers each launch by a few seconds. Status: ACTIVATING.

  • Sets the fleet's status to ACTIVE as soon as one server process is ready to host a game session.

Learn more

See Amazon GameLift Developer Guide topics in Working with Fleets.

Related operations

" }, "CreateGameSession":{ "name":"CreateGameSession", @@ -99,7 +99,7 @@ {"shape":"LimitExceededException"}, {"shape":"IdempotentParameterMismatchException"} ], - "documentation":"

Creates a multiplayer game session for players. This action creates a game session record and assigns an available server process in the specified fleet to host the game session. A fleet must have an ACTIVE status before a game session can be created in it.

To create a game session, specify either fleet ID or alias ID and indicate a maximum number of players to allow in the game session. You can also provide a name and game-specific properties for this game session. If successful, a GameSession object is returned containing the game session properties and other settings you specified.

Idempotency tokens. You can add a token that uniquely identifies game session requests. This is useful for ensuring that game session requests are idempotent. Multiple requests with the same idempotency token are processed only once; subsequent requests return the original result. All response values are the same with the exception of game session status, which may change.

Resource creation limits. If you are creating a game session on a fleet with a resource creation limit policy in force, then you must specify a creator ID. Without this ID, Amazon GameLift has no way to evaluate the policy for this new game session request.

Player acceptance policy. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

Game session logs. Logs are retained for all active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

Available in Amazon GameLift Local.

Game-session-related operations include:

" + "documentation":"

Creates a multiplayer game session for players. This action creates a game session record and assigns an available server process in the specified fleet to host the game session. A fleet must have an ACTIVE status before a game session can be created in it.

To create a game session, specify either fleet ID or alias ID and indicate a maximum number of players to allow in the game session. You can also provide a name and game-specific properties for this game session. If successful, a GameSession object is returned containing the game session properties and other settings you specified.

Idempotency tokens. You can add a token that uniquely identifies game session requests. This is useful for ensuring that game session requests are idempotent. Multiple requests with the same idempotency token are processed only once; subsequent requests return the original result. All response values are the same with the exception of game session status, which may change.

Resource creation limits. If you are creating a game session on a fleet with a resource creation limit policy in force, then you must specify a creator ID. Without this ID, Amazon GameLift has no way to evaluate the policy for this new game session request.

Player acceptance policy. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

Game session logs. Logs are retained for all active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

Available in Amazon GameLift Local.

" }, "CreateGameSessionQueue":{ "name":"CreateGameSessionQueue", @@ -115,7 +115,7 @@ {"shape":"UnauthorizedException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Establishes a new queue for processing requests to place new game sessions. A queue identifies where new game sessions can be hosted -- by specifying a list of destinations (fleets or aliases) -- and how long requests can wait in the queue before timing out. You can set up a queue to try to place game sessions on fleets in multiple regions. To add placement requests to a queue, call StartGameSessionPlacement and reference the queue name.

Destination order. When processing a request for a game session, Amazon GameLift tries each destination in order until it finds one with available resources to host the new game session. A queue's default order is determined by how destinations are listed. The default order is overridden when a game session placement request provides player latency information. Player latency information enables Amazon GameLift to prioritize destinations where players report the lowest average latency, as a result placing the new game session where the majority of players will have the best possible gameplay experience.

Player latency policies. For placement requests containing player latency information, use player latency policies to protect individual players from very high latencies. With a latency cap, even when a destination can deliver a low latency for most players, the game is not placed where any individual player is reporting latency higher than a policy's maximum. A queue can have multiple latency policies, which are enforced consecutively starting with the policy with the lowest latency cap. Use multiple policies to gradually relax latency controls; for example, you might set a policy with a low latency cap for the first 60 seconds, a second policy with a higher cap for the next 60 seconds, etc.

To create a new queue, provide a name, timeout value, a list of destinations and, if desired, a set of latency policies. If successful, a new queue object is returned.

Queue-related operations include:

" + "documentation":"

Establishes a new queue for processing requests to place new game sessions. A queue identifies where new game sessions can be hosted -- by specifying a list of destinations (fleets or aliases) -- and how long requests can wait in the queue before timing out. You can set up a queue to try to place game sessions on fleets in multiple regions. To add placement requests to a queue, call StartGameSessionPlacement and reference the queue name.

Destination order. When processing a request for a game session, Amazon GameLift tries each destination in order until it finds one with available resources to host the new game session. A queue's default order is determined by how destinations are listed. The default order is overridden when a game session placement request provides player latency information. Player latency information enables Amazon GameLift to prioritize destinations where players report the lowest average latency, as a result placing the new game session where the majority of players will have the best possible gameplay experience.

Player latency policies. For placement requests containing player latency information, use player latency policies to protect individual players from very high latencies. With a latency cap, even when a destination can deliver a low latency for most players, the game is not placed where any individual player is reporting latency higher than a policy's maximum. A queue can have multiple latency policies, which are enforced consecutively starting with the policy with the lowest latency cap. Use multiple policies to gradually relax latency controls; for example, you might set a policy with a low latency cap for the first 60 seconds, a second policy with a higher cap for the next 60 seconds, etc.

To create a new queue, provide a name, timeout value, a list of destinations and, if desired, a set of latency policies. If successful, a new queue object is returned.

" }, "CreateMatchmakingConfiguration":{ "name":"CreateMatchmakingConfiguration", @@ -132,7 +132,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.

To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.

Player acceptance -- In each configuration, you have the option to require that all players accept participation in a proposed match. To enable this feature, set AcceptanceRequired to true and specify a time limit for player acceptance. Players have the option to accept or reject a proposed match, and a match does not move ahead to game session placement unless all matched players accept.

Matchmaking status notification -- There are two ways to track the progress of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; or (2) receiving notifications with Amazon Simple Notification Service (SNS). To use notifications, you first need to set up an SNS topic to receive the notifications, and provide the topic ARN in the matchmaking configuration (see Setting up Notifications for Matchmaking). Since notifications promise only \"best effort\" delivery, we recommend calling DescribeMatchmaking if no notifications are received within 30 seconds.

Operations related to match configurations and rule sets include:

" + "documentation":"

Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.

To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.

Player acceptance -- In each configuration, you have the option to require that all players accept participation in a proposed match. To enable this feature, set AcceptanceRequired to true and specify a time limit for player acceptance. Players have the option to accept or reject a proposed match, and a match does not move ahead to game session placement unless all matched players accept.

Matchmaking status notification -- There are two ways to track the progress of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; or (2) receiving notifications with Amazon Simple Notification Service (SNS). To use notifications, you first need to set up an SNS topic to receive the notifications, and provide the topic ARN in the matchmaking configuration (see Setting up Notifications for Matchmaking). Since notifications promise only \"best effort\" delivery, we recommend calling DescribeMatchmaking if no notifications are received within 30 seconds.

" }, "CreateMatchmakingRuleSet":{ "name":"CreateMatchmakingRuleSet", @@ -147,7 +147,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams, and sets the parameters for acceptable player matches, such as minimum skill level or character type. Rule sets are used in matchmaking configurations, which define how matchmaking requests are handled. Each MatchmakingConfiguration uses one rule set; you can set up multiple rule sets to handle the scenarios that suit your game (such as for different game modes), and create a separate matchmaking configuration for each rule set. See additional information on rule set content in the MatchmakingRuleSet structure. For help creating rule sets, including useful examples, see the topic Adding FlexMatch to Your Game.

Once created, matchmaking rule sets cannot be changed or deleted, so we recommend checking the rule set syntax using ValidateMatchmakingRuleSet before creating the rule set.

To create a matchmaking rule set, provide the set of rules and a unique name. Rule sets must be defined in the same region as the matchmaking configuration they will be used with. Rule sets cannot be edited or deleted. If you need to change a rule set, create a new one with the necessary edits and then update matchmaking configurations to use the new rule set.

Operations related to match configurations and rule sets include:

" + "documentation":"

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams, and sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide a unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same region as the matchmaking configuration they will be used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

" }, "CreatePlayerSession":{ "name":"CreatePlayerSession", @@ -166,7 +166,7 @@ {"shape":"InvalidRequestException"}, {"shape":"NotFoundException"} ], - "documentation":"

Adds a player to a game session and creates a player session record. Before a player can be added, a game session must have an ACTIVE status, have a creation policy of ALLOW_ALL, and have an open player slot. To add a group of players to a game session, use CreatePlayerSessions.

To create a player session, specify a game session ID, player ID, and optionally a string of player data. If successful, the player is added to the game session and a new PlayerSession object is returned. Player sessions cannot be updated.

Available in Amazon GameLift Local.

Player-session-related operations include:

" + "documentation":"

Adds a player to a game session and creates a player session record. Before a player can be added, a game session must have an ACTIVE status, have a creation policy of ALLOW_ALL, and have an open player slot. To add a group of players to a game session, use CreatePlayerSessions.

To create a player session, specify a game session ID, player ID, and optionally a string of player data. If successful, the player is added to the game session and a new PlayerSession object is returned. Player sessions cannot be updated.

Available in Amazon GameLift Local.

" }, "CreatePlayerSessions":{ "name":"CreatePlayerSessions", @@ -185,7 +185,7 @@ {"shape":"InvalidRequestException"}, {"shape":"NotFoundException"} ], - "documentation":"

Adds a group of players to a game session. This action is useful with a team matching feature. Before players can be added, a game session must have an ACTIVE status, have a creation policy of ALLOW_ALL, and have an open player slot. To add a single player to a game session, use CreatePlayerSession.

To create player sessions, specify a game session ID, a list of player IDs, and optionally a set of player data strings. If successful, the players are added to the game session and a set of new PlayerSession objects is returned. Player sessions cannot be updated.

Available in Amazon GameLift Local.

Player-session-related operations include:

" + "documentation":"

Adds a group of players to a game session. This action is useful with a team matching feature. Before players can be added, a game session must have an ACTIVE status, have a creation policy of ALLOW_ALL, and have an open player slot. To add a single player to a game session, use CreatePlayerSession.

To create player sessions, specify a game session ID, a list of player IDs, and optionally a set of player data strings. If successful, the players are added to the game session and a set of new PlayerSession objects is returned. Player sessions cannot be updated.

Available in Amazon GameLift Local.

" }, "CreateVpcPeeringAuthorization":{ "name":"CreateVpcPeeringAuthorization", @@ -201,7 +201,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Requests authorization to create or delete a peer connection between the VPC for your Amazon GameLift fleet and a virtual private cloud (VPC) in your AWS account. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. Once you've received authorization, call CreateVpcPeeringConnection to establish the peering connection. For more information, see VPC Peering with Amazon GameLift Fleets.

You can peer with VPCs that are owned by any AWS account you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different regions.

To request authorization to create a connection, call this operation from the AWS account with the VPC that you want to peer to your Amazon GameLift fleet. For example, to enable your game servers to retrieve data from a DynamoDB table, use the account that manages that DynamoDB resource. Identify the following values: (1) The ID of the VPC that you want to peer with, and (2) the ID of the AWS account that you use to manage Amazon GameLift. If successful, VPC peering is authorized for the specified VPC.

To request authorization to delete a connection, call this operation from the AWS account with the VPC that is peered with your Amazon GameLift fleet. Identify the following values: (1) VPC ID that you want to delete the peering connection for, and (2) ID of the AWS account that you use to manage Amazon GameLift.

The authorization remains valid for 24 hours unless it is canceled by a call to DeleteVpcPeeringAuthorization. You must create or delete the peering connection while the authorization is valid.

VPC peering connection operations include:

" + "documentation":"

Requests authorization to create or delete a peer connection between the VPC for your Amazon GameLift fleet and a virtual private cloud (VPC) in your AWS account. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. Once you've received authorization, call CreateVpcPeeringConnection to establish the peering connection. For more information, see VPC Peering with Amazon GameLift Fleets.

You can peer with VPCs that are owned by any AWS account you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different regions.

To request authorization to create a connection, call this operation from the AWS account with the VPC that you want to peer to your Amazon GameLift fleet. For example, to enable your game servers to retrieve data from a DynamoDB table, use the account that manages that DynamoDB resource. Identify the following values: (1) The ID of the VPC that you want to peer with, and (2) the ID of the AWS account that you use to manage Amazon GameLift. If successful, VPC peering is authorized for the specified VPC.

To request authorization to delete a connection, call this operation from the AWS account with the VPC that is peered with your Amazon GameLift fleet. Identify the following values: (1) VPC ID that you want to delete the peering connection for, and (2) ID of the AWS account that you use to manage Amazon GameLift.

The authorization remains valid for 24 hours unless it is canceled by a call to DeleteVpcPeeringAuthorization. You must create or delete the peering connection while the authorization is valid.

" }, "CreateVpcPeeringConnection":{ "name":"CreateVpcPeeringConnection", @@ -217,7 +217,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Establishes a VPC peering connection between a virtual private cloud (VPC) in an AWS account with the VPC for your Amazon GameLift fleet. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. You can peer with VPCs in any AWS account that you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different regions. For more information, see VPC Peering with Amazon GameLift Fleets.

Before calling this operation to establish the peering connection, you first need to call CreateVpcPeeringAuthorization and identify the VPC you want to peer with. Once the authorization for the specified VPC is issued, you have 24 hours to establish the connection. These two operations handle all tasks necessary to peer the two VPCs, including acceptance, updating routing tables, etc.

To establish the connection, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the following values: (1) The ID of the fleet you want to enable a VPC peering connection for; (2) The AWS account with the VPC that you want to peer with; and (3) The ID of the VPC you want to peer with. This operation is asynchronous. If successful, a VpcPeeringConnection request is created. You can use continuous polling to track the request's status using DescribeVpcPeeringConnections, or by monitoring fleet events for success or failure using DescribeFleetEvents.

VPC peering connection operations include:

" + "documentation":"

Establishes a VPC peering connection between a virtual private cloud (VPC) in an AWS account with the VPC for your Amazon GameLift fleet. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. You can peer with VPCs in any AWS account that you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different regions. For more information, see VPC Peering with Amazon GameLift Fleets.

Before calling this operation to establish the peering connection, you first need to call CreateVpcPeeringAuthorization and identify the VPC you want to peer with. Once the authorization for the specified VPC is issued, you have 24 hours to establish the connection. These two operations handle all tasks necessary to peer the two VPCs, including acceptance, updating routing tables, etc.

To establish the connection, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the following values: (1) The ID of the fleet you want to enable a VPC peering connection for; (2) The AWS account with the VPC that you want to peer with; and (3) The ID of the VPC you want to peer with. This operation is asynchronous. If successful, a VpcPeeringConnection request is created. You can use continuous polling to track the request's status using DescribeVpcPeeringConnections, or by monitoring fleet events for success or failure using DescribeFleetEvents.

" }, "DeleteAlias":{ "name":"DeleteAlias", @@ -232,7 +232,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deletes an alias. This action removes all record of the alias. Game clients attempting to access a server process using the deleted alias receive an error. To delete an alias, specify the alias ID to be deleted.

Alias-related operations include:

" + "documentation":"

Deletes an alias. This action removes all record of the alias. Game clients attempting to access a server process using the deleted alias receive an error. To delete an alias, specify the alias ID to be deleted.

" }, "DeleteBuild":{ "name":"DeleteBuild", @@ -247,7 +247,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Deletes a build. This action permanently deletes the build record and any uploaded build files.

To delete a build, specify its ID. Deleting a build does not affect the status of any active fleets using the build, but you can no longer create new fleets with the deleted build.

Build-related operations include:

" + "documentation":"

Deletes a build. This action permanently deletes the build record and any uploaded build files.

To delete a build, specify its ID. Deleting a build does not affect the status of any active fleets using the build, but you can no longer create new fleets with the deleted build.

" }, "DeleteFleet":{ "name":"DeleteFleet", @@ -263,7 +263,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

Fleet-related operations include:

" + "documentation":"

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

" }, "DeleteGameSessionQueue":{ "name":"DeleteGameSessionQueue", @@ -279,7 +279,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Deletes a game session queue. This action means that any StartGameSessionPlacement requests that reference this queue will fail. To delete a queue, specify the queue name.

Queue-related operations include:

" + "documentation":"

Deletes a game session queue. This action means that any StartGameSessionPlacement requests that reference this queue will fail. To delete a queue, specify the queue name.

" }, "DeleteMatchmakingConfiguration":{ "name":"DeleteMatchmakingConfiguration", @@ -295,7 +295,23 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

Operations related to match configurations and rule sets include:

" + "documentation":"

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

" + }, + "DeleteMatchmakingRuleSet":{ + "name":"DeleteMatchmakingRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMatchmakingRuleSetInput"}, + "output":{"shape":"DeleteMatchmakingRuleSetOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Deletes an existing matchmaking rule set. To delete the rule set, provide the rule set name. Rule sets cannot be deleted if they are currently being used by a matchmaking configuration.

Learn more

Related operations

" }, "DeleteScalingPolicy":{ "name":"DeleteScalingPolicy", @@ -310,7 +326,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Deletes a fleet scaling policy. This action means that the policy is no longer in force and removes all record of it. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.

To temporarily suspend scaling policies, call StopFleetActions. This operation suspends all policies for the fleet.

Operations related to fleet capacity scaling include:

" + "documentation":"

Deletes a fleet scaling policy. This action means that the policy is no longer in force and removes all record of it. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.

To temporarily suspend scaling policies, call StopFleetActions. This operation suspends all policies for the fleet.

" }, "DeleteVpcPeeringAuthorization":{ "name":"DeleteVpcPeeringAuthorization", @@ -326,7 +342,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Cancels a pending VPC peering authorization for the specified VPC. If the authorization has already been used to create a peering connection, call DeleteVpcPeeringConnection to remove the connection.

VPC peering connection operations include:

" + "documentation":"

Cancels a pending VPC peering authorization for the specified VPC. If the authorization has already been used to create a peering connection, call DeleteVpcPeeringConnection to remove the connection.

" }, "DeleteVpcPeeringConnection":{ "name":"DeleteVpcPeeringConnection", @@ -342,7 +358,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Removes a VPC peering connection. To delete the connection, you must have a valid authorization for the VPC peering connection that you want to delete. You can check for an authorization by calling DescribeVpcPeeringAuthorizations or request a new one using CreateVpcPeeringAuthorization.

Once a valid authorization exists, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the connection to delete by the connection ID and fleet ID. If successful, the connection is removed.

VPC peering connection operations include:

" + "documentation":"

Removes a VPC peering connection. To delete the connection, you must have a valid authorization for the VPC peering connection that you want to delete. You can check for an authorization by calling DescribeVpcPeeringAuthorizations or request a new one using CreateVpcPeeringAuthorization.

Once a valid authorization exists, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the connection to delete by the connection ID and fleet ID. If successful, the connection is removed.

" }, "DescribeAlias":{ "name":"DescribeAlias", @@ -358,7 +374,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves properties for an alias. This operation returns all alias metadata and settings. To get an alias's target fleet ID only, use ResolveAlias.

To get alias properties, specify the alias ID. If successful, the requested alias record is returned.

Alias-related operations include:

" + "documentation":"

Retrieves properties for an alias. This operation returns all alias metadata and settings. To get an alias's target fleet ID only, use ResolveAlias.

To get alias properties, specify the alias ID. If successful, the requested alias record is returned.

" }, "DescribeBuild":{ "name":"DescribeBuild", @@ -374,7 +390,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves properties for a build. To request a build record, specify a build ID. If successful, an object containing the build properties is returned.

Build-related operations include:

" + "documentation":"

Retrieves properties for a build. To request a build record, specify a build ID. If successful, an object containing the build properties is returned.

" }, "DescribeEC2InstanceLimits":{ "name":"DescribeEC2InstanceLimits", @@ -389,7 +405,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the following information for the specified EC2 instance type:

  • maximum number of instances allowed per AWS account (service limit)

  • current usage level for the AWS account

Service limits vary depending on region. Available regions for Amazon GameLift can be found in the AWS Management Console for Amazon GameLift (see the drop-down list in the upper right corner).

Fleet-related operations include:

" + "documentation":"

Retrieves the following information for the specified EC2 instance type:

  • maximum number of instances allowed per AWS account (service limit)

  • current usage level for the AWS account

Service limits vary depending on region. Available regions for Amazon GameLift can be found in the AWS Management Console for Amazon GameLift (see the drop-down list in the upper right corner).

" }, "DescribeFleetAttributes":{ "name":"DescribeFleetAttributes", @@ -405,7 +421,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves fleet properties, including metadata, status, and configuration, for one or more fleets. You can request attributes for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Fleet-related operations include:

" + "documentation":"

Retrieves fleet properties, including metadata, status, and configuration, for one or more fleets. You can request attributes for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

" }, "DescribeFleetCapacity":{ "name":"DescribeFleetCapacity", @@ -421,7 +437,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the current status of fleet capacity for one or more fleets. This information includes the number of instances that have been requested for the fleet and the number currently active. You can request capacity for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Fleet-related operations include:

" + "documentation":"

Retrieves the current status of fleet capacity for one or more fleets. This information includes the number of instances that have been requested for the fleet and the number currently active. You can request capacity for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

" }, "DescribeFleetEvents":{ "name":"DescribeFleetEvents", @@ -437,7 +453,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves entries from the specified fleet's event log. You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of event log entries matching the request are returned.

Fleet-related operations include:

" + "documentation":"

Retrieves entries from the specified fleet's event log. You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of event log entries matching the request are returned.

" }, "DescribeFleetPortSettings":{ "name":"DescribeFleetPortSettings", @@ -453,7 +469,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the inbound connection permissions for a fleet. Connection permissions include a range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. To get a fleet's inbound connection permissions, specify a fleet ID. If successful, a collection of IpPermission objects is returned for the requested fleet ID. If the requested fleet has been deleted, the result set is empty.

Fleet-related operations include:

" + "documentation":"

Retrieves the inbound connection permissions for a fleet. Connection permissions include a range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. To get a fleet's inbound connection permissions, specify a fleet ID. If successful, a collection of IpPermission objects is returned for the requested fleet ID. If the requested fleet has been deleted, the result set is empty.

" }, "DescribeFleetUtilization":{ "name":"DescribeFleetUtilization", @@ -469,7 +485,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves utilization statistics for one or more fleets. You can request utilization data for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetUtilization object is returned for each requested fleet ID. When specifying a list of fleet IDs, utilization objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Fleet-related operations include:

" + "documentation":"

Retrieves utilization statistics for one or more fleets. You can request utilization data for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetUtilization object is returned for each requested fleet ID. When specifying a list of fleet IDs, utilization objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

" }, "DescribeGameSessionDetails":{ "name":"DescribeGameSessionDetails", @@ -486,7 +502,7 @@ {"shape":"UnauthorizedException"}, {"shape":"TerminalRoutingStrategyException"} ], - "documentation":"

Retrieves properties, including the protection policy in force, for one or more game sessions. This action can be used in several ways: (1) provide a GameSessionId or GameSessionArn to request details for a specific game session; (2) provide either a FleetId or an AliasId to request properties for all game sessions running on a fleet.

To get game session record(s), specify just one of the following: game session ID, fleet ID, or alias ID. You can filter this request by game session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionDetail object is returned for each session matching the request.

Game-session-related operations include:

" + "documentation":"

Retrieves properties, including the protection policy in force, for one or more game sessions. This action can be used in several ways: (1) provide a GameSessionId or GameSessionArn to request details for a specific game session; (2) provide either a FleetId or an AliasId to request properties for all game sessions running on a fleet.

To get game session record(s), specify just one of the following: game session ID, fleet ID, or alias ID. You can filter this request by game session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionDetail object is returned for each session matching the request.

" }, "DescribeGameSessionPlacement":{ "name":"DescribeGameSessionPlacement", @@ -502,7 +518,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves properties and current status of a game session placement request. To get game session placement details, specify the placement ID. If successful, a GameSessionPlacement object is returned.

Game-session-related operations include:

" + "documentation":"

Retrieves properties and current status of a game session placement request. To get game session placement details, specify the placement ID. If successful, a GameSessionPlacement object is returned.

" }, "DescribeGameSessionQueues":{ "name":"DescribeGameSessionQueues", @@ -518,7 +534,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the properties for one or more game session queues. When requesting multiple queues, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionQueue object is returned for each requested queue. When specifying a list of queues, objects are returned only for queues that currently exist in the region.

Queue-related operations include:

" + "documentation":"

Retrieves the properties for one or more game session queues. When requesting multiple queues, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionQueue object is returned for each requested queue. When specifying a list of queues, objects are returned only for queues that currently exist in the region.

" }, "DescribeGameSessions":{ "name":"DescribeGameSessions", @@ -535,7 +551,7 @@ {"shape":"UnauthorizedException"}, {"shape":"TerminalRoutingStrategyException"} ], - "documentation":"

Retrieves a set of one or more game sessions. Request a specific game session or request all game sessions on a fleet. Alternatively, use SearchGameSessions to request a set of active game sessions that are filtered by certain criteria. To retrieve protection policy settings for game sessions, use DescribeGameSessionDetails.

To get game sessions, specify one of the following: game session ID, fleet ID, or alias ID. You can filter this request by game session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSession object is returned for each game session matching the request.

Available in Amazon GameLift Local.

Game-session-related operations include:

" + "documentation":"

Retrieves a set of one or more game sessions. Request a specific game session or request all game sessions on a fleet. Alternatively, use SearchGameSessions to request a set of active game sessions that are filtered by certain criteria. To retrieve protection policy settings for game sessions, use DescribeGameSessionDetails.

To get game sessions, specify one of the following: game session ID, fleet ID, or alias ID. You can filter this request by game session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSession object is returned for each game session matching the request.

Available in Amazon GameLift Local.

" }, "DescribeInstances":{ "name":"DescribeInstances", @@ -566,7 +582,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

Matchmaking-related operations include:

" + "documentation":"

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

" }, "DescribeMatchmakingConfigurations":{ "name":"DescribeMatchmakingConfigurations", @@ -581,7 +597,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves the details of FlexMatch matchmaking configurations. With this operation, you have the following options: (1) retrieve all existing configurations, (2) provide the names of one or more configurations to retrieve, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

Operations related to match configurations and rule sets include:

" + "documentation":"

Retrieves the details of FlexMatch matchmaking configurations. With this operation, you have the following options: (1) retrieve all existing configurations, (2) provide the names of one or more configurations to retrieve, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

" }, "DescribeMatchmakingRuleSets":{ "name":"DescribeMatchmakingRuleSets", @@ -597,7 +613,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Operations related to match configurations and rule sets include:

" + "documentation":"

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Learn more

Related operations

" }, "DescribePlayerSessions":{ "name":"DescribePlayerSessions", @@ -613,7 +629,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves properties for one or more player sessions. This action can be used in several ways: (1) provide a PlayerSessionId to request properties for a specific player session; (2) provide a GameSessionId to request properties for all player sessions in the specified game session; (3) provide a PlayerId to request properties for all player sessions of a specified player.

To get game session record(s), specify only one of the following: a player session ID, a game session ID, or a player ID. You can filter this request by player session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a PlayerSession object is returned for each session matching the request.

Available in Amazon GameLift Local.

Player-session-related operations include:

" + "documentation":"

Retrieves properties for one or more player sessions. This action can be used in several ways: (1) provide a PlayerSessionId to request properties for a specific player session; (2) provide a GameSessionId to request properties for all player sessions in the specified game session; (3) provide a PlayerId to request properties for all player sessions of a specified player.

To get game session record(s), specify only one of the following: a player session ID, a game session ID, or a player ID. You can filter this request by player session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a PlayerSession object is returned for each session matching the request.

Available in Amazon GameLift Local.

" }, "DescribeRuntimeConfiguration":{ "name":"DescribeRuntimeConfiguration", @@ -629,7 +645,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves the current run-time configuration for the specified fleet. The run-time configuration tells Amazon GameLift how to launch server processes on instances in the fleet.

Fleet-related operations include:

" + "documentation":"

Retrieves the current run-time configuration for the specified fleet. The run-time configuration tells Amazon GameLift how to launch server processes on instances in the fleet.

" }, "DescribeScalingPolicies":{ "name":"DescribeScalingPolicies", @@ -645,7 +661,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Retrieves all scaling policies applied to a fleet.

To get a fleet's scaling policies, specify the fleet ID. You can filter this request by policy status, such as to retrieve only active scaling policies. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a set of ScalingPolicy objects is returned for the fleet.

A fleet may have all of its scaling policies suspended (StopFleetActions). This action does not affect the status of the scaling policies, which remains ACTIVE. To see whether a fleet's scaling policies are in force or suspended, call DescribeFleetAttributes and check the stopped actions.

Operations related to fleet capacity scaling include:

" + "documentation":"

Retrieves all scaling policies applied to a fleet.

To get a fleet's scaling policies, specify the fleet ID. You can filter this request by policy status, such as to retrieve only active scaling policies. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a set of ScalingPolicy objects is returned for the fleet.

A fleet may have all of its scaling policies suspended (StopFleetActions). This action does not affect the status of the scaling policies, which remains ACTIVE. To see whether a fleet's scaling policies are in force or suspended, call DescribeFleetAttributes and check the stopped actions.

" }, "DescribeVpcPeeringAuthorizations":{ "name":"DescribeVpcPeeringAuthorizations", @@ -660,7 +676,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves valid VPC peering authorizations that are pending for the AWS account. This operation returns all VPC peering authorizations and requests for peering. This includes those initiated and received by this account.

VPC peering connection operations include:

" + "documentation":"

Retrieves valid VPC peering authorizations that are pending for the AWS account. This operation returns all VPC peering authorizations and requests for peering. This includes those initiated and received by this account.

" }, "DescribeVpcPeeringConnections":{ "name":"DescribeVpcPeeringConnections", @@ -676,7 +692,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves information on VPC peering connections. Use this operation to get peering information for all fleets or for one specific fleet ID.

To retrieve connection information, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Specify a fleet ID or leave the parameter empty to retrieve all connection records. If successful, the retrieved information includes both active and pending connections. Active connections identify the IpV4 CIDR block that the VPC uses to connect.

VPC peering connection operations include:

" + "documentation":"

Retrieves information on VPC peering connections. Use this operation to get peering information for all fleets or for one specific fleet ID.

To retrieve connection information, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Specify a fleet ID or leave the parameter empty to retrieve all connection records. If successful, the retrieved information includes both active and pending connections. Active connections identify the IpV4 CIDR block that the VPC uses to connect.

" }, "GetGameSessionLogUrl":{ "name":"GetGameSessionLogUrl", @@ -692,7 +708,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves the location of stored game session logs for a specified game session. When a game session is terminated, Amazon GameLift automatically stores the logs in Amazon S3 and retains them for 14 days. Use this URL to download the logs.

See the AWS Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

Game-session-related operations include:

" + "documentation":"

Retrieves the location of stored game session logs for a specified game session. When a game session is terminated, Amazon GameLift automatically stores the logs in Amazon S3 and retains them for 14 days. Use this URL to download the logs.

See the AWS Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

" }, "GetInstanceAccess":{ "name":"GetInstanceAccess", @@ -708,7 +724,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Requests remote access to a fleet instance. Remote access is useful for debugging, gathering benchmarking data, or watching activity in real time.

Access requires credentials that match the operating system of the instance. For a Windows instance, Amazon GameLift returns a user name and password as strings for use with a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name and RSA private key, also as strings, for use with an SSH client. The private key must be saved in the proper format to a .pem file before using. If you're making this request using the AWS CLI, saving the secret can be handled as part of the GetInstanceAccess request. (See the example later in this topic). For more information on remote access, see Remotely Accessing an Instance.

To request access to a specific instance, specify the IDs of the instance and the fleet it belongs to. If successful, an InstanceAccess object is returned containing the instance's IP address and a set of credentials.

" + "documentation":"

Requests remote access to a fleet instance. Remote access is useful for debugging, gathering benchmarking data, or watching activity in real time.

Access requires credentials that match the operating system of the instance. For a Windows instance, Amazon GameLift returns a user name and password as strings for use with a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name and RSA private key, also as strings, for use with an SSH client. The private key must be saved in the proper format to a .pem file before using. If you're making this request using the AWS CLI, saving the secret can be handled as part of the GetInstanceAccess request. (See the example later in this topic). For more information on remote access, see Remotely Accessing an Instance.

To request access to a specific instance, specify the IDs of both the instance and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling DescribeInstances. If successful, an InstanceAccess object is returned containing the instance's IP address and a set of credentials.

" }, "ListAliases":{ "name":"ListAliases", @@ -723,7 +739,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves all aliases for this AWS account. You can filter the result set by alias name and/or routing strategy type. Use the pagination parameters to retrieve results in sequential pages.

Returned aliases are not listed in any particular order.

Alias-related operations include:

" + "documentation":"

Retrieves all aliases for this AWS account. You can filter the result set by alias name and/or routing strategy type. Use the pagination parameters to retrieve results in sequential pages.

Returned aliases are not listed in any particular order.

" }, "ListBuilds":{ "name":"ListBuilds", @@ -738,7 +754,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves build records for all builds associated with the AWS account in use. You can limit results to builds that are in a specific status by using the Status parameter. Use the pagination parameters to retrieve results in a set of sequential pages.

Build records are not listed in any particular order.

Build-related operations include:

" + "documentation":"

Retrieves build records for all builds associated with the AWS account in use. You can limit results to builds that are in a specific status by using the Status parameter. Use the pagination parameters to retrieve results in a set of sequential pages.

Build records are not listed in any particular order.

" }, "ListFleets":{ "name":"ListFleets", @@ -754,7 +770,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves a collection of fleet records for this AWS account. You can filter the result set by build ID. Use the pagination parameters to retrieve results in sequential pages.

Fleet records are not listed in any particular order.

Fleet-related operations include:

" + "documentation":"

Retrieves a collection of fleet records for this AWS account. You can filter the result set by build ID. Use the pagination parameters to retrieve results in sequential pages.

Fleet records are not listed in any particular order.

" }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -770,7 +786,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Creates or updates a scaling policy for a fleet. Scaling policies are used to automatically scale a fleet's hosting capacity to meet player demand. An active scaling policy instructs Amazon GameLift to track a fleet metric and automatically change the fleet's capacity when a certain threshold is reached. There are two types of scaling policies: target-based and rule-based. Use a target-based policy to quickly and efficiently manage fleet scaling; this option is the most commonly used. Use rule-based policies when you need to exert fine-grained control over auto-scaling.

Fleets can have multiple scaling policies of each type in force at the same time; you can have one target-based policy, one or multiple rule-based scaling policies, or both. We recommend caution, however, because multiple auto-scaling policies can have unintended consequences.

You can temporarily suspend all scaling policies for a fleet by calling StopFleetActions with the fleet action AUTO_SCALING. To resume scaling policies, call StartFleetActions with the same fleet action. To stop just one scaling policy--or to permanently remove it--you must delete the policy with DeleteScalingPolicy.

Learn more about how to work with auto-scaling in Set Up Fleet Automatic Scaling.

Target-based policy

A target-based policy tracks a single metric: PercentAvailableGameSessions. This metric tells us how much of a fleet's hosting capacity is ready to host game sessions but is not currently in use. This is the fleet's buffer; it measures the additional player demand that the fleet could handle at current capacity. With a target-based policy, you set your ideal buffer size and leave it to Amazon GameLift to take whatever action is needed to maintain that target.

For example, you might choose to maintain a 10% buffer for a fleet that has the capacity to host 100 simultaneous game sessions. This policy tells Amazon GameLift to take action whenever the fleet's available capacity falls below or rises above 10 game sessions. Amazon GameLift will start new instances or stop unused instances in order to return to the 10% buffer.

To create or update a target-based policy, specify a fleet ID and name, and set the policy type to \"TargetBased\". Specify the metric to track (PercentAvailableGameSessions) and reference a TargetConfiguration object with your desired buffer value. Exclude all other parameters. On a successful request, the policy name is returned. The scaling policy is automatically in force as soon as it's successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

Rule-based policy

A rule-based policy tracks a specified fleet metric, sets a threshold value, and specifies the type of action to initiate when triggered. With a rule-based policy, you can select from several available fleet metrics. Each policy specifies whether to scale up or scale down (and by how much), so you need one policy for each type of action.

For example, a policy may make the following statement: \"If the percentage of idle instances is greater than 20% for more than 15 minutes, then reduce the fleet capacity by 10%.\"

A policy's rule statement has the following structure:

If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment].

To implement the example, the rule statement would look like this:

If [PercentIdleInstances] is [GreaterThanThreshold] [20] for [15] minutes, then [PercentChangeInCapacity] to/by [10].

To create or update a scaling policy, specify a unique combination of name and fleet ID, and set the policy type to \"RuleBased\". Specify the parameter values for a policy rule statement. On a successful request, the policy name is returned. Scaling policies are automatically in force as soon as they're successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

Operations related to fleet capacity scaling include:

" + "documentation":"

Creates or updates a scaling policy for a fleet. Scaling policies are used to automatically scale a fleet's hosting capacity to meet player demand. An active scaling policy instructs Amazon GameLift to track a fleet metric and automatically change the fleet's capacity when a certain threshold is reached. There are two types of scaling policies: target-based and rule-based. Use a target-based policy to quickly and efficiently manage fleet scaling; this option is the most commonly used. Use rule-based policies when you need to exert fine-grained control over auto-scaling.

Fleets can have multiple scaling policies of each type in force at the same time; you can have one target-based policy, one or multiple rule-based scaling policies, or both. We recommend caution, however, because multiple auto-scaling policies can have unintended consequences.

You can temporarily suspend all scaling policies for a fleet by calling StopFleetActions with the fleet action AUTO_SCALING. To resume scaling policies, call StartFleetActions with the same fleet action. To stop just one scaling policy--or to permanently remove it--you must delete the policy with DeleteScalingPolicy.

Learn more about how to work with auto-scaling in Set Up Fleet Automatic Scaling.

Target-based policy

A target-based policy tracks a single metric: PercentAvailableGameSessions. This metric tells us how much of a fleet's hosting capacity is ready to host game sessions but is not currently in use. This is the fleet's buffer; it measures the additional player demand that the fleet could handle at current capacity. With a target-based policy, you set your ideal buffer size and leave it to Amazon GameLift to take whatever action is needed to maintain that target.

For example, you might choose to maintain a 10% buffer for a fleet that has the capacity to host 100 simultaneous game sessions. This policy tells Amazon GameLift to take action whenever the fleet's available capacity falls below or rises above 10 game sessions. Amazon GameLift will start new instances or stop unused instances in order to return to the 10% buffer.

To create or update a target-based policy, specify a fleet ID and name, and set the policy type to \"TargetBased\". Specify the metric to track (PercentAvailableGameSessions) and reference a TargetConfiguration object with your desired buffer value. Exclude all other parameters. On a successful request, the policy name is returned. The scaling policy is automatically in force as soon as it's successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

Rule-based policy

A rule-based policy tracks a specified fleet metric, sets a threshold value, and specifies the type of action to initiate when triggered. With a rule-based policy, you can select from several available fleet metrics. Each policy specifies whether to scale up or scale down (and by how much), so you need one policy for each type of action.

For example, a policy may make the following statement: \"If the percentage of idle instances is greater than 20% for more than 15 minutes, then reduce the fleet capacity by 10%.\"

A policy's rule statement has the following structure:

If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment].

To implement the example, the rule statement would look like this:

If [PercentIdleInstances] is [GreaterThanThreshold] [20] for [15] minutes, then [PercentChangeInCapacity] to/by [10].

To create or update a scaling policy, specify a unique combination of name and fleet ID, and set the policy type to \"RuleBased\". Specify the parameter values for a policy rule statement. On a successful request, the policy name is returned. Scaling policies are automatically in force as soon as they're successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

" }, "RequestUploadCredentials":{ "name":"RequestUploadCredentials", @@ -803,7 +819,7 @@ {"shape":"TerminalRoutingStrategyException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves the fleet ID that a specified alias is currently pointing to.

Alias-related operations include:

" + "documentation":"

Retrieves the fleet ID that a specified alias is currently pointing to.

" }, "SearchGameSessions":{ "name":"SearchGameSessions", @@ -820,7 +836,7 @@ {"shape":"UnauthorizedException"}, {"shape":"TerminalRoutingStrategyException"} ], - "documentation":"

Retrieves all active game sessions that match a set of search criteria and sorts them in a specified order. You can search or sort by the following game session attributes:

  • gameSessionId -- Unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value.

  • gameSessionName -- Name assigned to a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession. Game session names do not need to be unique to a game session.

  • gameSessionProperties -- Custom data defined in a game session's GameProperty parameter. GameProperty values are stored as key:value pairs; the filter expression must indicate the key and a string to search the data values for. For example, to search for game sessions with custom data containing the key:value pair \"gameMode:brawl\", specify the following: gameSessionProperties.gameMode = \"brawl\". All custom data values are searched as strings.

  • maximumSessions -- Maximum number of player sessions allowed for a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession.

  • creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds.

  • playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out.

  • hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join.

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. If successful, a collection of GameSession objects matching the request is returned. Use the pagination parameters to retrieve results as a set of sequential pages.

You can search for game sessions one fleet at a time only. To find game sessions across multiple fleets, you must search each fleet separately and combine the results. This search feature finds only game sessions that are in ACTIVE status. To locate games in statuses other than active, use DescribeGameSessionDetails.

Game-session-related operations include:

" + "documentation":"

Retrieves all active game sessions that match a set of search criteria and sorts them in a specified order. You can search or sort by the following game session attributes:

  • gameSessionId -- Unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value.

  • gameSessionName -- Name assigned to a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession. Game session names do not need to be unique to a game session.

  • gameSessionProperties -- Custom data defined in a game session's GameProperty parameter. GameProperty values are stored as key:value pairs; the filter expression must indicate the key and a string to search the data values for. For example, to search for game sessions with custom data containing the key:value pair \"gameMode:brawl\", specify the following: gameSessionProperties.gameMode = \"brawl\". All custom data values are searched as strings.

  • maximumSessions -- Maximum number of player sessions allowed for a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession.

  • creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds.

  • playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out.

  • hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join.

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. If successful, a collection of GameSession objects matching the request is returned. Use the pagination parameters to retrieve results as a set of sequential pages.

You can search for game sessions one fleet at a time only. To find game sessions across multiple fleets, you must search each fleet separately and combine the results. This search feature finds only game sessions that are in ACTIVE status. To locate games in statuses other than active, use DescribeGameSessionDetails.

" }, "StartFleetActions":{ "name":"StartFleetActions", @@ -836,7 +852,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Resumes activity on a fleet that was suspended with StopFleetActions. Currently, this operation is used to restart a fleet's auto-scaling activity.

To start fleet actions, specify the fleet ID and the type of actions to restart. When auto-scaling fleet actions are restarted, Amazon GameLift once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet were never stopped, this operation will have no effect. You can view a fleet's stopped actions using DescribeFleetAttributes.

Operations related to fleet capacity scaling include:

" + "documentation":"

Resumes activity on a fleet that was suspended with StopFleetActions. Currently, this operation is used to restart a fleet's auto-scaling activity.

To start fleet actions, specify the fleet ID and the type of actions to restart. When auto-scaling fleet actions are restarted, Amazon GameLift once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet were never stopped, this operation will have no effect. You can view a fleet's stopped actions using DescribeFleetAttributes.

" }, "StartGameSessionPlacement":{ "name":"StartGameSessionPlacement", @@ -852,7 +868,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

  • The queue name and a set of game session properties and settings

  • A unique ID (such as a UUID) for the placement. You use this ID to track the status of the placement request

  • (Optional) A set of IDs and player data for each player you want to join to the new game session

  • Latency data for all players (if you want to optimize game play for the players)

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.

Game-session-related operations include:

" + "documentation":"

Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

  • The queue name and a set of game session properties and settings

  • A unique ID (such as a UUID) for the placement. You use this ID to track the status of the placement request

  • (Optional) A set of player data and a unique player ID for each player that you are joining to the new game session (player data is optional, but if you include it, you must also provide a unique ID for each player)

  • Latency data for all players (if you want to optimize game play for the players)

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.

" }, "StartMatchBackfill":{ "name":"StartMatchBackfill", @@ -868,7 +884,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed. For more detail on how to set up backfilling, see Backfill Existing Games with FlexMatch.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

Matchmaking-related operations include:

" + "documentation":"

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed. For more detail on how to set up backfilling, see Backfill Existing Games with FlexMatch.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

" }, "StartMatchmaking":{ "name":"StartMatchmaking", @@ -884,7 +900,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration. For complete information on setting up and using FlexMatch, see the topic Adding FlexMatch to Your Game.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED. Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches.

Tracking ticket status -- A couple of options are available for tracking the status of matchmaking requests:

  • Polling -- Call DescribeMatchmaking. This operation returns the full ticket object, including current status and (for completed tickets) game session connection info. We recommend polling no more than once every 10 seconds.

  • Notifications -- Get event notifications for changes in ticket status using Amazon Simple Notification Service (SNS). Notifications are easy to set up (see CreateMatchmakingConfiguration) and typically deliver match status changes faster and more efficiently than polling. We recommend that you use polling to back up to notifications (since delivery is not guaranteed) and call DescribeMatchmaking only when notifications are not received within 30 seconds.

Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:

  1. Your client code submits a StartMatchmaking request for one or more players and tracks the status of the request ticket.

  2. FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.

  3. If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch for more details).

  4. Once a match is proposed and accepted, the matchmaking tickets move into status PLACING. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.

  5. When the match is successfully placed, the matchmaking tickets move into COMPLETED status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.

Matchmaking-related operations include:

" + "documentation":"

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration. For complete information on setting up and using FlexMatch, see the topic Adding FlexMatch to Your Game.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED. Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches.

Tracking ticket status -- A couple of options are available for tracking the status of matchmaking requests:

  • Polling -- Call DescribeMatchmaking. This operation returns the full ticket object, including current status and (for completed tickets) game session connection info. We recommend polling no more than once every 10 seconds.

  • Notifications -- Get event notifications for changes in ticket status using Amazon Simple Notification Service (SNS). Notifications are easy to set up (see CreateMatchmakingConfiguration) and typically deliver match status changes faster and more efficiently than polling. We recommend that you use polling to back up to notifications (since delivery is not guaranteed) and call DescribeMatchmaking only when notifications are not received within 30 seconds.

Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:

  1. Your client code submits a StartMatchmaking request for one or more players and tracks the status of the request ticket.

  2. FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.

  3. If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch for more details).

  4. Once a match is proposed and accepted, the matchmaking tickets move into status PLACING. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.

  5. When the match is successfully placed, the matchmaking tickets move into COMPLETED status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.

" }, "StopFleetActions":{ "name":"StopFleetActions", @@ -916,7 +932,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Cancels a game session placement that is in PENDING status. To stop a placement, provide the placement ID values. If successful, the placement is moved to CANCELLED status.

Game-session-related operations include:

" + "documentation":"

Cancels a game session placement that is in PENDING status. To stop a placement, provide the placement ID values. If successful, the placement is moved to CANCELLED status.

" }, "StopMatchmaking":{ "name":"StopMatchmaking", @@ -932,7 +948,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Cancels a matchmaking ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

Matchmaking-related operations include:

" + "documentation":"

Cancels a matchmaking ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

" }, "UpdateAlias":{ "name":"UpdateAlias", @@ -948,7 +964,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates properties for an alias. To update properties, specify the alias ID to be updated and provide the information to be changed. To reassign an alias to another fleet, provide an updated routing strategy. If successful, the updated alias record is returned.

Alias-related operations include:

" + "documentation":"

Updates properties for an alias. To update properties, specify the alias ID to be updated and provide the information to be changed. To reassign an alias to another fleet, provide an updated routing strategy. If successful, the updated alias record is returned.

" }, "UpdateBuild":{ "name":"UpdateBuild", @@ -964,7 +980,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates metadata in a build record, including the build name and version. To update the metadata, specify the build ID to update and provide the new values. If successful, a build object containing the updated metadata is returned.

Build-related operations include:

" + "documentation":"

Updates metadata in a build record, including the build name and version. To update the metadata, specify the build ID to update and provide the new values. If successful, a build object containing the updated metadata is returned.

" }, "UpdateFleetAttributes":{ "name":"UpdateFleetAttributes", @@ -983,7 +999,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates fleet properties, including name and description, for a fleet. To update metadata, specify the fleet ID and the property values that you want to change. If successful, the fleet ID for the updated fleet is returned.

Fleet-related operations include:

" + "documentation":"

Updates fleet properties, including name and description, for a fleet. To update metadata, specify the fleet ID and the property values that you want to change. If successful, the fleet ID for the updated fleet is returned.

" }, "UpdateFleetCapacity":{ "name":"UpdateFleetCapacity", @@ -1002,7 +1018,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates capacity settings for a fleet. Use this action to specify the number of EC2 instances (hosts) that you want this fleet to contain. Before calling this action, you may want to call DescribeEC2InstanceLimits to get the maximum capacity based on the fleet's EC2 instance type.

Specify minimum and maximum number of instances. Amazon GameLift will not change fleet capacity to values that fall outside of this range. This is particularly important when using auto-scaling (see PutScalingPolicy) to allow capacity to adjust based on player demand while imposing limits on automatic adjustments.

To update fleet capacity, specify the fleet ID and the number of instances you want the fleet to host. If successful, Amazon GameLift starts or terminates instances so that the fleet's active instance count matches the desired instance count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. If the desired instance count is higher than the instance type's limit, the \"Limit Exceeded\" exception occurs.

Fleet-related operations include:

" + "documentation":"

Updates capacity settings for a fleet. Use this action to specify the number of EC2 instances (hosts) that you want this fleet to contain. Before calling this action, you may want to call DescribeEC2InstanceLimits to get the maximum capacity based on the fleet's EC2 instance type.

Specify minimum and maximum number of instances. Amazon GameLift will not change fleet capacity to values that fall outside of this range. This is particularly important when using auto-scaling (see PutScalingPolicy) to allow capacity to adjust based on player demand while imposing limits on automatic adjustments.

To update fleet capacity, specify the fleet ID and the number of instances you want the fleet to host. If successful, Amazon GameLift starts or terminates instances so that the fleet's active instance count matches the desired instance count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. If the desired instance count is higher than the instance type's limit, the \"Limit Exceeded\" exception occurs.

" }, "UpdateFleetPortSettings":{ "name":"UpdateFleetPortSettings", @@ -1021,7 +1037,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates port settings for a fleet. To update settings, specify the fleet ID to be updated and list the permissions you want to update. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.

Fleet-related operations include:

" + "documentation":"

Updates port settings for a fleet. To update settings, specify the fleet ID to be updated and list the permissions you want to update. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.

" }, "UpdateGameSession":{ "name":"UpdateGameSession", @@ -1039,7 +1055,7 @@ {"shape":"InvalidGameSessionStatusException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Updates game session properties. This includes the session name, maximum player count, protection policy, which controls whether or not an active game session can be terminated during a scale-down event, and the player session creation policy, which controls whether or not new players can join the session. To update a game session, specify the game session ID and the values you want to change. If successful, an updated GameSession object is returned.

Game-session-related operations include:

" + "documentation":"

Updates game session properties. This includes the session name, maximum player count, protection policy, which controls whether or not an active game session can be terminated during a scale-down event, and the player session creation policy, which controls whether or not new players can join the session. To update a game session, specify the game session ID and the values you want to change. If successful, an updated GameSession object is returned.

" }, "UpdateGameSessionQueue":{ "name":"UpdateGameSessionQueue", @@ -1055,7 +1071,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates settings for a game session queue, which determines how new game session requests in the queue are processed. To update settings, specify the queue name to be updated and provide the new settings. When updating destinations, provide a complete list of destinations.

Queue-related operations include:

" + "documentation":"

Updates settings for a game session queue, which determines how new game session requests in the queue are processed. To update settings, specify the queue name to be updated and provide the new settings. When updating destinations, provide a complete list of destinations.

" }, "UpdateMatchmakingConfiguration":{ "name":"UpdateMatchmakingConfiguration", @@ -1071,7 +1087,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Updates settings for a FlexMatch matchmaking configuration. To update settings, specify the configuration name to be updated and provide the new settings.

Operations related to match configurations and rule sets include:

" + "documentation":"

Updates settings for a FlexMatch matchmaking configuration. To update settings, specify the configuration name to be updated and provide the new settings.

" }, "UpdateRuntimeConfiguration":{ "name":"UpdateRuntimeConfiguration", @@ -1088,7 +1104,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidFleetStatusException"} ], - "documentation":"

Updates the current run-time configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's run-time configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update run-time configuration, specify the fleet ID and provide a RuntimeConfiguration object with the updated collection of server process configurations.

Each instance in an Amazon GameLift fleet checks regularly for an updated run-time configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; they continue to run until they end, while Amazon GameLift simply adds new server processes to fit the current run-time configuration. As a result, the run-time configuration changes are applied gradually as existing processes shut down and new processes are launched in Amazon GameLift's normal process recycling activity.

Fleet-related operations include:

" + "documentation":"

Updates the current run-time configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's run-time configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update run-time configuration, specify the fleet ID and provide a RuntimeConfiguration object with the updated collection of server process configurations.

Each instance in an Amazon GameLift fleet checks regularly for an updated run-time configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; they continue to run until they end, while Amazon GameLift simply adds new server processes to fit the current run-time configuration. As a result, the run-time configuration changes are applied gradually as existing processes shut down and new processes are launched in Amazon GameLift's normal process recycling activity.

" }, "ValidateMatchmakingRuleSet":{ "name":"ValidateMatchmakingRuleSet", @@ -1103,7 +1119,7 @@ {"shape":"UnsupportedRegionException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set uses syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set string.

Operations related to match configurations and rule sets include:

" + "documentation":"

Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set is using syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set JSON string.

Learn more

Related operations

" } }, "shapes":{ @@ -1174,7 +1190,7 @@ "documentation":"

Time stamp indicating when this data object was last modified. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Properties describing a fleet alias.

Alias-related operations include:

" + "documentation":"

Properties describing a fleet alias.

" }, "AliasId":{ "type":"string", @@ -1264,7 +1280,7 @@ "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Properties describing a game build.

Build-related operations include:

" + "documentation":"

Properties describing a game build.

" }, "BuildId":{ "type":"string", @@ -1401,7 +1417,7 @@ }, "LogPaths":{ "shape":"StringList", - "documentation":"

This parameter is no longer used. Instead, to specify where Amazon GameLift should store log files once a server process shuts down, use the Amazon GameLift server API ProcessReady() and specify one or more directory paths in logParameters. See more information in the Server API Reference.

" + "documentation":"

This parameter is no longer used. Instead, to specify where Amazon GameLift should store log files once a server process shuts down, use the Amazon GameLift server API ProcessReady() and specify one or more directory paths in logParameters. See more information in the Server API Reference.

" }, "EC2InstanceType":{ "shape":"EC2InstanceType", @@ -1425,19 +1441,19 @@ }, "MetricGroups":{ "shape":"MetricGroupList", - "documentation":"

Name of a metric group to add this fleet to. A metric group tracks metrics across all fleets in the group. Use an existing metric group name to add this fleet to the group, or use a new name to create a new metric group. A fleet can only be included in one metric group at a time.

" + "documentation":"

Name of an Amazon CloudWatch metric group to add this fleet to. A metric group aggregates the metrics for all fleets in the group. Specify an existing metric group name, or provide a new name to create a new metric group. A fleet can only be included in one metric group at a time.

" }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the AWS account with the VPC that you want to peer your Amazon GameLift fleet with. You can find your Account ID in the AWS Management Console under account settings.

" + "documentation":"

Unique identifier for the AWS account with the VPC that you want to peer your Amazon GameLift fleet with. You can find your Account ID in the AWS Management Console under account settings.

" }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. To get VPC information, including IDs, use the Virtual Private Cloud service tools, including the VPC Dashboard in the AWS Management Console.

" + "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" }, "FleetType":{ "shape":"FleetType", - "documentation":"

Indicates whether to use on-demand instances or spot instances for this fleet. If empty, the default is ON_DEMAND. Both categories of instances use identical hardware and configurations, based on the instance type selected for this fleet. You can acquire on-demand instances at any time for a fixed price and keep them as long as you need them. Spot instances have lower prices, but spot pricing is variable, and while in use they can be interrupted (with a two-minute notification). Learn more about Amazon GameLift spot instances at Choose Computing Resources.

" + "documentation":"

Indicates whether to use on-demand instances or spot instances for this fleet. If empty, the default is ON_DEMAND. Both categories of instances use identical hardware and configurations, based on the instance type selected for this fleet. You can acquire on-demand instances at any time for a fixed price and keep them as long as you need them. Spot instances have lower prices, but spot pricing is variable, and while in use they can be interrupted (with a two-minute notification). Learn more about Amazon GameLift spot instances at Set up Access to External Services.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1474,7 +1490,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "CreatorId":{ "shape":"NonZeroAndMaxString", @@ -1490,7 +1506,7 @@ }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" } }, "documentation":"

Represents the input for a request action.

" @@ -1558,7 +1574,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -1590,11 +1606,11 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1618,11 +1634,11 @@ "members":{ "Name":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking rule set. This name is used to identify the rule set associated with a matchmaking configuration.

" + "documentation":"

Unique identifier for a matchmaking rule set. A matchmaking configuration identifies the rule set it uses by this name value. (Note: The rule set name is different from the optional \"name\" field in the rule set body.)

" }, "RuleSetBody":{ "shape":"RuleSetBody", - "documentation":"

Collection of matchmaking rules, formatted as a JSON string. (Note that comments are not allowed in JSON, but most elements support a description field.)

" + "documentation":"

Collection of matchmaking rules, formatted as a JSON string. Note that comments are not allowed in JSON, but most elements support a description field.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1715,7 +1731,7 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. To get VPC information, including IDs, use the Virtual Private Cloud service tools, including the VPC Dashboard in the AWS Management Console.

" + "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1748,7 +1764,7 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. To get VPC information, including IDs, use the Virtual Private Cloud service tools, including the VPC Dashboard in the AWS Management Console.

" + "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1828,6 +1844,23 @@ "members":{ } }, + "DeleteMatchmakingRuleSetInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking rule set to be deleted. (Note: The rule set name is different from the optional \"name\" field in the rule set body.)

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "DeleteMatchmakingRuleSetOutput":{ + "type":"structure", + "members":{ + }, + "documentation":"

Represents the returned data in response to a request action.

" + }, "DeleteScalingPolicyInput":{ "type":"structure", "required":[ @@ -1859,7 +1892,7 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. To get VPC information, including IDs, use the Virtual Private Cloud service tools, including the VPC Dashboard in the AWS Management Console.

" + "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2352,7 +2385,7 @@ "members":{ "Names":{ "shape":"MatchmakingRuleSetNameList", - "documentation":"

Unique identifier for a matchmaking rule set. This name is used to identify the rule set associated with a matchmaking configuration.

" + "documentation":"

List of one or more matchmaking rule set names to retrieve details for. (Note: The rule set name is different from the optional \"name\" field in the rule set body.)

" }, "Limit":{ "shape":"RuleSetLimit", @@ -2568,7 +2601,7 @@ "documentation":"

Number of instances in the fleet that are no longer active but haven't yet been terminated.

" } }, - "documentation":"

Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.

Fleet-related operations include:

" + "documentation":"

Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.

" }, "EC2InstanceLimit":{ "type":"structure", @@ -2644,7 +2677,7 @@ }, "EventCode":{ "shape":"EventCode", - "documentation":"

Type of event being logged. The following events are currently in use:

Fleet creation events:

  • FLEET_CREATED -- A fleet record was successfully created with a status of NEW. Event messaging includes the fleet ID.

  • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.

  • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

  • FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_RUNNING_INSTALLER – The game server build files were successfully extracted, and Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's run-time configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the run-time configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING.

  • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the run-time configuration failed because the executable specified in a launch path does not exist on the instance.

  • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING.

  • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the run-time configuration failed because the executable specified in a launch path failed to run on the fleet instance.

  • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING.

  • FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. A possible reason for failure is that the game server is not reporting \"process ready\" to the Amazon GameLift service.

  • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

VPC peering events:

  • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your AWS account.

  • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information (see DescribeVpcPeeringConnections) provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your AWS account. For more information on VPC peering failures, see http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

  • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

Spot instance events:

  • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

Other fleet events:

  • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

  • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

  • FLEET_DELETED -- A request to delete a fleet was initiated.

  • GENERIC_EVENT -- An unspecified event has occurred.

" + "documentation":"

Type of event being logged. The following events are currently in use:

Fleet creation events:

  • FLEET_CREATED -- A fleet record was successfully created with a status of NEW. Event messaging includes the fleet ID.

  • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.

  • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

  • FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_RUNNING_INSTALLER – The game server build files were successfully extracted, and Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's run-time configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the run-time configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING.

  • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the run-time configuration failed because the executable specified in a launch path does not exist on the instance.

  • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING.

  • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the run-time configuration failed because the executable specified in a launch path failed to run on the fleet instance.

  • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING.

  • FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. A possible reason for failure is that the game server is not reporting \"process ready\" to the Amazon GameLift service.

  • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

VPC peering events:

  • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your AWS account.

  • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information (see DescribeVpcPeeringConnections) provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your AWS account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

  • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

Spot instance events:

  • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

Other fleet events:

  • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

  • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

  • FLEET_DELETED -- A request to delete a fleet was initiated.

  • GENERIC_EVENT -- An unspecified event has occurred.

" }, "Message":{ "shape":"NonEmptyString", @@ -2766,7 +2799,7 @@ }, "LogPaths":{ "shape":"StringList", - "documentation":"

Location of default log files. When a server process is shut down, Amazon GameLift captures and stores any log files in this location. These logs are in addition to game session logs; see more on game session logs in the Amazon GameLift Developer Guide. If no default log path for a fleet is specified, Amazon GameLift automatically uploads logs that are stored on each instance at C:\\game\\logs (for Windows) or /local/game/logs (for Linux). Use the Amazon GameLift console to access stored logs.

" + "documentation":"

Location of default log files. When a server process is shut down, Amazon GameLift captures and stores any log files in this location. These logs are in addition to game session logs; see more on game session logs in the Amazon GameLift Developer Guide. If no default log path for a fleet is specified, Amazon GameLift automatically uploads logs that are stored on each instance at C:\\game\\logs (for Windows) or /local/game/logs (for Linux). Use the Amazon GameLift console to access stored logs.

" }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", @@ -2789,7 +2822,7 @@ "documentation":"

List of fleet actions that have been suspended using StopFleetActions. This includes auto-scaling.

" } }, - "documentation":"

General properties describing a fleet.

Fleet-related operations include:

" + "documentation":"

General properties describing a fleet.

" }, "FleetAttributesList":{ "type":"list", @@ -2811,7 +2844,7 @@ "documentation":"

Current status of fleet capacity.

" } }, - "documentation":"

Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.

Fleet-related operations include:

" + "documentation":"

Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.

" }, "FleetCapacityExceededException":{ "type":"structure", @@ -2879,7 +2912,7 @@ "documentation":"

Maximum players allowed across all game sessions currently being hosted on all instances in the fleet.

" } }, - "documentation":"

Current status of fleet utilization, including the number of game and player sessions being hosted.

Fleet-related operations include:

" + "documentation":"

Current status of fleet utilization, including the number of game and player sessions being hosted.

" }, "FleetUtilizationList":{ "type":"list", @@ -2903,7 +2936,7 @@ "documentation":"

Game property value.

" } }, - "documentation":"

Set of key-value pairs that contain information about a game session. When included in a game session request, these properties communicate details to be used when setting up the new game session, such as to specify a game mode, level, or map. Game properties are passed to the game server process when initiating a new game session; the server process uses the properties as appropriate. For more information, see the Amazon GameLift Developer Guide.

" + "documentation":"

Set of key-value pairs that contain information about a game session. When included in a game session request, these properties communicate details to be used when setting up the new game session, such as to specify a game mode, level, or map. Game properties are passed to the game server process when initiating a new game session; the server process uses the properties as appropriate. For more information, see the Amazon GameLift Developer Guide.

" }, "GamePropertyKey":{ "type":"string", @@ -2959,7 +2992,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). You can search for active game sessions based on this custom data with SearchGameSessions.

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). You can search for active game sessions based on this custom data with SearchGameSessions.

" }, "IpAddress":{ "shape":"IpAddress", @@ -2979,14 +3012,14 @@ }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "MatchmakerData":{ "shape":"MatchmakerData", - "documentation":"

Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

" + "documentation":"

Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition to the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

" } }, - "documentation":"

Properties describing a game session.

A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.

Game-session-related operations include:

" + "documentation":"

Properties describing a game session.

A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.

" }, "GameSessionActivationTimeoutSeconds":{ "type":"integer", @@ -2998,7 +3031,7 @@ "members":{ "GameSessionArn":{ "shape":"ArnStringModel", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.

" }, "IpAddress":{ "shape":"StringModel", @@ -3067,7 +3100,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", @@ -3115,11 +3148,11 @@ }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "MatchmakerData":{ "shape":"MatchmakerData", - "documentation":"

Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.

" + "documentation":"

Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.

" } }, "documentation":"

Object that describes a StartGameSessionPlacement request. This object includes the full details of the original request plus the current status and start/end time stamps.

Game session placement-related operations include:

" @@ -3142,7 +3175,7 @@ }, "GameSessionQueueArn":{ "shape":"ArnStringModel", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" }, "TimeoutInSeconds":{ "shape":"WholeNumber", @@ -3157,7 +3190,7 @@ "documentation":"

List of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order.

" } }, - "documentation":"

Configuration of a queue that is used to process game session placement requests. The queue configuration identifies several game features:

  • The destinations where a new game session can potentially be hosted. Amazon GameLift tries these destinations in an order based on either the queue's default order or player latency information, if provided in a placement request. With latency information, Amazon GameLift can place game sessions where the majority of players are reporting the lowest possible latency.

  • The length of time that placement requests can wait in the queue before timing out.

  • A set of optional latency policies that protect individual players from high latencies, preventing game sessions from being placed where any individual player is reporting latency higher than a policy's maximum.

Queue-related operations include:

" + "documentation":"

Configuration of a queue that is used to process game session placement requests. The queue configuration identifies several game features:

  • The destinations where a new game session can potentially be hosted. Amazon GameLift tries these destinations in an order based on either the queue's default order or player latency information, if provided in a placement request. With latency information, Amazon GameLift can place game sessions where the majority of players are reporting the lowest possible latency.

  • The length of time that placement requests can wait in the queue before timing out.

  • A set of optional latency policies that protect individual players from high latencies, preventing game sessions from being placed where any individual player is reporting latency higher than a policy's maximum.

" }, "GameSessionQueueDestination":{ "type":"structure", @@ -3167,7 +3200,7 @@ "documentation":"

Amazon Resource Name (ARN) assigned to fleet or fleet alias. ARNs, which include a fleet ID or alias ID and a region name, provide a unique identifier across all regions.

" } }, - "documentation":"

Fleet designated in a game session queue. Requests for new game sessions in the queue are fulfilled by starting a new game session on any destination configured for a queue.

Queue-related operations include:

" + "documentation":"

Fleet designated in a game session queue. Requests for new game sessions in the queue are fulfilled by starting a new game session on any destination configured for a queue.

" }, "GameSessionQueueDestinationList":{ "type":"list", @@ -3217,7 +3250,7 @@ "members":{ "PreSignedUrl":{ "shape":"NonZeroAndMaxString", - "documentation":"

Location of the requested game session logs, available for download.

" + "documentation":"

Location of the requested game session logs, available for download. This URL is valid for 15 minutes, after which S3 will reject any download request using this URL. You can request a new URL any time within the 14-day period that the logs are retained.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -3584,7 +3617,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -3620,11 +3653,11 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" } }, "documentation":"

Guidelines for use with FlexMatch to match players into games. All matchmaking requests must specify a matchmaking configuration.

" @@ -3678,7 +3711,7 @@ "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Set of rule statements, used with FlexMatch, that determine how to build a certain kind of player match. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.

  • Teams -- Required. A rule set must define one or multiple teams for the match and set minimum and maximum team sizes. For example, a rule set might describe a 4x4 match that requires all eight slots to be filled.

  • Player attributes -- Optional. These attributes specify a set of player characteristics to evaluate when looking for a match. Matchmaking requests that use a rule set with player attributes must provide the corresponding attribute values. For example, an attribute might specify a player's skill or level.

  • Rules -- Optional. Rules define how to evaluate potential players for a match based on player attributes. A rule might specify minimum requirements for individual players, teams, or entire matches. For example, a rule might require each player to meet a certain skill level, each team to have at least one player in a certain role, or the match to have a minimum average skill level. or may describe an entire group--such as all teams must be evenly matched or have at least one player in a certain role.

  • Expansions -- Optional. Expansions allow you to relax the rules after a period of time when no acceptable matches are found. This feature lets you balance getting players into games in a reasonable amount of time instead of making them wait indefinitely for the best possible match. For example, you might use an expansion to increase the maximum skill variance between players after 30 seconds.

" + "documentation":"

Set of rule statements, used with FlexMatch, that determine how to build a certain kind of player match. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.

  • Teams -- Required. A rule set must define one or multiple teams for the match and set minimum and maximum team sizes. For example, a rule set might describe a 4x4 match that requires all eight slots to be filled.

  • Player attributes -- Optional. These attributes specify a set of player characteristics to evaluate when looking for a match. Matchmaking requests that use a rule set with player attributes must provide the corresponding attribute values. For example, an attribute might specify a player's skill or level.

  • Rules -- Optional. Rules define how to evaluate potential players for a match based on player attributes. A rule might specify minimum requirements for individual players, teams, or entire matches. For example, a rule might require each player to meet a certain skill level, each team to have at least one player in a certain role, or the match to have a minimum average skill level. or may describe an entire group--such as all teams must be evenly matched or have at least one player in a certain role.

  • Expansions -- Optional. Expansions allow you to relax the rules after a period of time when no acceptable matches are found. This feature lets you balance getting players into games in a reasonable amount of time instead of making them wait indefinitely for the best possible match. For example, you might use an expansion to increase the maximum skill variance between players after 30 seconds.

" }, "MatchmakingRuleSetList":{ "type":"list", @@ -3817,7 +3850,7 @@ "documentation":"

Unique identifier for a player session.

" } }, - "documentation":"

Information about a player session that was created as part of a StartGameSessionPlacement request. This object contains only the player ID and player session ID. To retrieve full details on a player session, call DescribePlayerSessions with the player session ID.

Player-session-related operations include:

" + "documentation":"

Information about a player session that was created as part of a StartGameSessionPlacement request. This object contains only the player ID and player session ID. To retrieve full details on a player session, call DescribePlayerSessions with the player session ID.

" }, "PlacedPlayerSessionList":{ "type":"list", @@ -3900,7 +3933,7 @@ "documentation":"

The length of time, in seconds, that the policy is enforced while placing a new game session. A null value for this property means that the policy is enforced until the queue times out.

" } }, - "documentation":"

Queue setting that determines the highest latency allowed for individual players when placing a game session. When a latency policy is in force, a game session cannot be placed at any destination in a region where a player is reporting latency higher than the cap. Latency policies are only enforced when the placement request contains player latency information.

Queue-related operations include:

" + "documentation":"

Queue setting that determines the highest latency allowed for individual players when placing a game session. When a latency policy is in force, a game session cannot be placed at any destination in a region where a player is reporting latency higher than the cap. Latency policies are only enforced when the placement request contains player latency information.

" }, "PlayerLatencyPolicyList":{ "type":"list", @@ -3954,7 +3987,7 @@ "documentation":"

Developer-defined information related to a player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game.

" } }, - "documentation":"

Properties describing a player session. Player session objects are created either by creating a player session for a specific game session, or as part of a game session placement. A player session represents either a player reservation for a game session (status RESERVED) or actual player activity in a game session (status ACTIVE). A player session object (including player data) is automatically passed to a game session when the player connects to the game session and is validated.

When a player disconnects, the player session status changes to COMPLETED. Once the session ends, the player session object is retained for 30 days and then removed.

Player-session-related operations include:

" + "documentation":"

Properties describing a player session. Player session objects are created either by creating a player session for a specific game session, or as part of a game session placement. A player session represents either a player reservation for a game session (status RESERVED) or actual player activity in a game session (status ACTIVE). A player session object (including player data) is automatically passed to a game session when the player connects to the game session and is validated.

When a player disconnects, the player session status changes to COMPLETED. Once the session ends, the player session object is retained for 30 days and then removed.

" }, "PlayerSessionCreationPolicy":{ "type":"string", @@ -4045,7 +4078,7 @@ }, "MetricName":{ "shape":"MetricName", - "documentation":"

Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see Monitor Amazon GameLift with Amazon CloudWatch.

  • ActivatingGameSessions -- Game sessions in the process of being created.

  • ActiveGameSessions -- Game sessions that are currently running.

  • ActiveInstances -- Fleet instances that are currently running at least one game session.

  • AvailableGameSessions -- Additional game sessions that fleet could host simultaneously, given current capacity.

  • AvailablePlayerSessions -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.

  • CurrentPlayerSessions -- Player slots in active game sessions that are being used by a player or are reserved for a player.

  • IdleInstances -- Active instances that are currently hosting zero game sessions.

  • PercentAvailableGameSessions -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.

  • PercentIdleInstances -- Percentage of the total number of active instances that are hosting zero game sessions.

  • QueueDepth -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

  • WaitTime -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

" + "documentation":"

Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see Monitor Amazon GameLift with Amazon CloudWatch.

  • ActivatingGameSessions -- Game sessions in the process of being created.

  • ActiveGameSessions -- Game sessions that are currently running.

  • ActiveInstances -- Fleet instances that are currently running at least one game session.

  • AvailableGameSessions -- Additional game sessions that fleet could host simultaneously, given current capacity.

  • AvailablePlayerSessions -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.

  • CurrentPlayerSessions -- Player slots in active game sessions that are being used by a player or are reserved for a player.

  • IdleInstances -- Active instances that are currently hosting zero game sessions.

  • PercentAvailableGameSessions -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.

  • PercentIdleInstances -- Percentage of the total number of active instances that are hosting zero game sessions.

  • QueueDepth -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

  • WaitTime -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

" }, "PolicyType":{ "shape":"PolicyType", @@ -4148,7 +4181,7 @@ "documentation":"

Message text to be used with a terminal routing strategy.

" } }, - "documentation":"

Routing configuration for a fleet alias.

Fleet-related operations include:

" + "documentation":"

Routing configuration for a fleet alias.

" }, "RoutingStrategyType":{ "type":"string", @@ -4183,7 +4216,7 @@ "documentation":"

Maximum amount of time (in seconds) that a game session can remain in status ACTIVATING. If the game session is not active before the timeout, activation is terminated and the game session status is changed to TERMINATED.

" } }, - "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. All fleets must have a run-time configuration. Each instance in the fleet launches the server processes specified in the run-time configuration and launches new ones as existing processes end. Each instance regularly checks for an updated run-time configuration and follows the new instructions.

The run-time configuration enables the instances in a fleet to run multiple processes simultaneously. Potential scenarios are as follows: (1) Run multiple processes of a single game server executable to maximize usage of your hosting resources. (2) Run one or more processes of different build executables, such as your game server executable and a related program, or two or more different versions of a game server. (3) Run multiple processes of a single game server but with different launch parameters, for example to run one process on each instance in debug mode.

A Amazon GameLift instance is limited to 50 processes running simultaneously. A run-time configuration must specify fewer than this limit. To calculate the total number of processes specified in a run-time configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object in the run-time configuration.

Fleet-related operations include:

" + "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. All fleets must have a run-time configuration. Each instance in the fleet launches the server processes specified in the run-time configuration and launches new ones as existing processes end. Each instance regularly checks for an updated run-time configuration and follows the new instructions.

The run-time configuration enables the instances in a fleet to run multiple processes simultaneously. Potential scenarios are as follows: (1) Run multiple processes of a single game server executable to maximize usage of your hosting resources. (2) Run one or more processes of different build executables, such as your game server executable and a related program, or two or more different versions of a game server. (3) Run multiple processes of a single game server but with different launch parameters, for example to run one process on each instance in debug mode.

An Amazon GameLift instance is limited to 50 processes running simultaneously. A run-time configuration must specify fewer than this limit. To calculate the total number of processes specified in a run-time configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object in the run-time configuration.

" }, "S3Location":{ "type":"structure", @@ -4198,10 +4231,10 @@ }, "RoleArn":{ "shape":"NonEmptyString", - "documentation":"

Amazon Resource Name (ARN) for the access role that allows Amazon GameLift to access your S3 bucket.

" + "documentation":"

Amazon Resource Name (ARN) for the access role that allows Amazon GameLift to access your S3 bucket.

" } }, - "documentation":"

Location in Amazon Simple Storage Service (Amazon S3) where build files can be stored for access by Amazon GameLift. This location is specified in a CreateBuild request. For more details, see the Create a Build with Files in Amazon S3.

" + "documentation":"

Location in Amazon Simple Storage Service (Amazon S3) where build files can be stored for access by Amazon GameLift. This location is specified in a CreateBuild request. For more details, see the Create a Build with Files in Amazon S3.

" }, "ScalingAdjustmentType":{ "type":"string", @@ -4248,7 +4281,7 @@ }, "MetricName":{ "shape":"MetricName", - "documentation":"

Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see Monitor Amazon GameLift with Amazon CloudWatch.

  • ActivatingGameSessions -- Game sessions in the process of being created.

  • ActiveGameSessions -- Game sessions that are currently running.

  • ActiveInstances -- Fleet instances that are currently running at least one game session.

  • AvailableGameSessions -- Additional game sessions that fleet could host simultaneously, given current capacity.

  • AvailablePlayerSessions -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.

  • CurrentPlayerSessions -- Player slots in active game sessions that are being used by a player or are reserved for a player.

  • IdleInstances -- Active instances that are currently hosting zero game sessions.

  • PercentAvailableGameSessions -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.

  • PercentIdleInstances -- Percentage of the total number of active instances that are hosting zero game sessions.

  • QueueDepth -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

  • WaitTime -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

" + "documentation":"

Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see Monitor Amazon GameLift with Amazon CloudWatch.

  • ActivatingGameSessions -- Game sessions in the process of being created.

  • ActiveGameSessions -- Game sessions that are currently running.

  • ActiveInstances -- Fleet instances that are currently running at least one game session.

  • AvailableGameSessions -- Additional game sessions that fleet could host simultaneously, given current capacity.

  • AvailablePlayerSessions -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.

  • CurrentPlayerSessions -- Player slots in active game sessions that are being used by a player or are reserved for a player.

  • IdleInstances -- Active instances that are currently hosting zero game sessions.

  • PercentAvailableGameSessions -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.

  • PercentIdleInstances -- Percentage of the total number of active instances that are hosting zero game sessions.

  • QueueDepth -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

  • WaitTime -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

" }, "PolicyType":{ "shape":"PolicyType", @@ -4259,7 +4292,7 @@ "documentation":"

Object that contains settings for a target-based scaling policy.

" } }, - "documentation":"

Rule that controls how a fleet is scaled. Scaling policies are uniquely identified by the combination of name and fleet ID.

Operations related to fleet capacity scaling include:

" + "documentation":"

Rule that controls how a fleet is scaled. Scaling policies are uniquely identified by the combination of name and fleet ID.

" }, "ScalingPolicyList":{ "type":"list", @@ -4395,7 +4428,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", @@ -4415,7 +4448,7 @@ }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" } }, "documentation":"

Represents the input for a request action.

" @@ -4448,11 +4481,11 @@ }, "GameSessionArn":{ "shape":"ArnStringModel", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.

" }, "Players":{ "shape":"PlayerList", - "documentation":"

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

  • PlayerID, PlayerAttributes, Team -\\\\- This information is maintained in the GameSession object, MatchmakerData property, for all players who are currently assigned to the game session. The matchmaker data is in JSON syntax, formatted as a string. For more details, see Match Data.

  • LatencyInMs -\\\\- If the matchmaker uses player latency, include a latency value, in milliseconds, for the region that the game session is currently in. Do not include latency values for any other region.

" + "documentation":"

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

  • PlayerID, PlayerAttributes, Team -\\\\- This information is maintained in the GameSession object, MatchmakerData property, for all players who are currently assigned to the game session. The matchmaker data is in JSON syntax, formatted as a string. For more details, see Match Data.

  • LatencyInMs -\\\\- If the matchmaker uses player latency, include a latency value, in milliseconds, for the region that the game session is currently in. Do not include latency values for any other region.

" } }, "documentation":"

Represents the input for a request action.

" @@ -4577,7 +4610,7 @@ "documentation":"

Desired value to use with a target-based scaling policy. The value must be relevant for whatever metric the scaling policy is using. For example, in a policy using the metric PercentAvailableGameSessions, the target value should be the preferred size of the fleet's buffer (the percent of capacity that should be idle and ready for new game sessions).

" } }, - "documentation":"

Settings for a target-based scaling policy (see ScalingPolicy). A target-based policy tracks a particular fleet metric and specifies a target value for the metric. As player usage changes, the policy triggers Amazon GameLift to adjust capacity so that the metric returns to the target value. The target configuration specifies settings as needed for the target-based policy, including the target value.

Operations related to fleet capacity scaling include:

" + "documentation":"

Settings for a target-based scaling policy (see ScalingPolicy). A target-based policy tracks a particular fleet metric and specifies a target value for the metric. As player usage changes, the policy triggers Amazon GameLift to adjust capacity so that the metric returns to the target value. The target configuration specifies settings as needed for the target-based policy, including the target value.

" }, "TerminalRoutingStrategyException":{ "type":"structure", @@ -4853,7 +4886,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -4873,7 +4906,7 @@ }, "NotificationTarget":{ "shape":"SnsArnStringModel", - "documentation":"

SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.

" + "documentation":"

SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.

" }, "AdditionalPlayerCount":{ "shape":"WholeNumber", @@ -4885,11 +4918,11 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" } }, "documentation":"

Represents the input for a request action.

" @@ -4966,7 +4999,7 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. To get VPC information, including IDs, use the Virtual Private Cloud service tools, including the VPC Dashboard in the AWS Management Console.

" + "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" }, "CreationTime":{ "shape":"Timestamp", @@ -4977,7 +5010,7 @@ "documentation":"

Time stamp indicating when this authorization expires (24 hours after issuance). Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Represents an authorization for a VPC peering connection between the VPC for an Amazon GameLift fleet and another VPC on an account you have access to. This authorization must exist and be valid for the peering connection to be established. Authorizations are valid for 24 hours after they are issued.

VPC peering connection operations include:

" + "documentation":"

Represents an authorization for a VPC peering connection between the VPC for an Amazon GameLift fleet and another VPC on an account you have access to. This authorization must exist and be valid for the peering connection to be established. Authorizations are valid for 24 hours after they are issued.

" }, "VpcPeeringAuthorizationList":{ "type":"list", @@ -5004,14 +5037,14 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. To get VPC information, including IDs, use the Virtual Private Cloud service tools, including the VPC Dashboard in the AWS Management Console.

" + "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" }, "GameLiftVpcId":{ "shape":"NonZeroAndMaxString", "documentation":"

Unique identifier for the VPC that contains the Amazon GameLift fleet for this connection. This VPC is managed by Amazon GameLift and does not appear in your AWS account.

" } }, - "documentation":"

Represents a peering connection between a VPC on one of your AWS accounts and the VPC for your Amazon GameLift fleets. This record may be for an active peering connection or a pending connection that has not yet been established.

VPC peering connection operations include:

" + "documentation":"

Represents a peering connection between a VPC on one of your AWS accounts and the VPC for your Amazon GameLift fleets. This record may be for an active peering connection or a pending connection that has not yet been established.

" }, "VpcPeeringConnectionList":{ "type":"list", @@ -5029,12 +5062,12 @@ "documentation":"

Additional messaging associated with the connection status.

" } }, - "documentation":"

Represents status information for a VPC peering connection. Status is associated with a VpcPeeringConnection object. Status codes and messages are provided from EC2 (see VpcPeeringConnectionStateReason). Connection status information is also communicated as a fleet Event.

" + "documentation":"

Represents status information for a VPC peering connection. Status is associated with a VpcPeeringConnection object. Status codes and messages are provided from EC2 (see VpcPeeringConnectionStateReason). Connection status information is also communicated as a fleet Event.

" }, "WholeNumber":{ "type":"integer", "min":0 } }, - "documentation":"Amazon GameLift Service

Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Use Amazon GameLift for these tasks: (1) set up computing resources and deploy your game servers, (2) run game sessions and get players into games, (3) automatically scale your resources to meet player demand and manage costs, and (4) track in-depth metrics on game server performance and player usage.

The Amazon GameLift service API includes two important function sets:

  • Manage game sessions and player access -- Retrieve information on available game sessions; create new game sessions; send player requests to join a game session.

  • Configure and manage game server resources -- Manage builds, fleets, queues, and aliases; set auto-scaling policies; retrieve logs and metrics.

This reference guide describes the low-level service API for Amazon GameLift. You can use the API functionality with these tools:

  • The Amazon Web Services software development kit (AWS SDK) is available in multiple languages including C++ and C#. Use the SDK to access the API programmatically from an application, such as a game client.

  • The AWS command-line interface (CLI) tool is primarily useful for handling administrative actions, such as setting up and managing Amazon GameLift settings and resources. You can use the AWS CLI to manage all of your AWS services.

  • The AWS Management Console for Amazon GameLift provides a web interface to manage your Amazon GameLift settings and resources. The console includes a dashboard for tracking key resources, including builds and fleets, and displays usage and performance metrics for your games as customizable graphs.

  • Amazon GameLift Local is a tool for testing your game's integration with Amazon GameLift before deploying it on the service. This tool supports a subset of key API actions, which can be called from either the AWS CLI or programmatically. See Testing an Integration.

Learn more

API SUMMARY

This list offers a functional overview of the Amazon GameLift service API.

Managing Games and Players

Use these actions to start new game sessions, find existing game sessions, track game session status and other information, and enable player access to game sessions.

  • Discover existing game sessions

    • SearchGameSessions -- Retrieve all available game sessions or search for game sessions that match a set of criteria.

  • Start new game sessions

    • Start new games with Queues to find the best available hosting resources across multiple regions, minimize player latency, and balance game session activity for efficiency and cost effectiveness.

    • CreateGameSession -- Start a new game session on a specific fleet. Available in Amazon GameLift Local.

  • Match players to game sessions with FlexMatch matchmaking

    • StartMatchmaking -- Request matchmaking for one player or a group who want to play together.

    • StartMatchBackfill - Request additional player matches to fill empty slots in an existing game session.

    • DescribeMatchmaking -- Get details on a matchmaking request, including status.

    • AcceptMatch -- Register that a player accepts a proposed match, for matches that require player acceptance.

    • StopMatchmaking -- Cancel a matchmaking request.

  • Manage game session data

    • DescribeGameSessions -- Retrieve metadata for one or more game sessions, including length of time active and current player count. Available in Amazon GameLift Local.

    • DescribeGameSessionDetails -- Retrieve metadata and the game session protection setting for one or more game sessions.

    • UpdateGameSession -- Change game session settings, such as maximum player count and join policy.

    • GetGameSessionLogUrl -- Get the location of saved logs for a game session.

  • Manage player sessions

    • CreatePlayerSession -- Send a request for a player to join a game session. Available in Amazon GameLift Local.

    • CreatePlayerSessions -- Send a request for multiple players to join a game session. Available in Amazon GameLift Local.

    • DescribePlayerSessions -- Get details on player activity, including status, playing time, and player data. Available in Amazon GameLift Local.

Setting Up and Managing Game Servers

When setting up Amazon GameLift resources for your game, you first create a game build and upload it to Amazon GameLift. You can then use these actions to configure and manage a fleet of resources to run your game servers, scale capacity to meet player demand, access performance and utilization metrics, and more.

" + "documentation":"Amazon GameLift Service

Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Use Amazon GameLift for these tasks: (1) set up computing resources and deploy your game servers, (2) run game sessions and get players into games, (3) automatically scale your resources to meet player demand and manage costs, and (4) track in-depth metrics on game server performance and player usage.

The Amazon GameLift service API includes two important function sets:

  • Manage game sessions and player access -- Retrieve information on available game sessions; create new game sessions; send player requests to join a game session.

  • Configure and manage game server resources -- Manage builds, fleets, queues, and aliases; set auto-scaling policies; retrieve logs and metrics.

This reference guide describes the low-level service API for Amazon GameLift. You can use the API functionality with these tools:

  • The Amazon Web Services software development kit (AWS SDK) is available in multiple languages including C++ and C#. Use the SDK to access the API programmatically from an application, such as a game client.

  • The AWS command-line interface (CLI) tool is primarily useful for handling administrative actions, such as setting up and managing Amazon GameLift settings and resources. You can use the AWS CLI to manage all of your AWS services.

  • The AWS Management Console for Amazon GameLift provides a web interface to manage your Amazon GameLift settings and resources. The console includes a dashboard for tracking key resources, including builds and fleets, and displays usage and performance metrics for your games as customizable graphs.

  • Amazon GameLift Local is a tool for testing your game's integration with Amazon GameLift before deploying it on the service. This tool supports a subset of key API actions, which can be called from either the AWS CLI or programmatically. See Testing an Integration.

Learn more

API SUMMARY

This list offers a functional overview of the Amazon GameLift service API.

Managing Games and Players

Use these actions to start new game sessions, find existing game sessions, track game session status and other information, and enable player access to game sessions.

  • Discover existing game sessions

    • SearchGameSessions -- Retrieve all available game sessions or search for game sessions that match a set of criteria.

  • Start new game sessions

    • Start new games with Queues to find the best available hosting resources across multiple regions, minimize player latency, and balance game session activity for efficiency and cost effectiveness.

    • CreateGameSession -- Start a new game session on a specific fleet. Available in Amazon GameLift Local.

  • Match players to game sessions with FlexMatch matchmaking

    • StartMatchmaking -- Request matchmaking for one player or a group who want to play together.

    • StartMatchBackfill - Request additional player matches to fill empty slots in an existing game session.

    • DescribeMatchmaking -- Get details on a matchmaking request, including status.

    • AcceptMatch -- Register that a player accepts a proposed match, for matches that require player acceptance.

    • StopMatchmaking -- Cancel a matchmaking request.

  • Manage game session data

    • DescribeGameSessions -- Retrieve metadata for one or more game sessions, including length of time active and current player count. Available in Amazon GameLift Local.

    • DescribeGameSessionDetails -- Retrieve metadata and the game session protection setting for one or more game sessions.

    • UpdateGameSession -- Change game session settings, such as maximum player count and join policy.

    • GetGameSessionLogUrl -- Get the location of saved logs for a game session.

  • Manage player sessions

    • CreatePlayerSession -- Send a request for a player to join a game session. Available in Amazon GameLift Local.

    • CreatePlayerSessions -- Send a request for multiple players to join a game session. Available in Amazon GameLift Local.

    • DescribePlayerSessions -- Get details on player activity, including status, playing time, and player data. Available in Amazon GameLift Local.

Setting Up and Managing Game Servers

When setting up Amazon GameLift resources for your game, you first create a game build and upload it to Amazon GameLift. You can then use these actions to configure and manage a fleet of resources to run your game servers, scale capacity to meet player demand, access performance and utilization metrics, and more.

" } diff --git a/botocore/data/globalaccelerator/2018-08-08/paginators-1.json b/botocore/data/globalaccelerator/2018-08-08/paginators-1.json index ea142457..ed79e0ad 100644 --- a/botocore/data/globalaccelerator/2018-08-08/paginators-1.json +++ b/botocore/data/globalaccelerator/2018-08-08/paginators-1.json @@ -1,3 +1,22 @@ { - "pagination": {} + "pagination": { + "ListAccelerators": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Accelerators" + }, + "ListEndpointGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "EndpointGroups" + }, + "ListListeners": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Listeners" + } + } } diff --git a/botocore/data/glue/2017-03-31/paginators-1.json b/botocore/data/glue/2017-03-31/paginators-1.json index e90ffafe..9ef561c9 100644 --- a/botocore/data/glue/2017-03-31/paginators-1.json +++ b/botocore/data/glue/2017-03-31/paginators-1.json @@ -77,6 +77,12 @@ "output_token": "NextToken", "input_token": "NextToken", "limit_key": "MaxResults" + }, + "GetSecurityConfigurations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SecurityConfigurations" } } } diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index f267142f..79b41377 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -93,6 +93,51 @@ ], "documentation":"

Deletes a specified batch of versions of a table.

" }, + "BatchGetCrawlers":{ + "name":"BatchGetCrawlers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetCrawlersRequest"}, + "output":{"shape":"BatchGetCrawlersResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Returns a list of resource metadata for a given list of crawler names. After calling the ListCrawlers operation, you can call this operation to access the data to which you have been granted permissions based on tags.

" + }, + "BatchGetDevEndpoints":{ + "name":"BatchGetDevEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetDevEndpointsRequest"}, + "output":{"shape":"BatchGetDevEndpointsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Returns a list of resource metadata for a given list of DevEndpoint names. After calling the ListDevEndpoints operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

" + }, + "BatchGetJobs":{ + "name":"BatchGetJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetJobsRequest"}, + "output":{"shape":"BatchGetJobsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Returns a list of resource metadata for a given list of job names. After calling the ListJobs operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

" + }, "BatchGetPartition":{ "name":"BatchGetPartition", "http":{ @@ -110,6 +155,21 @@ ], "documentation":"

Retrieves partitions in a batch request.

" }, + "BatchGetTriggers":{ + "name":"BatchGetTriggers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetTriggersRequest"}, + "output":{"shape":"BatchGetTriggersResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Returns a list of resource metadata for a given list of trigger names. After calling the ListTriggers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that uses tags.

" + }, "BatchStopJobRun":{ "name":"BatchStopJobRun", "http":{ @@ -995,6 +1055,22 @@ ], "documentation":"

Retrieves the definitions of some or all of the tables in a given Database.

" }, + "GetTags":{ + "name":"GetTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTagsRequest"}, + "output":{"shape":"GetTagsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

Retrieves a list of tags associated with a resource.

" + }, "GetTrigger":{ "name":"GetTrigger", "http":{ @@ -1075,6 +1151,67 @@ ], "documentation":"

Imports an existing Athena Data Catalog to AWS Glue

" }, + "ListCrawlers":{ + "name":"ListCrawlers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCrawlersRequest"}, + "output":{"shape":"ListCrawlersResponse"}, + "errors":[ + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves the names of all crawler resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.

This operation takes the optional Tags field which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag will be retrieved.

" + }, + "ListDevEndpoints":{ + "name":"ListDevEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDevEndpointsRequest"}, + "output":{"shape":"ListDevEndpointsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves the names of all DevEndpoint resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.

This operation takes the optional Tags field which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag will be retrieved.

" + }, + "ListJobs":{ + "name":"ListJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListJobsRequest"}, + "output":{"shape":"ListJobsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves the names of all job resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.

This operation takes the optional Tags field which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag will be retrieved.

" + }, + "ListTriggers":{ + "name":"ListTriggers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTriggersRequest"}, + "output":{"shape":"ListTriggersResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves the names of all trigger resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.

This operation takes the optional Tags field which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag will be retrieved.

" + }, "PutDataCatalogEncryptionSettings":{ "name":"PutDataCatalogEncryptionSettings", "http":{ @@ -1088,7 +1225,7 @@ {"shape":"InvalidInputException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Sets the security configuration for a specified catalog. Once the configuration has been set, the specified encryption is applied to every catalog write thereafter.

" + "documentation":"

Sets the security configuration for a specified catalog. After the configuration has been set, the specified encryption is applied to every catalog write thereafter.

" }, "PutResourcePolicy":{ "name":"PutResourcePolicy", @@ -1240,6 +1377,38 @@ ], "documentation":"

Stops a specified trigger.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

Adds tags to a resource. A tag is a label you can assign to an AWS resource. In AWS Glue, you can tag only certain resources. For information about what resources you can tag, see AWS Tags in AWS Glue.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

Removes tags from a resource.

" + }, "UpdateClassifier":{ "name":"UpdateClassifier", "http":{ @@ -1450,7 +1619,7 @@ }, "Arguments":{ "shape":"GenericMap", - "documentation":"

Arguments to be passed to the job run.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

" + "documentation":"

The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

" }, "Timeout":{ "shape":"Timeout", @@ -1524,7 +1693,7 @@ "members":{ "CatalogId":{ "shape":"CatalogIdString", - "documentation":"

The ID of the Data Catalog in which the connections reside. If none is supplied, the AWS account ID is used by default.

" + "documentation":"

The ID of the Data Catalog in which the connections reside. If none is provided, the AWS account ID is used by default.

" }, "ConnectionNameList":{ "shape":"DeleteConnectionNameList", @@ -1663,6 +1832,75 @@ } } }, + "BatchGetCrawlersRequest":{ + "type":"structure", + "required":["CrawlerNames"], + "members":{ + "CrawlerNames":{ + "shape":"CrawlerNameList", + "documentation":"

A list of crawler names, which may be the names returned from the ListCrawlers operation.

" + } + } + }, + "BatchGetCrawlersResponse":{ + "type":"structure", + "members":{ + "Crawlers":{ + "shape":"CrawlerList", + "documentation":"

A list of crawler definitions.

" + }, + "CrawlersNotFound":{ + "shape":"CrawlerNameList", + "documentation":"

A list of crawlers not found.

" + } + } + }, + "BatchGetDevEndpointsRequest":{ + "type":"structure", + "required":["DevEndpointNames"], + "members":{ + "DevEndpointNames":{ + "shape":"DevEndpointNames", + "documentation":"

The list of DevEndpoint names, which may be the names returned from the ListDevEndpoints operation.

" + } + } + }, + "BatchGetDevEndpointsResponse":{ + "type":"structure", + "members":{ + "DevEndpoints":{ + "shape":"DevEndpointList", + "documentation":"

A list of DevEndpoint definitions.

" + }, + "DevEndpointsNotFound":{ + "shape":"DevEndpointNames", + "documentation":"

A list of DevEndpoints not found.

" + } + } + }, + "BatchGetJobsRequest":{ + "type":"structure", + "required":["JobNames"], + "members":{ + "JobNames":{ + "shape":"JobNameList", + "documentation":"

A list of job names, which may be the names returned from the ListJobs operation.

" + } + } + }, + "BatchGetJobsResponse":{ + "type":"structure", + "members":{ + "Jobs":{ + "shape":"JobList", + "documentation":"

A list of job definitions.

" + }, + "JobsNotFound":{ + "shape":"JobNameList", + "documentation":"

A list of names of jobs not found.

" + } + } + }, "BatchGetPartitionRequest":{ "type":"structure", "required":[ @@ -1708,6 +1946,29 @@ "max":1000, "min":0 }, + "BatchGetTriggersRequest":{ + "type":"structure", + "required":["TriggerNames"], + "members":{ + "TriggerNames":{ + "shape":"TriggerNameList", + "documentation":"

A list of trigger names, which may be the names returned from the ListTriggers operation.

" + } + } + }, + "BatchGetTriggersResponse":{ + "type":"structure", + "members":{ + "Triggers":{ + "shape":"TriggerList", + "documentation":"

A list of trigger definitions.

" + }, + "TriggersNotFound":{ + "shape":"TriggerNameList", + "documentation":"

A list of names of triggers not found.

" + } + } + }, "BatchStopJobRunError":{ "type":"structure", "members":{ @@ -2084,7 +2345,7 @@ }, "Description":{ "shape":"DescriptionString", - "documentation":"

Description of the connection.

" + "documentation":"

The description of the connection.

" }, "ConnectionType":{ "shape":"ConnectionType", @@ -2096,23 +2357,23 @@ }, "ConnectionProperties":{ "shape":"ConnectionProperties", - "documentation":"

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".

  • PASSWORD - A password, if one is used, for the user name.

  • ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the key you designate to encrypt the password.

  • JDBC_DRIVER_JAR_URI - The S3 path of the a jar file that contains the JDBC driver to use.

  • JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

  • JDBC_ENGINE - The name of the JDBC engine to use.

  • JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

  • CONFIG_FILES - (Reserved for future use).

  • INSTANCE_ID - The instance ID to use.

  • JDBC_CONNECTION_URL - The URL for the JDBC connection.

  • JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether SSL with hostname matching will be enforced for the JDBC connection on the client. The default is false.

" + "documentation":"

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".

  • PASSWORD - A password, if one is used, for the user name.

  • ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

  • JDBC_DRIVER_JAR_URI - The Amazon S3 path of the JAR file that contains the JDBC driver to use.

  • JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

  • JDBC_ENGINE - The name of the JDBC engine to use.

  • JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

  • CONFIG_FILES - (Reserved for future use).

  • INSTANCE_ID - The instance ID to use.

  • JDBC_CONNECTION_URL - The URL for the JDBC connection.

  • JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching will be enforced for the JDBC connection on the client. The default is false.

" }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", - "documentation":"

A map of physical connection requirements, such as VPC and SecurityGroup, needed for making this connection successfully.

" + "documentation":"

A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to make this connection successfully.

" }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

The time this connection definition was created.

" + "documentation":"

The time that this connection definition was created.

" }, "LastUpdatedTime":{ "shape":"Timestamp", - "documentation":"

The last time this connection definition was updated.

" + "documentation":"

The last time that this connection definition was updated.

" }, "LastUpdatedBy":{ "shape":"NameString", - "documentation":"

The user, group or role that last updated this connection definition.

" + "documentation":"

The user, group, or role that last updated this connection definition.

" } }, "documentation":"

Defines a connection to a data source.

" @@ -2131,7 +2392,7 @@ }, "Description":{ "shape":"DescriptionString", - "documentation":"

Description of the connection.

" + "documentation":"

The description of the connection.

" }, "ConnectionType":{ "shape":"ConnectionType", @@ -2147,10 +2408,10 @@ }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", - "documentation":"

A map of physical connection requirements, such as VPC and SecurityGroup, needed for making this connection successfully.

" + "documentation":"

A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to successfully make this connection.

" } }, - "documentation":"

A structure used to specify a connection to create or update.

" + "documentation":"

A structure that is used to specify a connection to create or update.

" }, "ConnectionList":{ "type":"list", @@ -2167,10 +2428,10 @@ }, "AwsKmsKeyId":{ "shape":"NameString", - "documentation":"

A KMS key used to protect access to the JDBC source.

All users in your account should be granted the kms:encrypt permission to encrypt passwords before storing them in the Data Catalog (through the AWS Glue CreateConnection operation).

The decrypt permission should be granted only to KMS key admins and IAM roles designated for AWS Glue crawlers.

" + "documentation":"

An AWS KMS key that is used to encrypt the connection password.

If connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog.

You can set the decrypt permission to enable or restrict access on the password key according to your security requirements.

" } }, - "documentation":"

The data structure used by the Data Catalog to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.

When a CreationConnection request arrives containing a password, the Data Catalog first encrypts the password using your KMS key, and then encrypts the whole connection object again if catalog encryption is also enabled.

This encryption requires that you set KMS key permissions to enable or restrict access on the password key according to your security requirements. For example, you may want only admin users to have decrypt permission on the password key.

" + "documentation":"

The data structure used by the Data Catalog to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.

When a CreationConnection request arrives containing a password, the Data Catalog first encrypts the password using your AWS KMS key. It then encrypts the whole connection object again if catalog encryption is also enabled.

This encryption requires that you set AWS KMS key permissions to enable or restrict access on the password key according to your security requirements. For example, you might want only admin users to have decrypt permission on the password key.

" }, "ConnectionProperties":{ "type":"map", @@ -2433,7 +2694,7 @@ "members":{ "CatalogId":{ "shape":"CatalogIdString", - "documentation":"

The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.

" + "documentation":"

The ID of the Data Catalog in which to create the connection. If none is provided, the AWS account ID is used by default.

" }, "ConnectionInput":{ "shape":"ConnectionInput", @@ -2498,6 +2759,10 @@ "CrawlerSecurityConfiguration":{ "shape":"CrawlerSecurityConfiguration", "documentation":"

The name of the SecurityConfiguration structure to be used by this Crawler.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

The tags to use with this crawler request. You may use tags to limit access to the crawler. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.

" } } }, @@ -2571,6 +2836,10 @@ "SecurityConfiguration":{ "shape":"NameString", "documentation":"

The name of the SecurityConfiguration structure to be used with this DevEndpoint.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

The tags to use with this DevEndpoint. You may use tags to limit access to the DevEndpoint. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.

" } } }, @@ -2712,12 +2981,18 @@ }, "AllocatedCapacity":{ "shape":"IntegerValue", - "documentation":"

The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

" + "documentation":"

This parameter is deprecated. Use MaxCapacity instead.

The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use MaxCapacity instead." }, "Timeout":{ "shape":"Timeout", "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

" }, + "MaxCapacity":{ + "shape":"NullableDouble", + "documentation":"

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

The value that can be allocated for MaxCapacity depends on whether you are running a python shell job, or an Apache Spark ETL job:

  • When you specify a python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

  • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

" + }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job notification.

" @@ -2725,6 +3000,10 @@ "SecurityConfiguration":{ "shape":"NameString", "documentation":"

The name of the SecurityConfiguration structure to be used with this job.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

The tags to use with this job. You may use tags to limit access to the job. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.

" } } }, @@ -2907,6 +3186,10 @@ "StartOnCreation":{ "shape":"BooleanValue", "documentation":"

Set to true to start SCHEDULED and CONDITIONAL triggers when created. True not supported for ON_DEMAND triggers.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

The tags to use with this trigger. You may use tags to limit access to the trigger. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.

" } } }, @@ -2987,11 +3270,11 @@ "members":{ "EncryptionAtRest":{ "shape":"EncryptionAtRest", - "documentation":"

Specifies encryption-at-rest configuration for the Data Catalog.

" + "documentation":"

Specifies the encryption-at-rest configuration for the Data Catalog.

" }, "ConnectionPasswordEncryption":{ "shape":"ConnectionPasswordEncryption", - "documentation":"

When password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.

" + "documentation":"

When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.

" } }, "documentation":"

Contains configuration information for maintaining Data Catalog security.

" @@ -3086,7 +3369,7 @@ "members":{ "CatalogId":{ "shape":"CatalogIdString", - "documentation":"

The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.

" + "documentation":"

The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.

" }, "ConnectionName":{ "shape":"NameString", @@ -3449,6 +3732,16 @@ "type":"list", "member":{"shape":"DevEndpoint"} }, + "DevEndpointNameList":{ + "type":"list", + "member":{"shape":"NameString"} + }, + "DevEndpointNames":{ + "type":"list", + "member":{"shape":"GenericString"}, + "max":25, + "min":1 + }, "DynamoDBTarget":{ "type":"structure", "members":{ @@ -3476,7 +3769,7 @@ "documentation":"

The ID of the AWS KMS key to use for encryption at rest.

" } }, - "documentation":"

Specifies encryption-at-rest configuration for the Data Catalog.

" + "documentation":"

Specifies the encryption-at-rest configuration for the Data Catalog.

" }, "EncryptionConfiguration":{ "type":"structure", @@ -3633,7 +3926,7 @@ "members":{ "CatalogId":{ "shape":"CatalogIdString", - "documentation":"

The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.

" + "documentation":"

The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.

" }, "Name":{ "shape":"NameString", @@ -3641,7 +3934,7 @@ }, "HidePassword":{ "shape":"Boolean", - "documentation":"

Allow you to retrieve the connection metadata without displaying the password. For instance, the AWS Glue console uses this flag to retrieve connections, since the console does not display passwords. Set this parameter where the caller may not have permission to use the KMS key to decrypt the password, but does have permission to access the rest of the connection metadata (that is, the other connection properties).

" + "documentation":"

Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but does have permission to access the rest of the connection properties.

" } } }, @@ -3666,14 +3959,14 @@ "documentation":"

The type of connections to return. Currently, only JDBC is supported; SFTP is not supported.

" } }, - "documentation":"

Filters the connection definitions returned by the GetConnections API.

" + "documentation":"

Filters the connection definitions that are returned by the GetConnections API operation.

" }, "GetConnectionsRequest":{ "type":"structure", "members":{ "CatalogId":{ "shape":"CatalogIdString", - "documentation":"

The ID of the Data Catalog in which the connections reside. If none is supplied, the AWS account ID is used by default.

" + "documentation":"

The ID of the Data Catalog in which the connections reside. If none is provided, the AWS account ID is used by default.

" }, "Filter":{ "shape":"GetConnectionsFilter", @@ -3681,7 +3974,7 @@ }, "HidePassword":{ "shape":"Boolean", - "documentation":"

Allow you to retrieve the connection metadata without displaying the password. For instance, the AWS Glue console uses this flag to retrieve connections, since the console does not display passwords. Set this parameter where the caller may not have permission to use the KMS key to decrypt the password, but does have permission to access the rest of the connection metadata (that is, the other connection properties).

" + "documentation":"

Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but does have permission to access the rest of the connection properties.

" }, "NextToken":{ "shape":"Token", @@ -3786,7 +4079,7 @@ "members":{ "CatalogId":{ "shape":"CatalogIdString", - "documentation":"

The ID of the Data Catalog for which to retrieve the security configuration. If none is supplied, the AWS account ID is used by default.

" + "documentation":"

The ID of the Data Catalog for which to retrieve the security configuration. If none is provided, the AWS account ID is used by default.

" } } }, @@ -4401,6 +4694,25 @@ } } }, + "GetTagsRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon ARN of the resource for which to retrieve tags.

" + } + } + }, + "GetTagsResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagsMap", + "documentation":"

The requested tags.

" + } + } + }, "GetTriggerRequest":{ "type":"structure", "required":["Name"], @@ -4533,6 +4845,12 @@ "documentation":"

An encryption operation failed.

", "exception":true }, + "GlueResourceArn":{ + "type":"string", + "max":10240, + "min":1, + "pattern":"arn:aws:glue:.*" + }, "GrokClassifier":{ "type":"structure", "required":[ @@ -4716,12 +5034,18 @@ }, "AllocatedCapacity":{ "shape":"IntegerValue", - "documentation":"

The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

" + "documentation":"

This field is deprecated. Use MaxCapacity instead.

The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use MaxCapacity instead." }, "Timeout":{ "shape":"Timeout", "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

" }, + "MaxCapacity":{ + "shape":"NullableDouble", + "documentation":"

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

The value that can be allocated for MaxCapacity depends on whether you are running a python shell job, or an Apache Spark ETL job:

  • When you specify a python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

  • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

" + }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job notification.

" @@ -4785,7 +5109,7 @@ "members":{ "Name":{ "shape":"GenericString", - "documentation":"

The name of the job command: this must be glueetl.

" + "documentation":"

The name of the job command: this must be glueetl, for an Apache Spark ETL job, or pythonshell, for a Python shell job.

" }, "ScriptLocation":{ "shape":"ScriptLocationString", @@ -4799,6 +5123,10 @@ "member":{"shape":"Job"} }, "JobName":{"type":"string"}, + "JobNameList":{ + "type":"list", + "member":{"shape":"NameString"} + }, "JobRun":{ "type":"structure", "members":{ @@ -4840,7 +5168,7 @@ }, "Arguments":{ "shape":"GenericMap", - "documentation":"

The job arguments associated with this run. These override equivalent default arguments set for the job.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

" + "documentation":"

The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

" }, "ErrorMessage":{ "shape":"ErrorString", @@ -4852,7 +5180,9 @@ }, "AllocatedCapacity":{ "shape":"IntegerValue", - "documentation":"

The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

" + "documentation":"

This field is deprecated. Use MaxCapacity instead.

The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use MaxCapacity instead." }, "ExecutionTime":{ "shape":"ExecutionTime", @@ -4862,6 +5192,10 @@ "shape":"Timeout", "documentation":"

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.

" }, + "MaxCapacity":{ + "shape":"NullableDouble", + "documentation":"

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

The value that can be allocated for MaxCapacity depends on whether you are running a python shell job, or an Apache Spark ETL job:

  • When you specify a python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

  • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

" + }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job run notification.

" @@ -4930,12 +5264,18 @@ }, "AllocatedCapacity":{ "shape":"IntegerValue", - "documentation":"

The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

" + "documentation":"

This field is deprecated. Use MaxCapacity instead.

The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use MaxCapacity instead." }, "Timeout":{ "shape":"Timeout", "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

" }, + "MaxCapacity":{ + "shape":"NullableDouble", + "documentation":"

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

The value that can be allocated for MaxCapacity depends on whether you are running a python shell job, or an Apache Spark ETL job:

  • When you specify a python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

  • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

" + }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job notification.

" @@ -5034,6 +5374,130 @@ "FAILED" ] }, + "ListCrawlersRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum size of a list to return.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if this is a continuation request.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

Specifies to return only these tagged resources.

" + } + } + }, + "ListCrawlersResponse":{ + "type":"structure", + "members":{ + "CrawlerNames":{ + "shape":"CrawlerNameList", + "documentation":"

The names of all crawlers in the account, or the crawlers with the specified tags.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if the returned list does not contain the last metric available.

" + } + } + }, + "ListDevEndpointsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if this is a continuation request.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum size of a list to return.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

Specifies to return only these tagged resources.

" + } + } + }, + "ListDevEndpointsResponse":{ + "type":"structure", + "members":{ + "DevEndpointNames":{ + "shape":"DevEndpointNameList", + "documentation":"

The names of all DevEndpoints in the account, or the DevEndpoints with the specified tags.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if the returned list does not contain the last metric available.

" + } + } + }, + "ListJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if this is a continuation request.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum size of a list to return.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

Specifies to return only these tagged resources.

" + } + } + }, + "ListJobsResponse":{ + "type":"structure", + "members":{ + "JobNames":{ + "shape":"JobNameList", + "documentation":"

The names of all jobs in the account, or the jobs with the specified tags.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if the returned list does not contain the last metric available.

" + } + } + }, + "ListTriggersRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if this is a continuation request.

" + }, + "DependentJobName":{ + "shape":"NameString", + "documentation":"

The name of the job for which to retrieve triggers. The trigger that can start this job will be returned, and if there is no such trigger, all triggers will be returned.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum size of a list to return.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

Specifies to return only these tagged resources.

" + } + } + }, + "ListTriggersResponse":{ + "type":"structure", + "members":{ + "TriggerNames":{ + "shape":"TriggerNameList", + "documentation":"

The names of all triggers in the account, or the triggers with the specified tags.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if the returned list does not contain the last metric available.

" + } + } + }, "Location":{ "type":"structure", "members":{ @@ -5179,6 +5643,10 @@ "box":true, "min":1 }, + "NullableDouble":{ + "type":"double", + "box":true + }, "OperationTimeoutException":{ "type":"structure", "members":{ @@ -5288,7 +5756,7 @@ "members":{ "Values":{ "shape":"ValueStringList", - "documentation":"

The values of the partition.

" + "documentation":"

The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.

" }, "LastAccessTime":{ "shape":"Timestamp", @@ -5348,7 +5816,7 @@ }, "AvailabilityZone":{ "shape":"NameString", - "documentation":"

The connection's availability zone. This field is redundant, since the specified subnet implies the availability zone to be used. The field must be populated now, but will be deprecated in the future.

" + "documentation":"

The connection's Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.

" } }, "documentation":"

Specifies the physical requirements for a connection.

" @@ -5415,7 +5883,7 @@ "members":{ "CatalogId":{ "shape":"CatalogIdString", - "documentation":"

The ID of the Data Catalog for which to set the security configuration. If none is supplied, the AWS account ID is used by default.

" + "documentation":"

The ID of the Data Catalog for which to set the security configuration. If none is provided, the AWS account ID is used by default.

" }, "DataCatalogEncryptionSettings":{ "shape":"DataCatalogEncryptionSettings", @@ -5438,7 +5906,7 @@ }, "PolicyHashCondition":{ "shape":"HashString", - "documentation":"

This is the hash value returned when the previous policy was set using PutResourcePolicy. Its purpose is to prevent concurrent modifications of a policy. Do not use this parameter if no previous policy has been set.

" + "documentation":"

The hash value returned when the previous policy was set using PutResourcePolicy. Its purpose is to prevent concurrent modifications of a policy. Do not use this parameter if no previous policy has been set.

" }, "PolicyExistsCondition":{ "shape":"ExistCondition", @@ -5763,16 +6231,22 @@ }, "Arguments":{ "shape":"GenericMap", - "documentation":"

The job arguments specifically for this run. They override the equivalent default arguments set for in the job definition itself.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

" + "documentation":"

The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

" }, "AllocatedCapacity":{ "shape":"IntegerValue", - "documentation":"

The number of AWS Glue data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

" + "documentation":"

This field is deprecated. Use MaxCapacity instead.

The number of AWS Glue data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use MaxCapacity instead." }, "Timeout":{ "shape":"Timeout", "documentation":"

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.

" }, + "MaxCapacity":{ + "shape":"NullableDouble", + "documentation":"

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

The value that can be allocated for MaxCapacity depends on whether you are running a python shell job, or an Apache Spark ETL job:

  • When you specify a python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

  • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

" + }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job run notification.

" @@ -6112,6 +6586,51 @@ "type":"list", "member":{"shape":"TableVersionError"} }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeysList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagsToAdd" + ], + "members":{ + "ResourceArn":{ + "shape":"GlueResourceArn", + "documentation":"

The ARN of the AWS Glue resource to which to add the tags. For more information about AWS Glue resource ARNs, see the AWS Glue ARN string pattern.

" + }, + "TagsToAdd":{ + "shape":"TagsMap", + "documentation":"

Tags to add to this resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TagsMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":0 + }, "Timeout":{ "type":"integer", "box":true, @@ -6167,6 +6686,10 @@ "type":"list", "member":{"shape":"Trigger"} }, + "TriggerNameList":{ + "type":"list", + "member":{"shape":"NameString"} + }, "TriggerState":{ "type":"string", "enum":[ @@ -6220,6 +6743,28 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagsToRemove" + ], + "members":{ + "ResourceArn":{ + "shape":"GlueResourceArn", + "documentation":"

The ARN of the resource from which to remove the tags.

" + }, + "TagsToRemove":{ + "shape":"TagKeysList", + "documentation":"

Tags to remove from this resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateBehavior":{ "type":"string", "enum":[ @@ -6258,7 +6803,7 @@ "members":{ "CatalogId":{ "shape":"CatalogIdString", - "documentation":"

The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.

" + "documentation":"

The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.

" }, "Name":{ "shape":"NameString", diff --git a/botocore/data/greengrass/2017-06-07/paginators-1.json b/botocore/data/greengrass/2017-06-07/paginators-1.json new file mode 100644 index 00000000..303b4384 --- /dev/null +++ b/botocore/data/greengrass/2017-06-07/paginators-1.json @@ -0,0 +1,118 @@ +{ + "pagination": { + "ListBulkDeploymentDetailedReports": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Deployments" + }, + "ListBulkDeployments": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "BulkDeployments" + }, + "ListConnectorDefinitionVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Versions" + }, + "ListConnectorDefinitions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Definitions" + }, + "ListCoreDefinitionVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Versions" + }, + "ListCoreDefinitions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Definitions" + }, + "ListDeployments": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Deployments" + }, + "ListDeviceDefinitionVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Versions" + }, + "ListDeviceDefinitions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Definitions" + }, + "ListFunctionDefinitionVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Versions" + }, + "ListFunctionDefinitions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": 
"NextToken", + "result_key": "Definitions" + }, + "ListGroupVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Versions" + }, + "ListGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Groups" + }, + "ListLoggerDefinitionVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Versions" + }, + "ListLoggerDefinitions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Definitions" + }, + "ListResourceDefinitionVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Versions" + }, + "ListResourceDefinitions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Definitions" + }, + "ListSubscriptionDefinitionVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Versions" + }, + "ListSubscriptionDefinitions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Definitions" + } + } +} diff --git a/botocore/data/inspector/2016-02-16/paginators-1.json b/botocore/data/inspector/2016-02-16/paginators-1.json index 83d2ad2c..8dec0410 100644 --- a/botocore/data/inspector/2016-02-16/paginators-1.json +++ b/botocore/data/inspector/2016-02-16/paginators-1.json @@ -47,6 +47,12 @@ "output_token": "nextToken", "input_token": "nextToken", "limit_key": "maxResults" + }, + "ListExclusions": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "exclusionArns" } } } diff --git a/botocore/data/iot/2015-05-28/paginators-1.json b/botocore/data/iot/2015-05-28/paginators-1.json index cc1934b2..8d7fefb5 100644 --- 
a/botocore/data/iot/2015-05-28/paginators-1.json +++ b/botocore/data/iot/2015-05-28/paginators-1.json @@ -65,6 +65,161 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "rules" + }, + "ListActiveViolations": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "activeViolations" + }, + "ListAttachedPolicies": { + "input_token": "marker", + "limit_key": "pageSize", + "output_token": "nextMarker", + "result_key": "policies" + }, + "ListAuditFindings": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "findings" + }, + "ListAuditTasks": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "tasks" + }, + "ListAuthorizers": { + "input_token": "marker", + "limit_key": "pageSize", + "output_token": "nextMarker", + "result_key": "authorizers" + }, + "ListBillingGroups": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "billingGroups" + }, + "ListIndices": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "indexNames" + }, + "ListJobExecutionsForJob": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "executionSummaries" + }, + "ListJobExecutionsForThing": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "executionSummaries" + }, + "ListJobs": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "jobs" + }, + "ListOTAUpdates": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "otaUpdates" + }, + "ListRoleAliases": { + "input_token": "marker", + "limit_key": "pageSize", + "output_token": "nextMarker", + "result_key": "roleAliases" + }, + 
"ListScheduledAudits": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "scheduledAudits" + }, + "ListSecurityProfiles": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "securityProfileIdentifiers" + }, + "ListSecurityProfilesForTarget": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "securityProfileTargetMappings" + }, + "ListStreams": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "streams" + }, + "ListTagsForResource": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "tags" + }, + "ListTargetsForPolicy": { + "input_token": "marker", + "limit_key": "pageSize", + "output_token": "nextMarker", + "result_key": "targets" + }, + "ListTargetsForSecurityProfile": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "securityProfileTargets" + }, + "ListThingGroups": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "thingGroups" + }, + "ListThingGroupsForThing": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "thingGroups" + }, + "ListThingRegistrationTasks": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "taskIds" + }, + "ListThingsInBillingGroup": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "things" + }, + "ListThingsInThingGroup": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "things" + }, + "ListV2LoggingLevels": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "logTargetConfigurations" + 
}, + "ListViolationEvents": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "violationEvents" } } } diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index 9f675467..6f3abfa1 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -149,7 +149,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Attaches the specified principal to the specified thing.

" + "documentation":"

Attaches the specified principal to the specified thing. A principal can be X.509 certificates, IAM users, groups, and roles, Amazon Cognito identities or federated identities.

" }, "CancelAuditTask":{ "name":"CancelAuditTask", @@ -489,7 +489,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Creates a thing record in the registry.

This is a control plane operation. See Authorization for information about authorizing control plane actions.

" + "documentation":"

Creates a thing record in the registry.

This is a control plane operation. See Authorization for information about authorizing control plane actions.

" }, "CreateThingGroup":{ "name":"CreateThingGroup", @@ -505,7 +505,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Create a thing group.

This is a control plane operation. See Authorization for information about authorizing control plane actions.

" + "documentation":"

Create a thing group.

This is a control plane operation. See Authorization for information about authorizing control plane actions.

" }, "CreateThingType":{ "name":"CreateThingType", @@ -1325,7 +1325,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Detaches the specified principal from the specified thing.

This call is asynchronous. It might take several seconds for the detachment to propagate.

" + "documentation":"

Detaches the specified principal from the specified thing. A principal can be X.509 certificates, IAM users, groups, and roles, Amazon Cognito identities or federated identities.

This call is asynchronous. It might take several seconds for the detachment to propagate.

" }, "DisableTopicRule":{ "name":"DisableTopicRule", @@ -1846,7 +1846,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists the policies attached to the specified principal. If you use an Cognito identity, the ID must be in AmazonCognito Identity format.

Note: This API is deprecated. Please use ListAttachedPolicies instead.

", + "documentation":"

Lists the policies attached to the specified principal. If you use an Amazon Cognito identity, the ID must be in Amazon Cognito Identity format.

Note: This API is deprecated. Please use ListAttachedPolicies instead.

", "deprecated":true }, "ListPrincipalThings":{ @@ -1865,7 +1865,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the things associated with the specified principal.

" + "documentation":"

Lists the things associated with the specified principal. A principal can be X.509 certificates, IAM users, groups, and roles, Amazon Cognito identities or federated identities.

" }, "ListRoleAliases":{ "name":"ListRoleAliases", @@ -2044,7 +2044,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the principals associated with the specified thing.

" + "documentation":"

Lists the principals associated with the specified thing. A principal can be X.509 certificates, IAM users, groups, and roles, Amazon Cognito identities or federated identities.

" }, "ListThingRegistrationTaskReports":{ "name":"ListThingRegistrationTaskReports", @@ -3095,6 +3095,10 @@ "members":{ } }, + "AdditionalMetricsToRetainList":{ + "type":"list", + "member":{"shape":"BehaviorMetric"} + }, "AdditionalParameterMap":{ "type":"map", "key":{"shape":"AttributeKey"}, @@ -3693,7 +3697,7 @@ "members":{ "comparisonOperator":{ "shape":"ComparisonOperator", - "documentation":"

The operator that relates the thing measured (metric) to the criteria (value).

" + "documentation":"

The operator that relates the thing measured (metric) to the criteria (containing a value or statisticalThreshold).

" }, "value":{ "shape":"MetricValue", @@ -3701,7 +3705,19 @@ }, "durationSeconds":{ "shape":"DurationSeconds", - "documentation":"

Use this to specify the period of time over which the behavior is evaluated, for those criteria which have a time dimension (for example, NUM_MESSAGES_SENT).

" + "documentation":"

Use this to specify the time duration over which the behavior is evaluated, for those criteria which have a time dimension (for example, NUM_MESSAGES_SENT). For a statisticalThreshold metric comparison, measurements from all devices are accumulated over this time duration before being used to calculate percentiles, and later, measurements from an individual device are also accumulated over this time duration before being given a percentile rank.

" + }, + "consecutiveDatapointsToAlarm":{ + "shape":"ConsecutiveDatapointsToAlarm", + "documentation":"

If a device is in violation of the behavior for the specified number of consecutive datapoints, an alarm occurs. If not specified, the default is 1.

" + }, + "consecutiveDatapointsToClear":{ + "shape":"ConsecutiveDatapointsToClear", + "documentation":"

If an alarm has occurred and the offending device is no longer in violation of the behavior for the specified number of consecutive datapoints, the alarm is cleared. If not specified, the default is 1.

" + }, + "statisticalThreshold":{ + "shape":"StatisticalThreshold", + "documentation":"

A statistical ranking (percentile) which indicates a threshold value by which a behavior is determined to be in compliance or in violation of the behavior.

" } }, "documentation":"

The criteria by which the behavior is determined to be normal.

" @@ -4211,11 +4227,11 @@ }, "metricUnit":{ "shape":"String", - "documentation":"

The metric unit supported by CloudWatch.

" + "documentation":"

The metric unit supported by CloudWatch.

" }, "metricTimestamp":{ "shape":"String", - "documentation":"

An optional Unix timestamp.

" + "documentation":"

An optional Unix timestamp.

" } }, "documentation":"

Describes an action that captures a CloudWatch metric.

" @@ -4306,6 +4322,16 @@ "exception":true }, "ConnectivityTimestamp":{"type":"long"}, + "ConsecutiveDatapointsToAlarm":{ + "type":"integer", + "max":10, + "min":1 + }, + "ConsecutiveDatapointsToClear":{ + "type":"integer", + "max":10, + "min":1 + }, "Count":{"type":"integer"}, "CreateAuthorizerRequest":{ "type":"structure", @@ -4448,7 +4474,7 @@ }, "queryString":{ "shape":"QueryString", - "documentation":"

The dynamic thing group search query string.

See Query Syntax for information about query string syntax.

" + "documentation":"

The dynamic thing group search query string.

See Query Syntax for information about query string syntax.

" }, "queryVersion":{ "shape":"QueryVersion", @@ -4815,6 +4841,10 @@ "shape":"TargetAuditCheckNames", "documentation":"

Which checks are performed during the scheduled audit. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration to select which checks are enabled.)

" }, + "tags":{ + "shape":"TagList", + "documentation":"

Metadata which can be used to manage the scheduled audit.

" + }, "scheduledAuditName":{ "shape":"ScheduledAuditName", "documentation":"

The name you want to give to the scheduled audit. (Max. 128 chars)

", @@ -4834,10 +4864,7 @@ }, "CreateSecurityProfileRequest":{ "type":"structure", - "required":[ - "securityProfileName", - "behaviors" - ], + "required":["securityProfileName"], "members":{ "securityProfileName":{ "shape":"SecurityProfileName", @@ -4857,6 +4884,10 @@ "shape":"AlertTargets", "documentation":"

Specifies the destinations to which alerts are sent. (Alerts are always sent to the console.) Alerts are generated when a device (thing) violates a behavior.

" }, + "additionalMetricsToRetain":{ + "shape":"AdditionalMetricsToRetainList", + "documentation":"

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors but it is also retained for any metric specified here.

" + }, "tags":{ "shape":"TagList", "documentation":"

Metadata which can be used to manage the security profile.

" @@ -5064,6 +5095,12 @@ "topicRulePayload":{ "shape":"TopicRulePayload", "documentation":"

The rule payload.

" + }, + "tags":{ + "shape":"String", + "documentation":"

Metadata which can be used to manage the topic rule.

For URI Request parameters use format: ...key1=value1&key2=value2...

For the CLI command-line parameter use format: --tags \"key1=value1&key2=value2...\"

For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"

", + "location":"header", + "locationName":"x-amz-tagging" } }, "documentation":"

The input for the CreateTopicRule operation.

", @@ -5135,6 +5172,8 @@ "members":{ } }, + "DeleteAdditionalMetricsToRetain":{"type":"boolean"}, + "DeleteAlertTargets":{"type":"boolean"}, "DeleteAuthorizerRequest":{ "type":"structure", "required":["authorizerName"], @@ -5152,6 +5191,7 @@ "members":{ } }, + "DeleteBehaviors":{"type":"boolean"}, "DeleteBillingGroupRequest":{ "type":"structure", "required":["billingGroupName"], @@ -6012,6 +6052,10 @@ "shape":"AlertTargets", "documentation":"

Where the alerts are sent. (Alerts are always sent to the console.)

" }, + "additionalMetricsToRetain":{ + "shape":"AdditionalMetricsToRetainList", + "documentation":"

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors but it is also retained for any metric specified here.

" + }, "version":{ "shape":"Version", "documentation":"

The version of the security profile. A new version is generated whenever the security profile is updated.

" @@ -6459,6 +6503,10 @@ }, "DynamoDBv2Action":{ "type":"structure", + "required":[ + "roleArn", + "putItem" + ], "members":{ "roleArn":{ "shape":"AwsArn", @@ -6576,6 +6624,10 @@ "type":"string", "max":2048 }, + "EvaluationStatistic":{ + "type":"string", + "pattern":"(p0|p0\\.1|p0\\.01|p1|p10|p50|p90|p99|p99\\.9|p99\\.99|p100)" + }, "EventConfigurations":{ "type":"map", "key":{"shape":"EventType"}, @@ -7220,15 +7272,15 @@ }, "createdAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job was created.

" + "documentation":"

The time, in seconds since the epoch, when the job was created.

" }, "lastUpdatedAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job was last updated.

" + "documentation":"

The time, in seconds since the epoch, when the job was last updated.

" }, "completedAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job was completed.

" + "documentation":"

The time, in seconds since the epoch, when the job was completed.

" }, "jobProcessDetails":{ "shape":"JobProcessDetails", @@ -7281,15 +7333,15 @@ }, "queuedAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job execution was queued.

" + "documentation":"

The time, in seconds since the epoch, when the job execution was queued.

" }, "startedAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job execution started.

" + "documentation":"

The time, in seconds since the epoch, when the job execution started.

" }, "lastUpdatedAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job execution was last updated.

" + "documentation":"

The time, in seconds since the epoch, when the job execution was last updated.

" }, "executionNumber":{ "shape":"ExecutionNumber", @@ -7347,15 +7399,15 @@ }, "queuedAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job execution was queued.

" + "documentation":"

The time, in seconds since the epoch, when the job execution was queued.

" }, "startedAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job execution started.

" + "documentation":"

The time, in seconds since the epoch, when the job execution started.

" }, "lastUpdatedAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job execution was last updated.

" + "documentation":"

The time, in seconds since the epoch, when the job execution was last updated.

" }, "executionNumber":{ "shape":"ExecutionNumber", @@ -7496,15 +7548,15 @@ }, "createdAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job was created.

" + "documentation":"

The time, in seconds since the epoch, when the job was created.

" }, "lastUpdatedAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job was last updated.

" + "documentation":"

The time, in seconds since the epoch, when the job was last updated.

" }, "completedAt":{ "shape":"DateType", - "documentation":"

The time, in milliseconds since the epoch, when the job completed.

" + "documentation":"

The time, in seconds since the epoch, when the job completed.

" } }, "documentation":"

The job summary.

" @@ -9842,11 +9894,11 @@ "members":{ "templateBody":{ "shape":"TemplateBody", - "documentation":"

The provisioning template. See Programmatic Provisioning for more information.

" + "documentation":"

The provisioning template. See Programmatic Provisioning for more information.

" }, "parameters":{ "shape":"Parameters", - "documentation":"

The parameters for provisioning a thing. See Programmatic Provisioning for more information.

" + "documentation":"

The parameters for provisioning a thing. See Programmatic Provisioning for more information.

" } } }, @@ -10234,7 +10286,7 @@ }, "cannedAcl":{ "shape":"CannedAccessControlList", - "documentation":"

The Amazon S3 canned ACL that controls access to the object identified by the object key. For more information, see S3 canned ACLs.

" + "documentation":"

The Amazon S3 canned ACL that controls access to the object identified by the object key. For more information, see S3 canned ACLs.

" } }, "documentation":"

Describes an action to write data to an Amazon S3 bucket.

" @@ -10620,7 +10672,7 @@ }, "messageFormat":{ "shape":"MessageFormat", - "documentation":"

(Optional) The message format of the message to publish. Accepted values are \"JSON\" and \"RAW\". The default value of the attribute is \"RAW\". SNS uses this setting to determine if the payload should be parsed and relevant platform-specific bits of the payload should be extracted. To read more about SNS message formats, see http://docs.aws.amazon.com/sns/latest/dg/json-formats.html refer to their official documentation.

" + "documentation":"

(Optional) The message format of the message to publish. Accepted values are \"JSON\" and \"RAW\". The default value of the attribute is \"RAW\". SNS uses this setting to determine if the payload should be parsed and relevant platform-specific bits of the payload should be extracted. To read more about SNS message formats, see https://docs.aws.amazon.com/sns/latest/dg/json-formats.html refer to their official documentation.

" } }, "documentation":"

Describes an action to publish to an Amazon SNS topic.

" @@ -10735,6 +10787,16 @@ "StateMachineName":{"type":"string"}, "StateReason":{"type":"string"}, "StateValue":{"type":"string"}, + "StatisticalThreshold":{ + "type":"structure", + "members":{ + "statistic":{ + "shape":"EvaluationStatistic", + "documentation":"

The percentile which resolves to a threshold value by which compliance with a behavior is determined. Metrics are collected over the specified period (durationSeconds) from all reporting devices in your account and statistical ranks are calculated. Then, the measurements from a device are collected over the same period. If the accumulated measurements from the device fall above or below (comparisonOperator) the value associated with the percentile specified, then the device is considered to be in compliance with the behavior, otherwise a violation occurs.

" + } + }, + "documentation":"

A statistical ranking (percentile) which indicates a threshold value by which a behavior is determined to be in compliance or in violation of the behavior.

" + }, "Status":{ "type":"string", "enum":[ @@ -11530,7 +11592,7 @@ "members":{ "sql":{ "shape":"SQL", - "documentation":"

The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference in the AWS IoT Developer Guide.

" + "documentation":"

The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference in the AWS IoT Developer Guide.

" }, "description":{ "shape":"Description", @@ -12032,6 +12094,22 @@ "shape":"AlertTargets", "documentation":"

Where the alerts are sent. (Alerts are always sent to the console.)

" }, + "additionalMetricsToRetain":{ + "shape":"AdditionalMetricsToRetainList", + "documentation":"

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors but it is also retained for any metric specified here.

" + }, + "deleteBehaviors":{ + "shape":"DeleteBehaviors", + "documentation":"

If true, delete all behaviors defined for this security profile. If any behaviors are defined in the current invocation an exception occurs.

" + }, + "deleteAlertTargets":{ + "shape":"DeleteAlertTargets", + "documentation":"

If true, delete all alertTargets defined for this security profile. If any alertTargets are defined in the current invocation an exception occurs.

" + }, + "deleteAdditionalMetricsToRetain":{ + "shape":"DeleteAdditionalMetricsToRetain", + "documentation":"

If true, delete all additionalMetricsToRetain defined for this security profile. If any additionalMetricsToRetain are defined in the current invocation an exception occurs.

" + }, "expectedVersion":{ "shape":"OptionalVersion", "documentation":"

The expected version of the security profile. A new version is generated whenever the security profile is updated. If you specify a value that is different than the actual version, a VersionConflictException is thrown.

", @@ -12063,6 +12141,10 @@ "shape":"AlertTargets", "documentation":"

Where the alerts are sent. (Alerts are always sent to the console.)

" }, + "additionalMetricsToRetain":{ + "shape":"AdditionalMetricsToRetainList", + "documentation":"

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the security profile's behaviors but it is also retained for any metric specified here.

" + }, "version":{ "shape":"Version", "documentation":"

The updated version of the security profile.

" @@ -12338,5 +12420,5 @@ "resourceArn":{"type":"string"}, "resourceId":{"type":"string"} }, - "documentation":"AWS IoT

AWS IoT provides secure, bi-directional communication between Internet-connected devices (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. You can discover your custom IoT-Data endpoint to communicate with, configure rules for data processing and integration with other services, organize resources associated with each device (Registry), configure logging, and create and manage policies and credentials to authenticate devices.

For more information about how AWS IoT works, see the Developer Guide.

For information about how to use the credentials provider for AWS IoT, see Authorizing Direct Calls to AWS Services.

" + "documentation":"AWS IoT

AWS IoT provides secure, bi-directional communication between Internet-connected devices (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. You can discover your custom IoT-Data endpoint to communicate with, configure rules for data processing and integration with other services, organize resources associated with each device (Registry), configure logging, and create and manage policies and credentials to authenticate devices.

For more information about how AWS IoT works, see the Developer Guide.

For information about how to use the credentials provider for AWS IoT, see Authorizing Direct Calls to AWS Services.

" } diff --git a/botocore/data/iot1click-devices/2018-05-14/paginators-1.json b/botocore/data/iot1click-devices/2018-05-14/paginators-1.json new file mode 100644 index 00000000..237e5581 --- /dev/null +++ b/botocore/data/iot1click-devices/2018-05-14/paginators-1.json @@ -0,0 +1,16 @@ +{ + "pagination": { + "ListDeviceEvents": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Events" + }, + "ListDevices": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Devices" + } + } +} diff --git a/botocore/data/iot1click-projects/2018-05-14/paginators-1.json b/botocore/data/iot1click-projects/2018-05-14/paginators-1.json index ea142457..d17d5df6 100644 --- a/botocore/data/iot1click-projects/2018-05-14/paginators-1.json +++ b/botocore/data/iot1click-projects/2018-05-14/paginators-1.json @@ -1,3 +1,16 @@ { - "pagination": {} + "pagination": { + "ListPlacements": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "placements" + }, + "ListProjects": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "projects" + } + } } diff --git a/botocore/data/iotanalytics/2017-11-27/paginators-1.json b/botocore/data/iotanalytics/2017-11-27/paginators-1.json index ea142457..d1bfaaaa 100644 --- a/botocore/data/iotanalytics/2017-11-27/paginators-1.json +++ b/botocore/data/iotanalytics/2017-11-27/paginators-1.json @@ -1,3 +1,34 @@ { - "pagination": {} + "pagination": { + "ListChannels": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "channelSummaries" + }, + "ListDatasetContents": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "datasetContentSummaries" + }, + "ListDatasets": { + "input_token": "nextToken", + "limit_key": "maxResults", + 
"output_token": "nextToken", + "result_key": "datasetSummaries" + }, + "ListDatastores": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "datastoreSummaries" + }, + "ListPipelines": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "pipelineSummaries" + } + } } diff --git a/botocore/data/iotanalytics/2017-11-27/service-2.json b/botocore/data/iotanalytics/2017-11-27/service-2.json index 9d30746e..83aebb6e 100644 --- a/botocore/data/iotanalytics/2017-11-27/service-2.json +++ b/botocore/data/iotanalytics/2017-11-27/service-2.json @@ -99,7 +99,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates the content of a data set by applying a SQL action.

" + "documentation":"

Creates the content of a data set by applying a \"queryAction\" (a SQL query) or a \"containerAction\" (executing a containerized application).

" }, "CreateDatastore":{ "name":"CreateDatastore", @@ -681,7 +681,7 @@ }, "messages":{ "shape":"Messages", - "documentation":"

The list of messages to be sent. Each message has format: '{ \"messageId\": \"string\", \"payload\": \"string\"}'.

" + "documentation":"

The list of messages to be sent. Each message has format: '{ \"messageId\": \"string\", \"payload\": \"string\"}'.

Note that the field names of message payloads (data) that you send to AWS IoT Analytics:

  • Must contain only alphanumeric characters and underscores (_); no other special characters are allowed.

  • Must begin with an alphabetic character or single underscore (_).

  • Cannot contain hyphens (-).

  • In regular expression terms: \"^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$\".

  • Cannot be greater than 255 characters.

  • Are case-insensitive. (Fields named \"foo\" and \"FOO\" in the same payload are considered duplicates.)

For example, {\"temp_01\": 29} or {\"_temp_01\": 29} are valid, but {\"temp-01\": 29}, {\"01_temp\": 29} or {\"__temp_01\": 29} are invalid in message payloads.

" } } }, @@ -932,7 +932,10 @@ "shape":"DatasetTriggers", "documentation":"

A list of triggers. A trigger causes data set contents to be populated at a specified time interval or when another data set's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.

" }, - "contentDeliveryRules":{"shape":"DatasetContentDeliveryRules"}, + "contentDeliveryRules":{ + "shape":"DatasetContentDeliveryRules", + "documentation":"

When data set contents are created they are delivered to destinations specified here.

" + }, "retentionPeriod":{ "shape":"RetentionPeriod", "documentation":"

[Optional] How long, in days, message data is kept for the data set. If not given or set to null, the latest version of the dataset content plus the latest succeeded version (if they are different) are retained for at most 90 days.

" @@ -1048,7 +1051,10 @@ "shape":"DatasetTriggers", "documentation":"

The \"DatasetTrigger\" objects that specify when the data set is automatically updated.

" }, - "contentDeliveryRules":{"shape":"DatasetContentDeliveryRules"}, + "contentDeliveryRules":{ + "shape":"DatasetContentDeliveryRules", + "documentation":"

When data set contents are created they are delivered to destinations specified here.

" + }, "status":{ "shape":"DatasetStatus", "documentation":"

The status of the data set.

" @@ -1077,14 +1083,14 @@ }, "queryAction":{ "shape":"SqlQueryDatasetAction", - "documentation":"

An \"SqlQueryDatasetAction\" object that contains the SQL query to modify the message.

" + "documentation":"

An \"SqlQueryDatasetAction\" object that uses an SQL query to automatically create data set contents.

" }, "containerAction":{ "shape":"ContainerDatasetAction", "documentation":"

Information which allows the system to run a containerized application in order to create the data set contents. The application must be in a Docker container along with any needed support libraries.

" } }, - "documentation":"

A \"DatasetAction\" object specifying the query that creates the data set content.

" + "documentation":"

A \"DatasetAction\" object that specifies how data set contents are automatically created.

" }, "DatasetActionName":{ "type":"string", @@ -1129,16 +1135,27 @@ "DatasetContentDeliveryDestination":{ "type":"structure", "members":{ - "iotEventsDestinationConfiguration":{"shape":"IotEventsDestinationConfiguration"} - } + "iotEventsDestinationConfiguration":{ + "shape":"IotEventsDestinationConfiguration", + "documentation":"

Configuration information for delivery of data set contents to AWS IoT Events.

" + } + }, + "documentation":"

The destination to which data set contents are delivered.

" }, "DatasetContentDeliveryRule":{ "type":"structure", "required":["destination"], "members":{ - "entryName":{"shape":"EntryName"}, - "destination":{"shape":"DatasetContentDeliveryDestination"} - } + "entryName":{ + "shape":"EntryName", + "documentation":"

The name of the data set content delivery rules entry.

" + }, + "destination":{ + "shape":"DatasetContentDeliveryDestination", + "documentation":"

The destination to which data set contents are delivered.

" + } + }, + "documentation":"

When data set contents are created they are delivered to destination specified here.

" }, "DatasetContentDeliveryRules":{ "type":"list", @@ -1205,10 +1222,10 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set whose latest contents will be used as input to the notebook or application.

" + "documentation":"

The name of the data set whose latest contents are used as input to the notebook or application.

" } }, - "documentation":"

The data set whose latest contents will be used as input to the notebook or application.

" + "documentation":"

The data set whose latest contents are used as input to the notebook or application.

" }, "DatasetEntries":{ "type":"list", @@ -1285,7 +1302,7 @@ }, "dataset":{ "shape":"TriggeringDataset", - "documentation":"

The data set whose content creation will trigger the creation of this data set's contents.

" + "documentation":"

The data set whose content creation triggers the creation of this data set's contents.

" } }, "documentation":"

The \"DatasetTrigger\" that specifies when the data set is automatically updated.

" @@ -1470,14 +1487,14 @@ "members":{ "offsetSeconds":{ "shape":"OffsetSeconds", - "documentation":"

The number of seconds of estimated \"in flight\" lag time of message data.

" + "documentation":"

The number of seconds of estimated \"in flight\" lag time of message data. When you create data set contents using message data from a specified time frame, some message data may still be \"in flight\" when processing begins, and so will not arrive in time to be processed. Use this field to make allowances for the \"in flight\" time of your message data, so that data not processed from a previous time frame will be included with the next time frame. Without this, missed message data would be excluded from processing during the next time frame as well, because its timestamp places it within the previous time frame.

" }, "timeExpression":{ "shape":"TimeExpression", "documentation":"

An expression by which the time of the message data may be determined. This may be the name of a timestamp field, or a SQL expression which is used to derive the time the message data was generated.

" } }, - "documentation":"

When you create data set contents using message data from a specified time frame, some message data may still be \"in flight\" when processing begins, and so will not arrive in time to be processed. Use this field to make allowances for the \"in flight\" time of your message data, so that data not processed from the previous time frame will be included with the next time frame. Without this, missed message data would be excluded from processing during the next time frame as well, because its timestamp places it within the previous time frame.

" + "documentation":"

Used to limit data to that which has arrived since the last execution of the action.

" }, "DescribeChannelRequest":{ "type":"structure", @@ -1773,9 +1790,16 @@ "roleArn" ], "members":{ - "inputName":{"shape":"IotEventsInputName"}, - "roleArn":{"shape":"RoleArn"} - } + "inputName":{ + "shape":"IotEventsInputName", + "documentation":"

The name of the AWS IoT Events input to which data set contents are delivered.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the role which grants AWS IoT Analytics permission to deliver data set contents to an AWS IoT Events input.

" + } + }, + "documentation":"

Configuration information for delivery of data set contents to AWS IoT Events.

" }, "IotEventsInputName":{ "type":"string", @@ -1876,6 +1900,18 @@ "documentation":"

The maximum number of results to return in this request.

", "location":"querystring", "locationName":"maxResults" + }, + "scheduledOnOrAfter":{ + "shape":"Timestamp", + "documentation":"

A filter to limit results to those data set contents whose creation is scheduled on or after the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", + "location":"querystring", + "locationName":"scheduledOnOrAfter" + }, + "scheduledBefore":{ + "shape":"Timestamp", + "documentation":"

A filter to limit results to those data set contents whose creation is scheduled before the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", + "location":"querystring", + "locationName":"scheduledBefore" } } }, @@ -2046,7 +2082,7 @@ }, "attribute":{ "shape":"AttributeName", - "documentation":"

The name of the attribute that will contain the result of the math operation.

" + "documentation":"

The name of the attribute that contains the result of the math operation.

" }, "math":{ "shape":"MathExpression", @@ -2123,7 +2159,7 @@ "documentation":"

The URI of the location where data set contents are stored, usually the URI of a file in an S3 bucket.

" } }, - "documentation":"

The URI of the location where data set contents are stored, usually the URI of a file in an S3 bucket.

" + "documentation":"

The value of the variable as a structure that specifies an output file URI.

" }, "Pipeline":{ "type":"structure", @@ -2256,7 +2292,7 @@ "members":{ "deltaTime":{ "shape":"DeltaTime", - "documentation":"

Used to limit data to that which has arrived since the last execution of the action. When you create data set contents using message data from a specified time frame, some message data may still be \"in flight\" when processing begins, and so will not arrive in time to be processed. Use this field to make allowances for the \"in flight\" time of you message data, so that data not processed from a previous time frame will be included with the next time frame. Without this, missed message data would be excluded from processing during the next time frame as well, because its timestamp places it within the previous time frame.

" + "documentation":"

Used to limit data to that which has arrived since the last execution of the action.

" } }, "documentation":"

Information which is used to filter message data, to segregate it according to the time frame in which it arrives.

" @@ -2602,7 +2638,7 @@ "members":{ "resourceArn":{ "shape":"ResourceArn", - "documentation":"

The ARN of the resource whose tags will be modified.

", + "documentation":"

The ARN of the resource whose tags you want to modify.

", "location":"querystring", "locationName":"resourceArn" }, @@ -2639,10 +2675,10 @@ "members":{ "name":{ "shape":"DatasetName", - "documentation":"

The name of the data set whose content generation will trigger the new data set content generation.

" + "documentation":"

The name of the data set whose content generation triggers the new data set content generation.

" } }, - "documentation":"

Information about the data set whose content generation will trigger the new data set content generation.

" + "documentation":"

Information about the data set whose content generation triggers the new data set content generation.

" }, "UnlimitedRetentionPeriod":{"type":"boolean"}, "UntagResourceRequest":{ @@ -2654,13 +2690,13 @@ "members":{ "resourceArn":{ "shape":"ResourceArn", - "documentation":"

The ARN of the resource whose tags will be removed.

", + "documentation":"

The ARN of the resource whose tags you want to remove.

", "location":"querystring", "locationName":"resourceArn" }, "tagKeys":{ "shape":"TagKeyList", - "documentation":"

The keys of those tags which will be removed.

", + "documentation":"

The keys of those tags which you want to remove.

", "location":"querystring", "locationName":"tagKeys" } @@ -2708,7 +2744,10 @@ "shape":"DatasetTriggers", "documentation":"

A list of \"DatasetTrigger\" objects. The list can be empty or can contain up to five DataSetTrigger objects.

" }, - "contentDeliveryRules":{"shape":"DatasetContentDeliveryRules"}, + "contentDeliveryRules":{ + "shape":"DatasetContentDeliveryRules", + "documentation":"

When data set contents are created they are delivered to destinations specified here.

" + }, "retentionPeriod":{ "shape":"RetentionPeriod", "documentation":"

How long, in days, message data is kept for the data set.

" diff --git a/botocore/data/kafka/2018-11-14/paginators-1.json b/botocore/data/kafka/2018-11-14/paginators-1.json index ea142457..31df4946 100644 --- a/botocore/data/kafka/2018-11-14/paginators-1.json +++ b/botocore/data/kafka/2018-11-14/paginators-1.json @@ -1,3 +1,16 @@ { - "pagination": {} + "pagination": { + "ListClusters": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ClusterInfoList" + }, + "ListNodes": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "NodeInfoList" + } + } } diff --git a/botocore/data/kinesis-video-archived-media/2017-09-30/paginators-1.json b/botocore/data/kinesis-video-archived-media/2017-09-30/paginators-1.json index ea142457..f5e4d732 100644 --- a/botocore/data/kinesis-video-archived-media/2017-09-30/paginators-1.json +++ b/botocore/data/kinesis-video-archived-media/2017-09-30/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListFragments": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Fragments" + } + } } diff --git a/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json b/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json index 78176e37..d05ab62e 100644 --- a/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json +++ b/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json @@ -29,7 +29,7 @@ {"shape":"MissingCodecPrivateDataException"}, {"shape":"InvalidCodecPrivateDataException"} ], - "documentation":"

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

You must specify either the StreamName or the StreamARN.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

  • The media type must be video/h264.

  • Data retention must be greater than 0.

  • The fragments must contain codec private data in the AVC (Advanced Video Coding) for H.264 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF), rather than the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bit rates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video decoder and renderer. The media fragments contain H.264-encoded video frames and time stamps.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action, and additional metadata for the media player, including estimated bit rate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream, which the media player needs to decode video frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's video frames and their time stamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data are excluded in the HLS media playlist. Therefore, the codec private data does not change between fragments in a session.

      Data retrieved with this action is billable. See Pricing for details.

The following restrictions apply to HLS sessions:

  • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

  • A Kinesis video stream can have a maximum of five active HLS streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active HLS sessions does not count against the active GetMedia connection limit.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

" + "documentation":"

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

You must specify either the StreamName or the StreamARN.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

  • The media must contain h.264 encoded video and, optionally, AAC encoded audio. Specifically, the codec id of track 1 should be V_MPEG/ISO/AVC. Optionally, the codec id of track 2 should be A_AAC.

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF), rather than the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

    • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.

      If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.

      Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.

The following restrictions apply to HLS sessions:

  • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

  • A Kinesis video stream can have a maximum of five active HLS streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active HLS sessions does not count against the active GetMedia connection limit.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

" }, "GetMediaForFragmentList":{ "name":"GetMediaForFragmentList", @@ -45,7 +45,7 @@ {"shape":"ClientLimitExceededException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

The following limits apply when using the GetMediaForFragmentList API:

  • A client can call GetMediaForFragmentList up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMediaForFragmentList session.

" + "documentation":"

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

The following limits apply when using the GetMediaForFragmentList API:

  • A client can call GetMediaForFragmentList up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMediaForFragmentList session.

" }, "ListFragments":{ "name":"ListFragments", @@ -61,7 +61,7 @@ {"shape":"ClientLimitExceededException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Returns a list of Fragment objects from the specified stream and start location within the archived data.

" + "documentation":"

Returns a list of Fragment objects from the specified stream and timestamp range within the archived data.

Listing fragments is eventually consistent. This means that even if the producer receives an acknowledgment that a fragment is persisted, the result might not be returned immediately from a request to ListFragments. However, results are typically available in less than one second.

You must first call the GetDataEndpoint API to get an endpoint. Then send the ListFragments requests to this endpoint using the --endpoint-url parameter.

" } }, "shapes":{ @@ -74,6 +74,13 @@ "error":{"httpStatusCode":400}, "exception":true }, + "ContainerFormat":{ + "type":"string", + "enum":[ + "FRAGMENTED_MP4", + "MPEG_TS" + ] + }, "ContentType":{ "type":"string", "max":128, @@ -87,6 +94,13 @@ "NEVER" ] }, + "DisplayFragmentTimestamp":{ + "type":"string", + "enum":[ + "ALWAYS", + "NEVER" + ] + }, "ErrorMessage":{"type":"string"}, "Expires":{ "type":"integer", @@ -106,11 +120,11 @@ }, "ProducerTimestamp":{ "shape":"Timestamp", - "documentation":"

The time stamp from the producer corresponding to the fragment.

" + "documentation":"

The timestamp from the producer corresponding to the fragment.

" }, "ServerTimestamp":{ "shape":"Timestamp", - "documentation":"

The time stamp from the AWS server corresponding to the fragment.

" + "documentation":"

The timestamp from the AWS server corresponding to the fragment.

" }, "FragmentLengthInMilliseconds":{ "shape":"Long", @@ -144,14 +158,14 @@ "members":{ "FragmentSelectorType":{ "shape":"FragmentSelectorType", - "documentation":"

The origin of the time stamps to use (Server or Producer).

" + "documentation":"

The origin of the timestamps to use (Server or Producer).

" }, "TimestampRange":{ "shape":"TimestampRange", - "documentation":"

The range of time stamps to return.

" + "documentation":"

The range of timestamps to return.

" } }, - "documentation":"

Describes the time stamp range and time stamp origin of a range of fragments.

" + "documentation":"

Describes the timestamp range and timestamp origin of a range of fragments.

Only fragments with a start timestamp greater than or equal to the given start time and less than or equal to the end time are returned. For example, if a stream contains fragments with the following start timestamps:

  • 00:00:00

  • 00:00:02

  • 00:00:04

  • 00:00:06

A fragment selector range with a start time of 00:00:01 and end time of 00:00:04 would return the fragments with start times of 00:00:02 and 00:00:04.

" }, "FragmentSelectorType":{ "type":"string", @@ -173,15 +187,23 @@ }, "PlaybackMode":{ "shape":"PlaybackMode", - "documentation":"

Whether to retrieve live or archived, on-demand data.

Features of the two types of session include the following:

  • LIVE : For sessions of this type, the HLS media playlist is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new playlist on a one-second interval. When this type of session is played in a media player, the user interface typically displays a \"live\" notification, with no scrubber control for choosing the position in the playback window to display.

    In LIVE mode, the newest available fragments are included in an HLS media playlist, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the HLS media playlist if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the playlist, the older fragment is not added, and the gap is not filled.

  • ON_DEMAND : For sessions of this type, the HLS media playlist contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults. The playlist must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.

In both playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start time stamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different time stamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.

The default is LIVE.

" + "documentation":"

Whether to retrieve live or archived, on-demand data.

Features of the two types of session include the following:

  • LIVE : For sessions of this type, the HLS media playlist is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new playlist on a one-second interval. When this type of session is played in a media player, the user interface typically displays a \"live\" notification, with no scrubber control for choosing the position in the playback window to display.

    In LIVE mode, the newest available fragments are included in an HLS media playlist, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the HLS media playlist if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the playlist, the older fragment is not added, and the gap is not filled.

  • ON_DEMAND : For sessions of this type, the HLS media playlist contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults. The playlist must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.

In both playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.

The default is LIVE.

" }, "HLSFragmentSelector":{ "shape":"HLSFragmentSelector", - "documentation":"

The time range of the requested fragment, and the source of the time stamps.

This parameter is required if PlaybackMode is ON_DEMAND. This parameter is optional if PlaybackMode is LIVE. If PlaybackMode is LIVE, the FragmentSelectorType can be set, but the TimestampRange should not be set. If PlaybackMode is ON_DEMAND, both FragmentSelectorType and TimestampRange must be set.

" + "documentation":"

The time range of the requested fragment, and the source of the timestamps.

This parameter is required if PlaybackMode is ON_DEMAND. This parameter is optional if PlaybackMode is LIVE. If PlaybackMode is LIVE, the FragmentSelectorType can be set, but the TimestampRange should not be set. If PlaybackMode is ON_DEMAND, both FragmentSelectorType and TimestampRange must be set.

" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "documentation":"

Specifies which format should be used for packaging the media. Specifying the FRAGMENTED_MP4 container format packages the media into MP4 fragments (fMP4 or CMAF). This is the recommended packaging because there is minimal packaging overhead. The other container format option is MPEG_TS. HLS has supported MPEG TS chunks since it was released and is sometimes the only supported packaging on older HLS players. MPEG TS typically has a 5-25 percent packaging overhead. This means MPEG TS typically requires 5-25 percent more bandwidth and cost than fMP4.

The default is FRAGMENTED_MP4.

" }, "DiscontinuityMode":{ "shape":"DiscontinuityMode", - "documentation":"

Specifies when flags marking discontinuities between fragments will be added to the media playlists. The default is ALWAYS when HLSFragmentSelector is SERVER_TIMESTAMP, and NEVER when it is PRODUCER_TIMESTAMP.

Media players typically build a timeline of media content to play, based on the time stamps of each fragment. This means that if there is any overlap between fragments (as is typical if HLSFragmentSelector is SERVER_TIMESTAMP), the media player timeline has small gaps between fragments in some places, and overwrites frames in other places. When there are discontinuity flags between fragments, the media player is expected to reset the timeline, resulting in the fragment being played immediately after the previous fragment. We recommend that you always have discontinuity flags between fragments if the fragment time stamps are not accurate or if fragments might be missing. You should not place discontinuity flags between fragments for the player timeline to accurately map to the producer time stamps.

" + "documentation":"

Specifies when flags marking discontinuities between fragments will be added to the media playlists. The default is ALWAYS when HLSFragmentSelector is SERVER_TIMESTAMP, and NEVER when it is PRODUCER_TIMESTAMP.

Media players typically build a timeline of media content to play, based on the timestamps of each fragment. This means that if there is any overlap between fragments (as is typical if HLSFragmentSelector is SERVER_TIMESTAMP), the media player timeline has small gaps between fragments in some places, and overwrites frames in other places. When there are discontinuity flags between fragments, the media player is expected to reset the timeline, resulting in the fragment being played immediately after the previous fragment. We recommend that you always have discontinuity flags between fragments if the fragment timestamps are not accurate or if fragments might be missing. You should not place discontinuity flags between fragments for the player timeline to accurately map to the producer timestamps.

" + }, + "DisplayFragmentTimestamp":{ + "shape":"DisplayFragmentTimestamp", + "documentation":"

Specifies when the fragment start timestamps should be included in the HLS media playlist. Typically, media players report the playhead position as a time relative to the start of the first fragment in the playback session. However, when the start timestamps are included in the HLS media playlist, some media players might report the current playhead as an absolute time based on the fragment timestamps. This can be useful for creating a playback experience that shows viewers the wall-clock time of the media.

The default is NEVER. When HLSFragmentSelector is SERVER_TIMESTAMP, the timestamps will be the server start timestamps. Similarly, when HLSFragmentSelector is PRODUCER_TIMESTAMP, the timestamps will be the producer start timestamps.

" }, "Expires":{ "shape":"Expires", @@ -230,7 +252,7 @@ }, "Payload":{ "shape":"Payload", - "documentation":"

The payload that Kinesis Video Streams returns is a sequence of chunks from the specified stream. For information about the chunks, see PutMedia. The chunks that Kinesis Video Streams returns in the GetMediaForFragmentList call also include the following additional Matroska (MKV) tags:

  • AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk.

  • AWS_KINESISVIDEO_SERVER_SIDE_TIMESTAMP - Server-side time stamp of the fragment.

  • AWS_KINESISVIDEO_PRODUCER_SIDE_TIMESTAMP - Producer-side time stamp of the fragment.

The following tags will be included if an exception occurs:

  • AWS_KINESISVIDEO_FRAGMENT_NUMBER - The number of the fragment that threw the exception

  • AWS_KINESISVIDEO_EXCEPTION_ERROR_CODE - The integer code of the exception

  • AWS_KINESISVIDEO_EXCEPTION_MESSAGE - A text description of the exception

" + "documentation":"

The payload that Kinesis Video Streams returns is a sequence of chunks from the specified stream. For information about the chunks, see PutMedia. The chunks that Kinesis Video Streams returns in the GetMediaForFragmentList call also include the following additional Matroska (MKV) tags:

  • AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk.

  • AWS_KINESISVIDEO_SERVER_SIDE_TIMESTAMP - Server-side timestamp of the fragment.

  • AWS_KINESISVIDEO_PRODUCER_SIDE_TIMESTAMP - Producer-side timestamp of the fragment.

The following tags will be included if an exception occurs:

  • AWS_KINESISVIDEO_FRAGMENT_NUMBER - The number of the fragment that threw the exception

  • AWS_KINESISVIDEO_EXCEPTION_ERROR_CODE - The integer code of the exception

  • AWS_KINESISVIDEO_EXCEPTION_MESSAGE - A text description of the exception

" } }, "payload":"Payload" @@ -240,14 +262,14 @@ "members":{ "FragmentSelectorType":{ "shape":"HLSFragmentSelectorType", - "documentation":"

The source of the time stamps for the requested media.

When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is ON_DEMAND, the first fragment ingested with a producer time stamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In addition, the fragments with producer time stamps within the TimestampRange ingested immediately following the first fragment (up to the GetHLSStreamingSessionURLInput$MaxMediaPlaylistFragmentResults value) are included.

Fragments that have duplicate producer time stamps are deduplicated. This means that if producers are producing a stream of fragments with producer time stamps that are approximately equal to the true clock time, the HLS media playlists will contain all of the fragments within the requested time stamp range. If some fragments are ingested within the same time range and very different points in time, only the oldest ingested collection of fragments are returned.

When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is LIVE, the producer time stamps are used in the MP4 fragments and for deduplication. But the most recently ingested fragments based on server time stamps are included in the HLS media playlist. This means that even if fragments ingested in the past have producer time stamps with values now, they are not included in the HLS media playlist.

The default is SERVER_TIMESTAMP.

" + "documentation":"

The source of the timestamps for the requested media.

When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is ON_DEMAND, the first fragment ingested with a producer timestamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In addition, the fragments with producer timestamps within the TimestampRange ingested immediately following the first fragment (up to the GetHLSStreamingSessionURLInput$MaxMediaPlaylistFragmentResults value) are included.

Fragments that have duplicate producer timestamps are deduplicated. This means that if producers are producing a stream of fragments with producer timestamps that are approximately equal to the true clock time, the HLS media playlists will contain all of the fragments within the requested timestamp range. If some fragments are ingested within the same time range and very different points in time, only the oldest ingested collection of fragments are returned.

When FragmentSelectorType is set to PRODUCER_TIMESTAMP and GetHLSStreamingSessionURLInput$PlaybackMode is LIVE, the producer timestamps are used in the MP4 fragments and for deduplication. But the most recently ingested fragments based on server timestamps are included in the HLS media playlist. This means that even if fragments ingested in the past have producer timestamps with values now, they are not included in the HLS media playlist.

The default is SERVER_TIMESTAMP.

" }, "TimestampRange":{ "shape":"HLSTimestampRange", - "documentation":"

The start and end of the time stamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

" + "documentation":"

The start and end of the timestamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

" } }, - "documentation":"

Contains the range of time stamps for the requested media, and the source of the time stamps.

" + "documentation":"

Contains the range of timestamps for the requested media, and the source of the timestamps.

" }, "HLSFragmentSelectorType":{ "type":"string", @@ -262,14 +284,14 @@ "members":{ "StartTimestamp":{ "shape":"Timestamp", - "documentation":"

The start of the time stamp range for the requested media.

If the HLSTimestampRange value is specified, the StartTimestamp value is required.

This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" + "documentation":"

The start of the timestamp range for the requested media.

If the HLSTimestampRange value is specified, the StartTimestamp value is required.

This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The end of the time stamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

If the HLSTimestampRange value is specified, the EndTimestamp value is required.

This value is inclusive. The EndTimestamp is compared to the (starting) time stamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" + "documentation":"

The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

If the HLSTimestampRange value is specified, the EndTimestamp value is required.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" } }, - "documentation":"

The start and end of the time stamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

The values in the HLSTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.

" + "documentation":"

The start and end of the timestamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

The values in the HLSTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.

" }, "InvalidArgumentException":{ "type":"structure", @@ -285,7 +307,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The Codec Private Data in the video stream is not valid for this operation.

", + "documentation":"

The codec private data in at least one of the tracks of the video stream is not valid for this operation.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -307,7 +329,7 @@ }, "FragmentSelector":{ "shape":"FragmentSelector", - "documentation":"

Describes the time stamp range and time stamp origin for the range of fragments to return.

" + "documentation":"

Describes the timestamp range and timestamp origin for the range of fragments to return.

" } } }, @@ -316,7 +338,7 @@ "members":{ "Fragments":{ "shape":"FragmentList", - "documentation":"

A list of fragment numbers that correspond to the time stamp range provided.

" + "documentation":"

A list of archived Fragment objects from the stream that meet the selector criteria. Results are in no specific order, even across pages.

" }, "NextToken":{ "shape":"String", @@ -330,7 +352,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

No Codec Private Data was found in the video stream.

", + "documentation":"

No codec private data was found in at least one of the tracks of the video stream.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -403,21 +425,21 @@ "members":{ "StartTimestamp":{ "shape":"Timestamp", - "documentation":"

The starting time stamp in the range of time stamps for which to return fragments.

" + "documentation":"

The starting timestamp in the range of timestamps for which to return fragments.

" }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The ending time stamp in the range of time stamps for which to return fragments.

" + "documentation":"

The ending timestamp in the range of timestamps for which to return fragments.

" } }, - "documentation":"

The range of time stamps for which to return fragments.

" + "documentation":"

The range of timestamps for which to return fragments.

" }, "UnsupportedStreamMediaTypeException":{ "type":"structure", "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

An HLS streaming session was requested for a stream with a media type that is not video/h264.

", + "documentation":"

The type of the media (for example, h.264 video or AAC audio) could not be determined from the codec IDs of the tracks in the first fragment for a playback session. The codec ID for track 1 should be V_MPEG/ISO/AVC and, optionally, the codec ID for track 2 should be A_AAC.

", "error":{"httpStatusCode":400}, "exception":true } diff --git a/botocore/data/kinesis-video-media/2017-09-30/service-2.json b/botocore/data/kinesis-video-media/2017-09-30/service-2.json index fe307b52..84517b33 100644 --- a/botocore/data/kinesis-video-media/2017-09-30/service-2.json +++ b/botocore/data/kinesis-video-media/2017-09-30/service-2.json @@ -27,7 +27,7 @@ {"shape":"ConnectionLimitExceededException"}, {"shape":"InvalidArgumentException"} ], - "documentation":"

Use this API to retrieve media content from a Kinesis video stream. In the request, you identify stream name or stream Amazon Resource Name (ARN), and the starting chunk. Kinesis Video Streams then returns a stream of chunks in order by fragment number.

You must first call the GetDataEndpoint API to get an endpoint to which you can then send the GetMedia requests.

When you put media data (fragments) on a stream, Kinesis Video Streams stores each incoming fragment and related metadata in what is called a \"chunk.\" For more information, see . The GetMedia API returns a stream of these chunks starting from the chunk that you specify in the request.

The following limits apply when using the GetMedia API:

  • A client can call GetMedia up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMedia session.

" + "documentation":"

Use this API to retrieve media content from a Kinesis video stream. In the request, you identify the stream name or stream Amazon Resource Name (ARN), and the starting chunk. Kinesis Video Streams then returns a stream of chunks in order by fragment number.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMedia requests to this endpoint using the --endpoint-url parameter.

When you put media data (fragments) on a stream, Kinesis Video Streams stores each incoming fragment and related metadata in what is called a \"chunk.\" For more information, see . The GetMedia API returns a stream of these chunks starting from the chunk that you specify in the request.

The following limits apply when using the GetMedia API:

  • A client can call GetMedia up to five times per second per stream.

  • Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMedia session.

" } }, "shapes":{ @@ -97,7 +97,7 @@ }, "Payload":{ "shape":"Payload", - "documentation":"

The payload Kinesis Video Streams returns is a sequence of chunks from the specified stream. For information about the chunks, see . The chunks that Kinesis Video Streams returns in the GetMedia call also include the following additional Matroska (MKV) tags:

  • AWS_KINESISVIDEO_CONTINUATION_TOKEN (UTF-8 string) - In the event your GetMedia call terminates, you can use this continuation token in your next request to get the next chunk where the last request terminated.

  • AWS_KINESISVIDEO_MILLIS_BEHIND_NOW (UTF-8 string) - Client applications can use this tag value to determine how far behind the chunk returned in the response is from the latest chunk on the stream.

  • AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk.

  • AWS_KINESISVIDEO_SERVER_TIMESTAMP - Server time stamp of the fragment.

  • AWS_KINESISVIDEO_PRODUCER_TIMESTAMP - Producer time stamp of the fragment.

The following tags will be present if an error occurs:

  • AWS_KINESISVIDEO_ERROR_CODE - String description of an error that caused GetMedia to stop.

  • AWS_KINESISVIDEO_ERROR_ID: Integer code of the error.

The error codes are as follows:

  • 3002 - Error writing to the stream

  • 4000 - Requested fragment is not found

  • 4500 - Access denied for the stream's KMS key

  • 4501 - Stream's KMS key is disabled

  • 4502 - Validation error on the Stream's KMS key

  • 4503 - KMS key specified in the stream is unavailable

  • 4504 - Invalid usage of the KMS key specified in the stream

  • 4505 - Invalid state of the KMS key specified in the stream

  • 4506 - Unable to find the KMS key specified in the stream

  • 5000 - Internal error

" + "documentation":"

The payload Kinesis Video Streams returns is a sequence of chunks from the specified stream. For information about the chunks, see . The chunks that Kinesis Video Streams returns in the GetMedia call also include the following additional Matroska (MKV) tags:

  • AWS_KINESISVIDEO_CONTINUATION_TOKEN (UTF-8 string) - In the event your GetMedia call terminates, you can use this continuation token in your next request to get the next chunk where the last request terminated.

  • AWS_KINESISVIDEO_MILLIS_BEHIND_NOW (UTF-8 string) - Client applications can use this tag value to determine how far behind the chunk returned in the response is from the latest chunk on the stream.

  • AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk.

  • AWS_KINESISVIDEO_SERVER_TIMESTAMP - Server timestamp of the fragment.

  • AWS_KINESISVIDEO_PRODUCER_TIMESTAMP - Producer timestamp of the fragment.

The following tags will be present if an error occurs:

  • AWS_KINESISVIDEO_ERROR_CODE - String description of an error that caused GetMedia to stop.

  • AWS_KINESISVIDEO_ERROR_ID - Integer code of the error.

The error codes are as follows:

  • 3002 - Error writing to the stream

  • 4000 - Requested fragment is not found

  • 4500 - Access denied for the stream's KMS key

  • 4501 - Stream's KMS key is disabled

  • 4502 - Validation error on the stream's KMS key

  • 4503 - KMS key specified in the stream is unavailable

  • 4504 - Invalid usage of the KMS key specified in the stream

  • 4505 - Invalid state of the KMS key specified in the stream

  • 4506 - Unable to find the KMS key specified in the stream

  • 5000 - Internal error

" } }, "payload":"Payload" @@ -154,7 +154,7 @@ "members":{ "StartSelectorType":{ "shape":"StartSelectorType", - "documentation":"

Identifies the fragment on the Kinesis video stream where you want to start getting the data from.

  • NOW - Start with the latest chunk on the stream.

  • EARLIEST - Start with earliest available chunk on the stream.

  • FRAGMENT_NUMBER - Start with the chunk containing the specific fragment. You must also specify the StartFragmentNumber.

  • PRODUCER_TIMESTAMP or SERVER_TIMESTAMP - Start with the chunk containing a fragment with the specified producer or server time stamp. You specify the time stamp by adding StartTimestamp.

  • CONTINUATION_TOKEN - Read using the specified continuation token.

If you choose the NOW, EARLIEST, or CONTINUATION_TOKEN as the startSelectorType, you don't provide any additional information in the startSelector.

" + "documentation":"

Identifies the fragment on the Kinesis video stream where you want to start getting the data from.

  • NOW - Start with the latest chunk on the stream.

  • EARLIEST - Start with the earliest available chunk on the stream.

  • FRAGMENT_NUMBER - Start with the chunk containing the specific fragment. You must also specify the StartFragmentNumber.

  • PRODUCER_TIMESTAMP or SERVER_TIMESTAMP - Start with the chunk containing a fragment with the specified producer or server timestamp. You specify the timestamp by adding StartTimestamp.

  • CONTINUATION_TOKEN - Read using the specified continuation token.

If you choose the NOW, EARLIEST, or CONTINUATION_TOKEN as the startSelectorType, you don't provide any additional information in the startSelector.

" }, "AfterFragmentNumber":{ "shape":"FragmentNumberString", @@ -162,14 +162,14 @@ }, "StartTimestamp":{ "shape":"Timestamp", - "documentation":"

A time stamp value. This value is required if you choose the PRODUCER_TIMESTAMP or the SERVER_TIMESTAMP as the startSelectorType. The GetMedia API then starts with the chunk containing the fragment that has the specified time stamp.

" + "documentation":"

A timestamp value. This value is required if you choose the PRODUCER_TIMESTAMP or the SERVER_TIMESTAMP as the startSelectorType. The GetMedia API then starts with the chunk containing the fragment that has the specified timestamp.

" }, "ContinuationToken":{ "shape":"ContinuationToken", "documentation":"

Continuation token that Kinesis Video Streams returned in the previous GetMedia response. The GetMedia API then starts with the chunk identified by the continuation token.

" } }, - "documentation":"

Identifies the chunk on the Kinesis video stream where you want the GetMedia API to start returning media data. You have the following options to identify the starting chunk:

  • Choose the latest (or oldest) chunk.

  • Identify a specific chunk. You can identify a specific chunk either by providing a fragment number or time stamp (server or producer).

  • Each chunk's metadata includes a continuation token as a Matroska (MKV) tag (AWS_KINESISVIDEO_CONTINUATION_TOKEN). If your previous GetMedia request terminated, you can use this tag value in your next GetMedia request. The API then starts returning chunks starting where the last API ended.

" + "documentation":"

Identifies the chunk on the Kinesis video stream where you want the GetMedia API to start returning media data. You have the following options to identify the starting chunk:

  • Choose the latest (or oldest) chunk.

  • Identify a specific chunk. You can identify a specific chunk either by providing a fragment number or timestamp (server or producer).

  • Each chunk's metadata includes a continuation token as a Matroska (MKV) tag (AWS_KINESISVIDEO_CONTINUATION_TOKEN). If your previous GetMedia request terminated, you can use this tag value in your next GetMedia request. The API then starts returning chunks starting where the last API ended.

" }, "StartSelectorType":{ "type":"string", diff --git a/botocore/data/kinesis/2013-12-02/paginators-1.json b/botocore/data/kinesis/2013-12-02/paginators-1.json index 5f6d0a45..a88324d3 100644 --- a/botocore/data/kinesis/2013-12-02/paginators-1.json +++ b/botocore/data/kinesis/2013-12-02/paginators-1.json @@ -23,6 +23,18 @@ "more_results": "HasMoreStreams", "output_token": "StreamNames[-1]", "result_key": "StreamNames" + }, + "ListShards": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Shards" + }, + "ListStreamConsumers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Consumers" } } } diff --git a/botocore/data/kinesisanalyticsv2/2018-05-23/paginators-1.json b/botocore/data/kinesisanalyticsv2/2018-05-23/paginators-1.json index ea142457..70052cd3 100644 --- a/botocore/data/kinesisanalyticsv2/2018-05-23/paginators-1.json +++ b/botocore/data/kinesisanalyticsv2/2018-05-23/paginators-1.json @@ -1,3 +1,16 @@ { - "pagination": {} + "pagination": { + "ListApplicationSnapshots": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "SnapshotSummaries" + }, + "ListApplications": { + "input_token": "NextToken", + "limit_key": "Limit", + "output_token": "NextToken", + "result_key": "ApplicationSummaries" + } + } } diff --git a/botocore/data/kinesisvideo/2017-09-30/paginators-1.json b/botocore/data/kinesisvideo/2017-09-30/paginators-1.json index ea142457..bb58205d 100644 --- a/botocore/data/kinesisvideo/2017-09-30/paginators-1.json +++ b/botocore/data/kinesisvideo/2017-09-30/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListStreams": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "StreamInfoList" + } + } } diff --git a/botocore/data/kinesisvideo/2017-09-30/service-2.json 
b/botocore/data/kinesisvideo/2017-09-30/service-2.json index 98504cc0..aeca0da8 100644 --- a/botocore/data/kinesisvideo/2017-09-30/service-2.json +++ b/botocore/data/kinesisvideo/2017-09-30/service-2.json @@ -25,9 +25,10 @@ {"shape":"ResourceInUseException"}, {"shape":"InvalidDeviceException"}, {"shape":"InvalidArgumentException"}, - {"shape":"ClientLimitExceededException"} + {"shape":"ClientLimitExceededException"}, + {"shape":"TagsPerResourceExceededLimitException"} ], - "documentation":"

Creates a new Kinesis video stream.

When you create a new stream, Kinesis Video Streams assigns it a version number. When you change the stream's metadata, Kinesis Video Streams updates the version.

CreateStream is an asynchronous operation.

For information about how the service works, see How it Works.

You must have permissions for the KinesisVideo:CreateStream action.

" + "documentation":"

Creates a new Kinesis video stream.

When you create a new stream, Kinesis Video Streams assigns it a version number. When you change the stream's metadata, Kinesis Video Streams updates the version.

CreateStream is an asynchronous operation.

For information about how the service works, see How it Works.

You must have permissions for the KinesisVideo:CreateStream action.

" }, "DeleteStream":{ "name":"DeleteStream", @@ -41,7 +42,8 @@ {"shape":"ClientLimitExceededException"}, {"shape":"InvalidArgumentException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"NotAuthorizedException"} + {"shape":"NotAuthorizedException"}, + {"shape":"VersionMismatchException"} ], "documentation":"

Deletes a Kinesis video stream and the data contained in the stream.

This method marks the stream for deletion, and makes the data in the stream inaccessible immediately.

To ensure that you have the latest version of the stream before deleting it, you can specify the stream version. Kinesis Video Streams assigns a version to each stream. When you update a stream, Kinesis Video Streams assigns a new version number. To get the latest stream version, use the DescribeStream API.

This operation requires permission for the KinesisVideo:DeleteStream action.

" }, @@ -124,7 +126,7 @@ {"shape":"InvalidResourceFormatException"}, {"shape":"TagsPerResourceExceededLimitException"} ], - "documentation":"

Adds one or more tags to a stream. A tag is a key-value pair (the value is optional) that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

You must provide either the StreamName or the StreamARN.

This operation requires permission for the KinesisVideo:TagStream action.

Kinesis video streams support up to 50 tags.

" + "documentation":"

Adds one or more tags to a stream. A tag is a key-value pair (the value is optional) that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

You must provide either the StreamName or the StreamARN.

This operation requires permission for the KinesisVideo:TagStream action.

Kinesis video streams support up to 50 tags.

" }, "UntagStream":{ "name":"UntagStream", @@ -227,15 +229,19 @@ }, "MediaType":{ "shape":"MediaType", - "documentation":"

The media type of the stream. Consumers of the stream can use this information when processing the stream. For more information about media types, see Media Types. If you choose to specify the MediaType, see Naming Requirements for guidelines.

To play video on the console, the media must be H.264 encoded, and you need to specify this video type in this parameter as video/h264.

This parameter is optional; the default value is null (or empty in JSON).

" + "documentation":"

The media type of the stream. Consumers of the stream can use this information when processing the stream. For more information about media types, see Media Types. If you choose to specify the MediaType, see Naming Requirements for guidelines.

This parameter is optional; the default value is null (or empty in JSON).

" }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The ID of the AWS Key Management Service (AWS KMS) key that you want Kinesis Video Streams to use to encrypt stream data.

If no key ID is specified, the default, Kinesis Video-managed key (aws/kinesisvideo) is used.

For more information, see DescribeKey.

" + "documentation":"

The ID of the AWS Key Management Service (AWS KMS) key that you want Kinesis Video Streams to use to encrypt stream data.

If no key ID is specified, the default, Kinesis Video-managed key (aws/kinesisvideo) is used.

For more information, see DescribeKey.

" }, "DataRetentionInHours":{ "shape":"DataRetentionInHours", "documentation":"

The number of hours that you want to retain the data in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream.

The default value is 0, indicating that the stream does not persist data.

When the DataRetentionInHours value is 0, consumers can still consume the fragments that remain in the service host buffer, which has a retention time limit of 5 minutes and a retention memory limit of 200 MB. Fragments are removed from the buffer when either limit is reached.

" + }, + "Tags":{ + "shape":"ResourceTags", + "documentation":"

A list of tags to associate with the specified stream. Each tag is a key-value pair (the value is optional).

" } } }, @@ -442,7 +448,7 @@ "type":"string", "max":128, "min":1, - "pattern":"[\\w\\-\\.\\+]+/[\\w\\-\\.\\+]+" + "pattern":"[\\w\\-\\.\\+]+/[\\w\\-\\.\\+]+(,[\\w\\-\\.\\+]+/[\\w\\-\\.\\+]+)*" }, "NextToken":{ "type":"string", @@ -662,7 +668,7 @@ }, "DataRetentionChangeInHours":{ "shape":"DataRetentionChangeInHours", - "documentation":"

The retention period, in hours. The value you specify replaces the current value.

" + "documentation":"

The retention period, in hours. The value you specify replaces the current value. The maximum value for this parameter is 87600 (ten years).

" } } }, @@ -720,7 +726,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The stream version that you specified is not the latest version. To get the latest version, use the DescribeStream API.

", + "documentation":"

The stream version that you specified is not the latest version. To get the latest version, use the DescribeStream API.

", "error":{"httpStatusCode":400}, "exception":true } diff --git a/botocore/data/lambda/2015-03-31/paginators-1.json b/botocore/data/lambda/2015-03-31/paginators-1.json index b2d1577f..c7918f89 100644 --- a/botocore/data/lambda/2015-03-31/paginators-1.json +++ b/botocore/data/lambda/2015-03-31/paginators-1.json @@ -17,6 +17,24 @@ "output_token": "NextMarker", "limit_key": "MaxItems", "result_key": "Aliases" + }, + "ListLayerVersions": { + "input_token": "Marker", + "limit_key": "MaxItems", + "output_token": "NextMarker", + "result_key": "LayerVersions" + }, + "ListLayers": { + "input_token": "Marker", + "limit_key": "MaxItems", + "output_token": "NextMarker", + "result_key": "Layers" + }, + "ListVersionsByFunction": { + "input_token": "Marker", + "limit_key": "MaxItems", + "output_token": "NextMarker", + "result_key": "Versions" } } } diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index 7a481b3a..30438151 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -28,7 +28,7 @@ {"shape":"PolicyLengthExceededException"}, {"shape":"PreconditionFailedException"} ], - "documentation":"

Adds permissions to the resource-based policy of a version of a function layer. Use this action to grant layer usage permission to other accounts. You can grant permission to a single account, all AWS accounts, or all accounts in an organization.

To revoke permission, call RemoveLayerVersionPermission with the statement ID that you specified when you added it.

" + "documentation":"

Adds permissions to the resource-based policy of a version of an AWS Lambda layer. Use this action to grant layer usage permission to other accounts. You can grant permission to a single account, all AWS accounts, or all accounts in an organization.

To revoke permission, call RemoveLayerVersionPermission with the statement ID that you specified when you added it.

" }, "AddPermission":{ "name":"AddPermission", @@ -48,7 +48,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"PreconditionFailedException"} ], - "documentation":"

Adds a permission to the resource policy associated with the specified AWS Lambda function. You use resource policies to grant permissions to event sources that use the push model. In a push model, event sources (such as Amazon S3 and custom applications) invoke your Lambda function. Each permission you add to the resource policy allows an event source permission to invoke the Lambda function.

Permissions apply to the Amazon Resource Name (ARN) used to invoke the function, which can be unqualified (the unpublished version of the function), or include a version or alias. If a client uses a version or alias to invoke a function, use the Qualifier parameter to apply permissions to that ARN. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:AddPermission action.

" + "documentation":"

Grants an AWS service or another account permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function.

To grant permission to another account, specify the account ID as the Principal. For AWS services, the principal is a domain-style identifier defined by the service, like s3.amazonaws.com or sns.amazonaws.com. For AWS services, you can also specify the ARN or owning account of the associated resource as the SourceArn or SourceAccount. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function.

This action adds a statement to a resource-based permission policy for the function. For more information about function policies, see Lambda Function Policies.

" }, "CreateAlias":{ "name":"CreateAlias", @@ -66,7 +66,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Creates an alias that points to the specified Lambda function version. For more information, see Introduction to AWS Lambda Aliases.

Alias names are unique for a given function. This requires permission for the lambda:CreateAlias action.

" + "documentation":"

Creates an alias for a Lambda function version. Use aliases to provide clients with a function identifier that you can update to invoke a different version.

You can also map an alias to split invocation requests between two versions. Use the RoutingConfig parameter to specify a second version and the percentage of invocation requests that it receives.

" }, "CreateEventSourceMapping":{ "name":"CreateEventSourceMapping", @@ -84,7 +84,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Creates a mapping between an event source and an AWS Lambda function. Lambda reads items from the event source and triggers the function.

For details about each event source type, see the following topics.

" + "documentation":"

Creates a mapping between an event source and an AWS Lambda function. Lambda reads items from the event source and triggers the function.

For details about each event source type, see the following topics.

" }, "CreateFunction":{ "name":"CreateFunction", @@ -103,7 +103,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"CodeStorageExceededException"} ], - "documentation":"

Creates a new Lambda function. The function configuration is created from the request parameters, and the code for the function is provided by a .zip file. The function name is case-sensitive.

This operation requires permission for the lambda:CreateFunction action.

" + "documentation":"

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package contains your function code. The execution role grants the function permission to use AWS services, such as Amazon CloudWatch Logs for log streaming and AWS X-Ray for request tracing.

A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.

The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).

If another account or an AWS service invokes your function, use AddPermission to grant permission by creating a resource-based IAM policy. You can grant permissions at the function level, on a version, or on an alias.

To invoke your function directly, use Invoke. To invoke your function in response to events in other AWS services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Functions.

" }, "DeleteAlias":{ "name":"DeleteAlias", @@ -118,7 +118,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Deletes the specified Lambda function alias. For more information, see Introduction to AWS Lambda Aliases.

This requires permission for the lambda:DeleteAlias action.

" + "documentation":"

Deletes a Lambda function alias.

" }, "DeleteEventSourceMapping":{ "name":"DeleteEventSourceMapping", @@ -136,7 +136,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Deletes an event source mapping.

" + "documentation":"

Deletes an event source mapping. You can get the identifier of a mapping from the output of ListEventSourceMappings.

" }, "DeleteFunction":{ "name":"DeleteFunction", @@ -153,7 +153,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. Event source mappings are not deleted.

This operation requires permission for the lambda:DeleteFunction action.

" + "documentation":"

Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted.

To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For AWS services and resources that invoke your function directly, delete the trigger in the service where you originally configured it.

" }, "DeleteFunctionConcurrency":{ "name":"DeleteFunctionConcurrency", @@ -169,7 +169,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Removes concurrent execution limits from this function. For more information, see Managing Concurrency.

" + "documentation":"

Removes a concurrent execution limit from a function.

" }, "DeleteLayerVersion":{ "name":"DeleteLayerVersion", @@ -183,7 +183,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Deletes a version of a function layer. Deleted versions can no longer be viewed or added to functions. However, a copy of the version remains in Lambda until no functions refer to it.

" + "documentation":"

Deletes a version of an AWS Lambda layer. Deleted versions can no longer be viewed or added to functions. To avoid breaking functions, a copy of the version remains in Lambda until no functions refer to it.

" }, "GetAccountSettings":{ "name":"GetAccountSettings", @@ -198,7 +198,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ServiceException"} ], - "documentation":"

Retrieves details about your account's limits and usage in a region.

" + "documentation":"

Retrieves details about your account's limits and usage in an AWS Region.

" }, "GetAlias":{ "name":"GetAlias", @@ -215,7 +215,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Returns the specified alias information such as the alias ARN, description, and function version it is pointing to. For more information, see Introduction to AWS Lambda Aliases.

This requires permission for the lambda:GetAlias action.

" + "documentation":"

Returns details about a Lambda function alias.

" }, "GetEventSourceMapping":{ "name":"GetEventSourceMapping", @@ -232,7 +232,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Returns details about an event source mapping.

" + "documentation":"

Returns details about an event source mapping. You can get the identifier of a mapping from the output of ListEventSourceMappings.

" }, "GetFunction":{ "name":"GetFunction", @@ -249,7 +249,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Returns the configuration information of the Lambda function and a presigned URL link to the .zip file you uploaded with CreateFunction so you can download the .zip file. Note that the URL is valid for up to 10 minutes. The configuration information is the same information you provided as parameters when uploading the function.

Use the Qualifier parameter to retrieve a published version of the function. Otherwise, returns the unpublished version ($LATEST). For more information, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:GetFunction action.

" + "documentation":"

Returns information about the function or function version, with a link to download the deployment package that's valid for 10 minutes. If you specify a function version, only details that are specific to that version are returned.

" }, "GetFunctionConfiguration":{ "name":"GetFunctionConfiguration", @@ -266,7 +266,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Returns the configuration information of the Lambda function. This the same information you provided as parameters when uploading the function by using CreateFunction.

If you are using the versioning feature, you can retrieve this information for a specific function version by using the optional Qualifier parameter and specifying the function version or alias that points to it. If you don't provide it, the API returns information about the $LATEST version of the function. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:GetFunctionConfiguration operation.

" + "documentation":"

Returns the version-specific settings of a Lambda function or version. The output includes only options that can vary between versions of a function. To modify these settings, use UpdateFunctionConfiguration.

To get all of a function's details, including function-level settings, use GetFunction.

" }, "GetLayerVersion":{ "name":"GetLayerVersion", @@ -283,7 +283,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns information about a version of a function layer, with a link to download the layer archive that's valid for 10 minutes.

" + "documentation":"

Returns information about a version of an AWS Lambda layer, with a link to download the layer archive that's valid for 10 minutes.

" }, "GetLayerVersionPolicy":{ "name":"GetLayerVersionPolicy", @@ -300,7 +300,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Returns the permission policy for a layer version. For more information, see AddLayerVersionPermission.

" + "documentation":"

Returns the permission policy for a version of an AWS Lambda layer. For more information, see AddLayerVersionPermission.

" }, "GetPolicy":{ "name":"GetPolicy", @@ -317,7 +317,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Returns the resource policy associated with the specified Lambda function.

This action requires permission for the lambda:GetPolicy action.

" + "documentation":"

Returns the resource-based IAM policy for a function, version, or alias.

" }, "Invoke":{ "name":"Invoke", @@ -349,7 +349,7 @@ {"shape":"KMSNotFoundException"}, {"shape":"InvalidRuntimeException"} ], - "documentation":"

Invokes a Lambda function. For an example, see Create the Lambda Function and Test It Manually.

Specify just a function name to invoke the latest version of the function. To invoke a published version, use the Qualifier parameter to specify a version or alias.

If you use the RequestResponse (synchronous) invocation option, the function will be invoked only once. If you use the Event (asynchronous) invocation option, the function will be invoked at least once in response to an event and the function must be idempotent to handle this.

For functions with a long timeout, your client may be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.

This operation requires permission for the lambda:InvokeFunction action.

The TooManyRequestsException noted below will return the following: ConcurrentInvocationLimitExceeded will be returned if you have no functions with reserved concurrency and have exceeded your account concurrent limit or if a function without reserved concurrency exceeds the account's unreserved concurrency limit. ReservedFunctionConcurrentInvocationLimitExceeded will be returned when a function with reserved concurrency exceeds its configured concurrency limit.

" + "documentation":"

Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event.

For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace. To record function errors for asynchronous invocations, configure your function with a dead letter queue.

The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, limit errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if executing the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).

For functions with a long timeout, your client might be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.

This operation requires permission for the lambda:InvokeFunction action.

" }, "InvokeAsync":{ "name":"InvokeAsync", @@ -366,7 +366,7 @@ {"shape":"InvalidRequestContentException"}, {"shape":"InvalidRuntimeException"} ], - "documentation":"

For asynchronous function invocation, use Invoke.

Submits an invocation request to AWS Lambda. Upon receiving the request, Lambda executes the specified function asynchronously. To see the logs generated by the Lambda function execution, see the CloudWatch Logs console.

This operation requires permission for the lambda:InvokeFunction action.

", + "documentation":"

For asynchronous function invocation, use Invoke.

Invokes a function asynchronously.

", "deprecated":true }, "ListAliases":{ @@ -384,7 +384,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Returns list of aliases created for a Lambda function. For each alias, the response includes information such as the alias ARN, description, alias name, and the function version to which it points. For more information, see Introduction to AWS Lambda Aliases.

This requires permission for the lambda:ListAliases action.

" + "documentation":"

Returns a list of aliases for a Lambda function.

" }, "ListEventSourceMappings":{ "name":"ListEventSourceMappings", @@ -417,7 +417,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Returns a list of your Lambda functions. For each function, the response includes the function configuration information. You must use GetFunction to retrieve the code for your function.

This operation requires permission for the lambda:ListFunctions action.

If you are using the versioning feature, you can list all of your functions or only $LATEST versions. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

" + "documentation":"

Returns a list of Lambda functions, with the version-specific configuration of each.

Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version. To get more information about a function or version, use GetFunction.

" }, "ListLayerVersions":{ "name":"ListLayerVersions", @@ -434,7 +434,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists the versions of a function layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only versions that indicate that they're compatible with that runtime.

" + "documentation":"

Lists the versions of an AWS Lambda layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only versions that indicate that they're compatible with that runtime.

" }, "ListLayers":{ "name":"ListLayers", @@ -450,7 +450,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists function layers and shows information about the latest version of each. Specify a runtime identifier to list only layers that indicate that they're compatible with that runtime.

" + "documentation":"

Lists AWS Lambda layers and shows information about the latest version of each. Specify a runtime identifier to list only layers that indicate that they're compatible with that runtime.

" }, "ListTags":{ "name":"ListTags", @@ -466,7 +466,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Returns a list of tags assigned to a function when supplied the function ARN (Amazon Resource Name). For more information on Tagging, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

" + "documentation":"

Returns a function's tags. You can also view tags with GetFunction.

" }, "ListVersionsByFunction":{ "name":"ListVersionsByFunction", @@ -483,7 +483,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists all versions of a function. For information about versioning, see AWS Lambda Function Versioning and Aliases.

" + "documentation":"

Returns a list of versions, with the version-specific configuration of each.

" }, "PublishLayerVersion":{ "name":"PublishLayerVersion", @@ -501,7 +501,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"CodeStorageExceededException"} ], - "documentation":"

Creates a function layer from a ZIP archive. Each time you call PublishLayerVersion with the same version name, a new version is created.

Add layers to your function with CreateFunction or UpdateFunctionConfiguration.

" + "documentation":"

Creates an AWS Lambda layer from a ZIP archive. Each time you call PublishLayerVersion with the same version name, a new version is created.

Add layers to your function with CreateFunction or UpdateFunctionConfiguration.

" }, "PublishVersion":{ "name":"PublishVersion", @@ -520,7 +520,7 @@ {"shape":"CodeStorageExceededException"}, {"shape":"PreconditionFailedException"} ], - "documentation":"

Publishes a version of your function from the current snapshot of $LATEST. That is, AWS Lambda takes a snapshot of the function code and configuration information from $LATEST and publishes a new version. The code and configuration cannot be modified after publication. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

" + "documentation":"

Creates a version from the current code and configuration of a function. Use versions to create a snapshot of your function code and configuration that doesn't change.

AWS Lambda doesn't publish a version if the function's configuration and code haven't changed since the last version. Use UpdateFunctionCode or UpdateFunctionConfiguration to update the function before publishing a version.

Clients can invoke versions directly or with an alias. To create an alias, use CreateAlias.

" }, "PutFunctionConcurrency":{ "name":"PutFunctionConcurrency", @@ -537,7 +537,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Sets a limit on the number of concurrent executions available to this function. It is a subset of your account's total concurrent execution limit per region. Note that Lambda automatically reserves a buffer of 100 concurrent executions for functions without any reserved concurrency limit. This means if your account limit is 1000, you have a total of 900 available to allocate to individual functions. For more information, see Managing Concurrency.

" + "documentation":"

Sets the maximum number of simultaneous executions for a function, and reserves capacity for that concurrency level.

Concurrency settings apply to the function as a whole, including all published versions and the unpublished version. Reserving concurrency both ensures that your function has capacity to process the specified number of events simultaneously, and prevents it from scaling beyond that level. Use GetFunction to see the current setting for a function.

Use GetAccountSettings to see your regional concurrency limit. You can reserve concurrency for as many functions as you like, as long as you leave at least 100 simultaneous executions unreserved for functions that aren't configured with a per-function limit. For more information, see Managing Concurrency.

" }, "RemoveLayerVersionPermission":{ "name":"RemoveLayerVersionPermission", @@ -554,7 +554,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"PreconditionFailedException"} ], - "documentation":"

Removes a statement from the permissions policy for a layer version. For more information, see AddLayerVersionPermission.

" + "documentation":"

Removes a statement from the permissions policy for a version of an AWS Lambda layer. For more information, see AddLayerVersionPermission.

" }, "RemovePermission":{ "name":"RemovePermission", @@ -571,7 +571,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"PreconditionFailedException"} ], - "documentation":"

Removes permissions from a function. You can remove individual permissions from a resource policy associated with a Lambda function by providing a statement ID that you provided when you added the permission. When you remove permissions, disable the event source mapping or trigger configuration first to avoid errors.

Permissions apply to the Amazon Resource Name (ARN) used to invoke the function, which can be unqualified (the unpublished version of the function), or include a version or alias. If a client uses a version or alias to invoke a function, use the Qualifier parameter to apply permissions to that ARN. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

You need permission for the lambda:RemovePermission action.

" + "documentation":"

Revokes function-use permission from an AWS service or another account. You can get the ID of the statement from the output of GetPolicy.

" }, "TagResource":{ "name":"TagResource", @@ -587,7 +587,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Creates a list of tags (key-value pairs) on the Lambda function. Requires the Lambda function ARN (Amazon Resource Name). If a key is specified without a value, Lambda creates a tag with the specified key and a value of null. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

" + "documentation":"

Adds tags to a function.

" }, "UntagResource":{ "name":"UntagResource", @@ -603,7 +603,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Removes tags from a Lambda function. Requires the function ARN (Amazon Resource Name). For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

" + "documentation":"

Removes tags from a function.

" }, "UpdateAlias":{ "name":"UpdateAlias", @@ -621,7 +621,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"PreconditionFailedException"} ], - "documentation":"

Using this API you can update the function version to which the alias points and the alias description. For more information, see Introduction to AWS Lambda Aliases.

This requires permission for the lambda:UpdateAlias action.

" + "documentation":"

Updates the configuration of a Lambda function alias.

" }, "UpdateEventSourceMapping":{ "name":"UpdateEventSourceMapping", @@ -659,7 +659,7 @@ {"shape":"CodeStorageExceededException"}, {"shape":"PreconditionFailedException"} ], - "documentation":"

Updates the code for the specified Lambda function. This operation must only be used on an existing Lambda function and cannot be used to update the function configuration.

If you are using the versioning feature, note this API will always update the $LATEST version of your Lambda function. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:UpdateFunctionCode action.

" + "documentation":"

Updates a Lambda function's code.

The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.

" }, "UpdateFunctionConfiguration":{ "name":"UpdateFunctionConfiguration", @@ -678,7 +678,7 @@ {"shape":"ResourceConflictException"}, {"shape":"PreconditionFailedException"} ], - "documentation":"

Updates the configuration parameters for the specified Lambda function by using the values provided in the request. You provide only the parameters you want to change. This operation must only be used on an existing Lambda function and cannot be used to update the function's code.

If you are using the versioning feature, note this API will always update the $LATEST version of your Lambda function. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:UpdateFunctionConfiguration action.

" + "documentation":"

Modify the version-specific settings of a Lambda function.

These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version.

To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an account or AWS service, use AddPermission.

" } }, "shapes":{ @@ -687,40 +687,40 @@ "members":{ "TotalCodeSize":{ "shape":"Long", - "documentation":"

Maximum size, in bytes, of a code package you can upload per region. The default size is 75 GB.

" + "documentation":"

The amount of storage space that you can use for all deployment packages and layer archives.

" }, "CodeSizeUnzipped":{ "shape":"Long", - "documentation":"

Size, in bytes, of code/dependencies that you can zip into a deployment package (uncompressed zip/jar size) for uploading. The default limit is 250 MB.

" + "documentation":"

The maximum size of your function's code and layers when they're extracted.

" }, "CodeSizeZipped":{ "shape":"Long", - "documentation":"

Size, in bytes, of a single zipped code/dependencies package you can upload for your Lambda function(.zip/.jar file). Try using Amazon S3 for uploading larger files. Default limit is 50 MB.

" + "documentation":"

The maximum size of a deployment package when it's uploaded directly to AWS Lambda. Use Amazon S3 for larger files.

" }, "ConcurrentExecutions":{ "shape":"Integer", - "documentation":"

Number of simultaneous executions of your function per region. The default limit is 1000.

" + "documentation":"

The maximum number of simultaneous function executions.

" }, "UnreservedConcurrentExecutions":{ "shape":"UnreservedConcurrentExecutions", - "documentation":"

The number of concurrent executions available to functions that do not have concurrency limits set. For more information, see Managing Concurrency.

" + "documentation":"

The maximum number of simultaneous function executions, minus the capacity that's reserved for individual functions with PutFunctionConcurrency.

" } }, - "documentation":"

Provides limits of code size and concurrency associated with the current account and region. For more information or to request a limit increase for concurrent executions, see Lambda Limits.

" + "documentation":"

Limits that are related to concurrency and code storage. All file and storage sizes are in bytes.

" }, "AccountUsage":{ "type":"structure", "members":{ "TotalCodeSize":{ "shape":"Long", - "documentation":"

Total size, in bytes, of the account's deployment packages per region.

" + "documentation":"

The amount of storage space, in bytes, that's being used by deployment packages and layer archives.

" }, "FunctionCount":{ "shape":"Long", - "documentation":"

The number of your account's existing functions per region.

" + "documentation":"

The number of Lambda functions.

" } }, - "documentation":"

Provides code size usage and function count associated with the current account and region.

" + "documentation":"

The number of functions and amount of storage in use.

" }, "Action":{ "type":"string", @@ -738,7 +738,7 @@ "members":{ "LayerName":{ "shape":"LayerName", - "documentation":"

The name of the layer.

", + "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", "location":"uri", "locationName":"LayerName" }, @@ -796,33 +796,33 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function, version, or alias.

Name formats

  • Function name - my-function (name-only), my-function:v1 (with alias).

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "StatementId":{ "shape":"StatementId", - "documentation":"

A unique statement identifier.

" + "documentation":"

A statement identifier that differentiates the statement from others in the same policy.

" }, "Action":{ "shape":"Action", - "documentation":"

The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with lambda: followed by the API name . For example, lambda:CreateFunction. You can use wildcard (lambda:*) to grant permission for all AWS Lambda actions.

" + "documentation":"

The action that the principal can use on the function. For example, lambda:InvokeFunction or lambda:GetFunction.

" }, "Principal":{ "shape":"Principal", - "documentation":"

The principal who is getting this permission. The principal can be an AWS service (e.g. s3.amazonaws.com or sns.amazonaws.com) for service triggers, or an account ID for cross-account access. If you specify a service as a principal, use the SourceArn parameter to limit who can invoke the function through that service.

" + "documentation":"

The AWS service or account that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service.

" }, "SourceArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name of the invoker.

If you add a permission to a service principal without providing the source ARN, any AWS account that creates a mapping to your function ARN can invoke your Lambda function.

" + "documentation":"

For AWS services, the ARN of the AWS resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.

" }, "SourceAccount":{ "shape":"SourceOwner", - "documentation":"

This parameter is used for S3 and SES. The AWS account ID (without a hyphen) of the source owner. For example, if the SourceArn identifies a bucket, then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS account created the bucket). You can also use this condition to specify all sources (that is, you don't specify the SourceArn) owned by a specific account.

" + "documentation":"

For AWS services, the ID of the account that owns the resource. Use this instead of SourceArn to grant permission to resources that are owned by another account (for example, all of an account's Amazon S3 buckets). Or use it together with SourceArn to ensure that the resource is owned by the specified account. For example, an Amazon S3 bucket could be deleted by its owner and recreated by another account.

" }, "EventSourceToken":{ "shape":"EventSourceToken", - "documentation":"

A unique token that must be supplied by the principal invoking the function. This is currently only used for Alexa Smart Home functions.

" + "documentation":"

For Alexa Smart Home functions, a token that must be supplied by the invoker.

" }, "Qualifier":{ "shape":"Qualifier", @@ -832,7 +832,7 @@ }, "RevisionId":{ "shape":"String", - "documentation":"

An optional value you can use to ensure you are updating the latest update of the function version or alias. If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you to retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias

" + "documentation":"

Only update the policy if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.

" } } }, @@ -841,7 +841,7 @@ "members":{ "Statement":{ "shape":"String", - "documentation":"

The permission statement you specified in the request. The response returns the same as a string using a backslash (\"\\\") as an escape character in the JSON.

" + "documentation":"

The permission statement that's added to the function policy.

" } } }, @@ -867,30 +867,30 @@ "members":{ "AliasArn":{ "shape":"FunctionArn", - "documentation":"

Lambda function ARN that is qualified using the alias name as the suffix. For example, if you create an alias called BETA that points to a helloworld function version, the ARN is arn:aws:lambda:aws-regions:acct-id:function:helloworld:BETA.

" + "documentation":"

The Amazon Resource Name (ARN) of the alias.

" }, "Name":{ "shape":"Alias", - "documentation":"

Alias name.

" + "documentation":"

The name of the alias.

" }, "FunctionVersion":{ "shape":"Version", - "documentation":"

Function version to which the alias points.

" + "documentation":"

The function version that the alias invokes.

" }, "Description":{ "shape":"Description", - "documentation":"

Alias description.

" + "documentation":"

A description of the alias.

" }, "RoutingConfig":{ "shape":"AliasRoutingConfiguration", - "documentation":"

Specifies an additional function versions the alias points to, allowing you to dictate what percentage of traffic will invoke each version.

" + "documentation":"

The routing configuration of the alias.

" }, "RevisionId":{ "shape":"String", - "documentation":"

Represents the latest updated revision of the function or alias.

" + "documentation":"

A unique identifier that changes when you update the alias.

" } }, - "documentation":"

Provides configuration information about a Lambda function version alias.

" + "documentation":"

Provides configuration information about a Lambda function alias.

" }, "AliasList":{ "type":"list", @@ -901,10 +901,10 @@ "members":{ "AdditionalVersionWeights":{ "shape":"AdditionalVersionWeights", - "documentation":"

The name of the second alias, and the percentage of traffic that is routed to it.

" + "documentation":"

The name of the second alias, and the percentage of traffic that's routed to it.

" } }, - "documentation":"

The alias's traffic shifting configuration.

" + "documentation":"

The traffic-shifting configuration of a Lambda function alias.

" }, "Arn":{ "type":"string", @@ -933,7 +933,7 @@ }, "message":{"shape":"String"} }, - "documentation":"

You have exceeded your maximum total code size per account. Limits

", + "documentation":"

You have exceeded your maximum total code size per account. Limits

", "error":{"httpStatusCode":400}, "exception":true }, @@ -947,7 +947,7 @@ "members":{ "ReservedConcurrentExecutions":{ "shape":"ReservedConcurrentExecutions", - "documentation":"

The number of concurrent executions reserved for this function. For more information, see Managing Concurrency.

" + "documentation":"

The number of concurrent executions that are reserved for this function. For more information, see Managing Concurrency.

" } } }, @@ -961,25 +961,25 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "Name":{ "shape":"Alias", - "documentation":"

Name for the alias you are creating.

" + "documentation":"

The name of the alias.

" }, "FunctionVersion":{ "shape":"Version", - "documentation":"

Lambda function version for which you are creating the alias.

" + "documentation":"

The function version that the alias invokes.

" }, "Description":{ "shape":"Description", - "documentation":"

Description of the alias.

" + "documentation":"

A description of the alias.

" }, "RoutingConfig":{ "shape":"AliasRoutingConfiguration", - "documentation":"

Specifies an additional version your alias can point to, allowing you to dictate what percentage of traffic will invoke each version. For more information, see Traffic Shifting Using Aliases.

" + "documentation":"

The routing configuration of the alias.

" } } }, @@ -1012,7 +1012,7 @@ }, "StartingPositionTimestamp":{ "shape":"Date", - "documentation":"

With StartingPosition set to AT_TIMESTAMP, the Unix time in seconds from which to start reading.

" + "documentation":"

With StartingPosition set to AT_TIMESTAMP, the time from which to start reading.

" } } }, @@ -1028,19 +1028,19 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - my-function.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" }, "Runtime":{ "shape":"Runtime", - "documentation":"

The runtime version for the function.

" + "documentation":"

The identifier of the function's runtime.

" }, "Role":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the function's execution role.

" + "documentation":"

The Amazon Resource Name (ARN) of the function's execution role.

" }, "Handler":{ "shape":"Handler", - "documentation":"

The name of the method within your code that Lambda calls to execute your function. For more information, see Programming Model.

" + "documentation":"

The name of the method within your code that Lambda calls to execute your function. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Programming Model.

" }, "Code":{ "shape":"FunctionCode", @@ -1052,11 +1052,11 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The amount of time that Lambda allows a function to run before terminating it. The default is 3 seconds. The maximum allowed value is 900 seconds.

" + "documentation":"

The amount of time that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds.

" }, "MemorySize":{ "shape":"MemorySize", - "documentation":"

The amount of memory that your function has access to. Increasing the function's memory also increases it's CPU allocation. The default value is 128 MB. The value must be a multiple of 64 MB.

" + "documentation":"

The amount of memory that your function has access to. Increasing the function's memory also increases its CPU allocation. The default value is 128 MB. The value must be a multiple of 64 MB.

" }, "Publish":{ "shape":"Boolean", @@ -1064,11 +1064,11 @@ }, "VpcConfig":{ "shape":"VpcConfig", - "documentation":"

If your Lambda function accesses resources in a VPC, you provide this parameter identifying the list of security group IDs and subnet IDs. These must belong to the same VPC. You must provide at least one security group and one subnet ID.

" + "documentation":"

For network connectivity to AWS resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can only access resources and the internet through that VPC. For more information, see VPC Settings.

" }, "DeadLetterConfig":{ "shape":"DeadLetterConfig", - "documentation":"

A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead Letter Queues.

" + "documentation":"

A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead Letter Queues.

" }, "Environment":{ "shape":"Environment", @@ -1076,7 +1076,7 @@ }, "KMSKeyArn":{ "shape":"KMSKeyArn", - "documentation":"

The ARN of the KMS key used to encrypt your function's environment variables. If not provided, AWS Lambda will use a default service key.

" + "documentation":"

The ARN of the AWS Key Management Service (AWS KMS) key that's used to encrypt your function's environment variables. If it's not provided, AWS Lambda uses a default service key.

" }, "TracingConfig":{ "shape":"TracingConfig", @@ -1084,11 +1084,11 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

The list of tags (key-value pairs) assigned to the new function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

" + "documentation":"

A list of tags to apply to the function.

" }, "Layers":{ "shape":"LayerList", - "documentation":"

A list of function layers to add to the function's execution environment.

" + "documentation":"

A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.

" } } }, @@ -1101,7 +1101,7 @@ "documentation":"

The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.

" } }, - "documentation":"

The dead letter queue for failed asynchronous invocations.

" + "documentation":"

The dead letter queue for failed asynchronous invocations.

" }, "DeleteAliasRequest":{ "type":"structure", @@ -1112,13 +1112,13 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "Name":{ "shape":"Alias", - "documentation":"

Name of the alias to delete.

", + "documentation":"

The name of the alias.

", "location":"uri", "locationName":"Name" } @@ -1142,7 +1142,7 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - my-function.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" } @@ -1154,13 +1154,13 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function or version.

Name formats

  • Function name - my-function (name-only), my-function:1 (with version).

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "Qualifier":{ "shape":"Qualifier", - "documentation":"

Specify a version to delete. You cannot delete a version that is referenced by an alias.

", + "documentation":"

Specify a version to delete. You can't delete a version that's referenced by an alias.

", "location":"querystring", "locationName":"Qualifier" } @@ -1175,7 +1175,7 @@ "members":{ "LayerName":{ "shape":"LayerName", - "documentation":"

The name of the layer.

", + "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", "location":"uri", "locationName":"LayerName" }, @@ -1256,7 +1256,7 @@ "documentation":"

The error message.

" } }, - "documentation":"

Error messages for environment variables that could not be applied.

" + "documentation":"

Error messages for environment variables that couldn't be applied.

" }, "EnvironmentResponse":{ "type":"structure", @@ -1267,7 +1267,7 @@ }, "Error":{ "shape":"EnvironmentError", - "documentation":"

Error messages for environment variables that could not be applied.

" + "documentation":"

Error messages for environment variables that couldn't be applied.

" } }, "documentation":"

The results of a configuration update that applied environment variables.

" @@ -1308,7 +1308,7 @@ }, "LastModified":{ "shape":"Date", - "documentation":"

The date that the event source mapping was last updated, in Unix time seconds.

" + "documentation":"

The date that the event source mapping was last updated.

" }, "LastProcessingResult":{ "shape":"String", @@ -1352,11 +1352,11 @@ "members":{ "ZipFile":{ "shape":"Blob", - "documentation":"

The base64-encoded contents of your zip file containing your deployment package. AWS SDK and AWS CLI clients handle the encoding for you.

" + "documentation":"

The base64-encoded contents of the deployment package. AWS SDK and AWS CLI clients handle the encoding for you.

" }, "S3Bucket":{ "shape":"S3Bucket", - "documentation":"

An Amazon S3 bucket in the same region as your function.

" + "documentation":"

An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.

" }, "S3Key":{ "shape":"S3Key", @@ -1367,21 +1367,21 @@ "documentation":"

For versioned objects, the version of the deployment package object to use.

" } }, - "documentation":"

The code for the Lambda function. You can specify either an S3 location, or upload a deployment package directly.

" + "documentation":"

The code for the Lambda function. You can specify either an object in Amazon S3, or upload a deployment package directly.

" }, "FunctionCodeLocation":{ "type":"structure", "members":{ "RepositoryType":{ "shape":"String", - "documentation":"

The repository from which you can download the function.

" + "documentation":"

The service that's hosting the file.

" }, "Location":{ "shape":"String", - "documentation":"

The presigned URL you can use to download the function's .zip file that you previously uploaded. The URL is valid for up to 10 minutes.

" + "documentation":"

A presigned URL that you can use to download the deployment package.

" } }, - "documentation":"

The object for the Lambda function location.

" + "documentation":"

Details about a function's deployment package.

" }, "FunctionConfiguration":{ "type":"structure", @@ -1392,7 +1392,7 @@ }, "FunctionArn":{ "shape":"NameSpacedFunctionArn", - "documentation":"

The function's Amazon Resource Name.

" + "documentation":"

The function's Amazon Resource Name (ARN).

" }, "Runtime":{ "shape":"Runtime", @@ -1404,11 +1404,11 @@ }, "Handler":{ "shape":"Handler", - "documentation":"

The function Lambda calls to begin executing your function.

" + "documentation":"

The function that Lambda calls to begin executing your function.

" }, "CodeSize":{ "shape":"Long", - "documentation":"

The size of the function's deployment package in bytes.

" + "documentation":"

The size of the function's deployment package, in bytes.

" }, "Description":{ "shape":"Description", @@ -1416,11 +1416,11 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The amount of time that Lambda allows a function to run before terminating it.

" + "documentation":"

The amount of time that Lambda allows a function to run before stopping it.

" }, "MemorySize":{ "shape":"MemorySize", - "documentation":"

The memory allocated to the function

" + "documentation":"

The memory that's allocated to the function.

" }, "LastModified":{ "shape":"Timestamp", @@ -1448,7 +1448,7 @@ }, "KMSKeyArn":{ "shape":"KMSKeyArn", - "documentation":"

The KMS key used to encrypt the function's environment variables. Only returned if you've configured a customer managed CMK.

" + "documentation":"

The KMS key that's used to encrypt the function's environment variables. This key is only returned if you've configured a customer-managed CMK.

" }, "TracingConfig":{ "shape":"TracingConfigResponse", @@ -1456,18 +1456,18 @@ }, "MasterArn":{ "shape":"FunctionArn", - "documentation":"

The ARN of the master function.

" + "documentation":"

For Lambda@Edge functions, the ARN of the master function.

" }, "RevisionId":{ "shape":"String", - "documentation":"

Represents the latest updated revision of the function or alias.

" + "documentation":"

The latest updated revision of the function or alias.

" }, "Layers":{ "shape":"LayersReferenceList", - "documentation":"

A list of function layers.

" + "documentation":"

The function's layers.

" } }, - "documentation":"

A Lambda function's configuration settings.

" + "documentation":"

Details about a function's configuration.

" }, "FunctionList":{ "type":"list", @@ -1493,7 +1493,7 @@ "members":{ "AccountLimit":{ "shape":"AccountLimit", - "documentation":"

Limits related to concurrency and code storage.

" + "documentation":"

Limits that are related to concurrency and code storage.

" }, "AccountUsage":{ "shape":"AccountUsage", @@ -1510,13 +1510,13 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "Name":{ "shape":"Alias", - "documentation":"

Name of the alias for which you want to retrieve information.

", + "documentation":"

The name of the alias.

", "location":"uri", "locationName":"Name" } @@ -1540,7 +1540,7 @@ "members":{ "FunctionName":{ "shape":"NamespacedFunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function, version, or alias.

Name formats

  • Function name - my-function (name-only), my-function:v1 (with alias).

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, @@ -1558,7 +1558,7 @@ "members":{ "FunctionName":{ "shape":"NamespacedFunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function, version, or alias.

Name formats

  • Function name - my-function (name-only), my-function:v1 (with alias).

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, @@ -1575,22 +1575,21 @@ "members":{ "Configuration":{ "shape":"FunctionConfiguration", - "documentation":"

The function's configuration.

" + "documentation":"

The configuration of the function or version.

" }, "Code":{ "shape":"FunctionCodeLocation", - "documentation":"

The function's code.

" + "documentation":"

The deployment package of the function or version.

" }, "Tags":{ "shape":"Tags", - "documentation":"

Returns the list of tags associated with the function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

" + "documentation":"

The function's tags.

" }, "Concurrency":{ "shape":"Concurrency", - "documentation":"

The concurrent execution limit set for this function. For more information, see Managing Concurrency.

" + "documentation":"

The function's reserved concurrency.

" } - }, - "documentation":"

This response contains the object for the Lambda function location (see FunctionCodeLocation.

" + } }, "GetLayerVersionPolicyRequest":{ "type":"structure", @@ -1601,7 +1600,7 @@ "members":{ "LayerName":{ "shape":"LayerName", - "documentation":"

The name of the layer.

", + "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", "location":"uri", "locationName":"LayerName" }, @@ -1635,7 +1634,7 @@ "members":{ "LayerName":{ "shape":"LayerName", - "documentation":"

The name of the layer.

", + "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", "location":"uri", "locationName":"LayerName" }, @@ -1656,7 +1655,7 @@ }, "LayerArn":{ "shape":"LayerArn", - "documentation":"

The Amazon Resource Name (ARN) of the function layer.

" + "documentation":"

The ARN of the layer.

" }, "LayerVersionArn":{ "shape":"LayerVersionArn", @@ -1690,13 +1689,13 @@ "members":{ "FunctionName":{ "shape":"NamespacedFunctionName", - "documentation":"

The name of the lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function, version, or alias.

Name formats

  • Function name - my-function (name-only), my-function:v1 (with alias).

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "Qualifier":{ "shape":"Qualifier", - "documentation":"

You can specify this optional query parameter to specify a function version or an alias name in which case this API will return all permissions associated with the specific qualified ARN. If you don't provide this parameter, the API will return permissions that apply to the unqualified function ARN.

", + "documentation":"

Specify a version or alias to get the policy for that resource.

", "location":"querystring", "locationName":"Qualifier" } @@ -1707,11 +1706,11 @@ "members":{ "Policy":{ "shape":"String", - "documentation":"

The resource policy associated with the specified function. The response returns the same as a string using a backslash (\"\\\") as an escape character in the JSON.

" + "documentation":"

The resource-based policy.

" }, "RevisionId":{ "shape":"String", - "documentation":"

Represents the latest updated revision of the function or alias.

" + "documentation":"

A unique identifier for the current revision of the policy.

" } } }, @@ -1800,31 +1799,31 @@ "members":{ "FunctionName":{ "shape":"NamespacedFunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function, version, or alias.

Name formats

  • Function name - my-function (name-only), my-function:v1 (with alias).

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "InvocationType":{ "shape":"InvocationType", - "documentation":"

Choose from the following options.

  • RequestResponse (default) - Invoke the function synchronously. Keep the connection open until the function returns a response or times out.

  • Event - Invoke the function asynchronously. Send events that fail multiple times to the function's dead-letter queue (if configured).

  • DryRun - Validate parameter values and verify that the user or role has permission to invoke the function.

", + "documentation":"

Choose from the following options.

  • RequestResponse (default) - Invoke the function synchronously. Keep the connection open until the function returns a response or times out. The API response includes the function response and additional data.

  • Event - Invoke the function asynchronously. Send events that fail multiple times to the function's dead-letter queue (if it's configured). The API response only includes a status code.

  • DryRun - Validate parameter values and verify that the user or role has permission to invoke the function.

", "location":"header", "locationName":"X-Amz-Invocation-Type" }, "LogType":{ "shape":"LogType", - "documentation":"

You can set this optional parameter to Tail in the request only if you specify the InvocationType parameter with value RequestResponse. In this case, AWS Lambda returns the base64-encoded last 4 KB of log data produced by your Lambda function in the x-amz-log-result header.

", + "documentation":"

Set to Tail to include the execution log in the response.

", "location":"header", "locationName":"X-Amz-Log-Type" }, "ClientContext":{ "shape":"String", - "documentation":"

Using the ClientContext you can pass client-specific information to the Lambda function you are invoking. You can then process the client information in your Lambda function as you choose through the context variable. For an example of a ClientContext JSON, see PutEvents in the Amazon Mobile Analytics API Reference and User Guide.

The ClientContext JSON must be base64-encoded and has a maximum size of 3583 bytes.

ClientContext information is returned only if you use the synchronous (RequestResponse) invocation type.

", + "documentation":"

Up to 3583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.

", "location":"header", "locationName":"X-Amz-Client-Context" }, "Payload":{ "shape":"Blob", - "documentation":"

JSON that you want to provide to your Lambda function as input.

" + "documentation":"

The JSON that you want to provide to your Lambda function as input.

" }, "Qualifier":{ "shape":"Qualifier", @@ -1840,33 +1839,32 @@ "members":{ "StatusCode":{ "shape":"Integer", - "documentation":"

The HTTP status code will be in the 200 range for successful request. For the RequestResponse invocation type this status code will be 200. For the Event invocation type this status code will be 202. For the DryRun invocation type the status code will be 204.

", + "documentation":"

The HTTP status code is in the 200 range for a successful request. For the RequestResponse invocation type, this status code is 200. For the Event invocation type, this status code is 202. For the DryRun invocation type, the status code is 204.

", "location":"statusCode" }, "FunctionError":{ "shape":"String", - "documentation":"

Indicates whether an error occurred while executing the Lambda function. If an error occurred this field will have one of two values; Handled or Unhandled. Handled errors are errors that are reported by the function while the Unhandled errors are those detected and reported by AWS Lambda. Unhandled errors include out of memory errors and function timeouts. For information about how to report an Handled error, see Programming Model.

", + "documentation":"

If present, indicates that an error occurred during function execution. Details about the error are included in the response payload.

  • Handled - The runtime caught an error thrown by the function and formatted it into a JSON document.

  • Unhandled - The runtime didn't handle the error. For example, the function ran out of memory or timed out.

", "location":"header", "locationName":"X-Amz-Function-Error" }, "LogResult":{ "shape":"String", - "documentation":"

It is the base64-encoded logs for the Lambda function invocation. This is present only if the invocation type is RequestResponse and the logs were requested.

", + "documentation":"

The last 4 KB of the execution log, which is base64 encoded.

", "location":"header", "locationName":"X-Amz-Log-Result" }, "Payload":{ "shape":"Blob", - "documentation":"

It is the JSON representation of the object returned by the Lambda function. This is present only if the invocation type is RequestResponse.

In the event of a function error this field contains a message describing the error. For the Handled errors the Lambda function will report this message. For Unhandled errors AWS Lambda reports the message.

" + "documentation":"

The response from the function, or an error object.

" }, "ExecutedVersion":{ "shape":"Version", - "documentation":"

The function version that has been executed. This value is returned only if the invocation type is RequestResponse. For more information, see Traffic Shifting Using Aliases.

", + "documentation":"

The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.

", "location":"header", "locationName":"X-Amz-Executed-Version" } }, - "documentation":"

Upon success, returns an empty response. Otherwise, throws an exception.

", "payload":"Payload" }, "InvocationType":{ @@ -1886,13 +1884,13 @@ "members":{ "FunctionName":{ "shape":"NamespacedFunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - my-function.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "InvokeArgs":{ "shape":"BlobStream", - "documentation":"

JSON that you want to provide to your Lambda function as input.

" + "documentation":"

The JSON that you want to provide to your Lambda function as input.

" } }, "deprecated":true, @@ -1903,11 +1901,11 @@ "members":{ "Status":{ "shape":"HttpStatus", - "documentation":"

It will be 202 upon success.

", + "documentation":"

The status code.

", "location":"statusCode" } }, - "documentation":"

Upon success, it returns empty response. Otherwise, throws an exception.

", + "documentation":"

A success response (202 Accepted) indicates that the request is queued for invocation.

", "deprecated":true }, "KMSAccessDeniedException":{ @@ -1966,7 +1964,7 @@ "documentation":"

The size of the layer archive in bytes.

" } }, - "documentation":"

A function layer.

" + "documentation":"

An AWS Lambda layer.

" }, "LayerArn":{ "type":"string", @@ -2018,7 +2016,7 @@ "documentation":"

The base64-encoded contents of the layer archive. AWS SDK and AWS CLI clients handle the encoding for you.

" } }, - "documentation":"

A ZIP archive that contains the contents of the function layer. You can specify either an Amazon S3 location, or upload a layer archive directly.

" + "documentation":"

A ZIP archive that contains the contents of an AWS Lambda layer. You can specify either an Amazon S3 location, or upload a layer archive directly.

" }, "LayerVersionContentOutput":{ "type":"structure", @@ -2036,7 +2034,7 @@ "documentation":"

The size of the layer archive in bytes.

" } }, - "documentation":"

Details about a layer version.

" + "documentation":"

Details about a version of an AWS Lambda layer.

" }, "LayerVersionNumber":{"type":"long"}, "LayerVersionsList":{ @@ -2071,7 +2069,7 @@ "documentation":"

The layer's open-source license.

" } }, - "documentation":"

Details about a layer version.

" + "documentation":"

Details about a version of an AWS Lambda layer.

" }, "LayersList":{ "type":"list", @@ -2093,7 +2091,7 @@ "documentation":"

The newest version of the layer.

" } }, - "documentation":"

Details about a function layer.

" + "documentation":"

Details about an AWS Lambda layer.

" }, "LayersReferenceList":{ "type":"list", @@ -2109,25 +2107,25 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "FunctionVersion":{ "shape":"Version", - "documentation":"

If you specify this optional parameter, the API returns only the aliases that are pointing to the specific Lambda function version, otherwise the API returns all of the aliases created for the Lambda function.

", + "documentation":"

Specify a function version to only list aliases that invoke that version.

", "location":"querystring", "locationName":"FunctionVersion" }, "Marker":{ "shape":"String", - "documentation":"

Optional string. An opaque pagination token returned from a previous ListAliases operation. If present, indicates where to continue the listing.

", + "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", "location":"querystring", "locationName":"Marker" }, "MaxItems":{ "shape":"MaxListItems", - "documentation":"

Optional integer. Specifies the maximum number of aliases to return in response. This parameter value must be greater than 0.

", + "documentation":"

Limit the number of aliases returned.

", "location":"querystring", "locationName":"MaxItems" } @@ -2138,7 +2136,7 @@ "members":{ "NextMarker":{ "shape":"String", - "documentation":"

A string, present if there are more aliases.

" + "documentation":"

The pagination token that's included if more results are available.

" }, "Aliases":{ "shape":"AliasList", @@ -2193,25 +2191,25 @@ "members":{ "MasterRegion":{ "shape":"MasterRegion", - "documentation":"

Specify a region (e.g. us-east-2) to only list functions that were created in that region, or ALL to include functions replicated from any region. If specified, you also must specify the FunctionVersion.

", + "documentation":"

For Lambda@Edge functions, the AWS Region of the master function. For example, us-east-2 or ALL. If specified, you must set FunctionVersion to ALL.

", "location":"querystring", "locationName":"MasterRegion" }, "FunctionVersion":{ "shape":"FunctionVersion", - "documentation":"

Set to ALL to list all published versions. If not specified, only the latest unpublished version ARN is returned.

", + "documentation":"

Set to ALL to include entries for all published versions of each function.

", "location":"querystring", "locationName":"FunctionVersion" }, "Marker":{ "shape":"String", - "documentation":"

Optional string. An opaque pagination token returned from a previous ListFunctions operation. If present, indicates where to continue the listing.

", + "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", "location":"querystring", "locationName":"Marker" }, "MaxItems":{ "shape":"MaxListItems", - "documentation":"

Optional integer. Specifies the maximum number of AWS Lambda functions to return in response. This parameter value must be greater than 0. The absolute maximum of AWS Lambda functions that can be returned is 50.

", + "documentation":"

Specify a value between 1 and 50 to limit the number of functions in the response.

", "location":"querystring", "locationName":"MaxItems" } @@ -2222,7 +2220,7 @@ "members":{ "NextMarker":{ "shape":"String", - "documentation":"

A string, present if there are more functions.

" + "documentation":"

The pagination token that's included if more results are available.

" }, "Functions":{ "shape":"FunctionList", @@ -2243,7 +2241,7 @@ }, "LayerName":{ "shape":"LayerName", - "documentation":"

The name of the layer.

", + "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", "location":"uri", "locationName":"LayerName" }, @@ -2316,7 +2314,7 @@ "members":{ "Resource":{ "shape":"FunctionArn", - "documentation":"

The ARN (Amazon Resource Name) of the function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

", + "documentation":"

The function's Amazon Resource Name (ARN).

", "location":"uri", "locationName":"ARN" } @@ -2327,7 +2325,7 @@ "members":{ "Tags":{ "shape":"Tags", - "documentation":"

The list of tags assigned to the function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

" + "documentation":"

The function's tags.

" } } }, @@ -2337,19 +2335,19 @@ "members":{ "FunctionName":{ "shape":"NamespacedFunctionName", - "documentation":"

The name of the lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "Marker":{ "shape":"String", - "documentation":"

Optional string. An opaque pagination token returned from a previous ListVersionsByFunction operation. If present, indicates where to continue the listing.

", + "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", "location":"querystring", "locationName":"Marker" }, "MaxItems":{ "shape":"MaxListItems", - "documentation":"

Optional integer. Specifies the maximum number of AWS Lambda function versions to return in response. This parameter value must be greater than 0.

", + "documentation":"

Limit the number of versions that are returned.

", "location":"querystring", "locationName":"MaxItems" } @@ -2360,7 +2358,7 @@ "members":{ "NextMarker":{ "shape":"String", - "documentation":"

A string, present if there are more function versions.

" + "documentation":"

The pagination token that's included if more results are available.

" }, "Versions":{ "shape":"FunctionList", @@ -2454,7 +2452,7 @@ "members":{ "LayerName":{ "shape":"LayerName", - "documentation":"

The name of the layer.

", + "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", "location":"uri", "locationName":"LayerName" }, @@ -2468,7 +2466,7 @@ }, "CompatibleRuntimes":{ "shape":"CompatibleRuntimes", - "documentation":"

A list of compatible function runtimes. Used for filtering with ListLayers and ListLayerVersions.

" + "documentation":"

A list of compatible function runtimes. Used for filtering with ListLayers and ListLayerVersions.

" }, "LicenseInfo":{ "shape":"LicenseInfo", @@ -2485,7 +2483,7 @@ }, "LayerArn":{ "shape":"LayerArn", - "documentation":"

The Amazon Resource Name (ARN) of the function layer.

" + "documentation":"

The ARN of the layer.

" }, "LayerVersionArn":{ "shape":"LayerVersionArn", @@ -2519,21 +2517,21 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "CodeSha256":{ "shape":"String", - "documentation":"

The SHA256 hash of the deployment package you want to publish. This provides validation on the code you are publishing. If you provide this parameter, the value must match the SHA256 of the $LATEST version for the publication to succeed. You can use the DryRun parameter of UpdateFunctionCode to verify the hash value that will be returned before publishing your new version.

" + "documentation":"

Only publish a version if the hash value matches the value that's specified. Use this option to avoid publishing a version if the function code has changed since you last updated it. You can get the hash for the version that you uploaded from the output of UpdateFunctionCode.

" }, "Description":{ "shape":"Description", - "documentation":"

The description for the version you are publishing. If not provided, AWS Lambda copies the description from the $LATEST version.

" + "documentation":"

A description for the version to override the description in the function configuration.

" }, "RevisionId":{ "shape":"String", - "documentation":"

An optional value you can use to ensure you are updating the latest update of the function version or alias. If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias.

" + "documentation":"

Only update the function if the revision ID matches the ID that's specified. Use this option to avoid publishing a version if the function configuration has changed since you last updated it.

" } } }, @@ -2546,13 +2544,13 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - my-function.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "ReservedConcurrentExecutions":{ "shape":"ReservedConcurrentExecutions", - "documentation":"

The concurrent execution limit reserved for this function.

" + "documentation":"

The number of simultaneous executions to reserve for the function.

" } } }, @@ -2572,7 +2570,7 @@ "members":{ "LayerName":{ "shape":"LayerName", - "documentation":"

The name of the layer.

", + "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", "location":"uri", "locationName":"LayerName" }, @@ -2605,7 +2603,7 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function, version, or alias.

Name formats

  • Function name - my-function (name-only), my-function:v1 (with alias).

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, @@ -2623,7 +2621,7 @@ }, "RevisionId":{ "shape":"String", - "documentation":"

An optional value you can use to ensure you are updating the latest update of the function version or alias. If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you to retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias.

", + "documentation":"

Only update the policy if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.

", "location":"querystring", "locationName":"RevisionId" } @@ -2635,7 +2633,7 @@ "Type":{"shape":"String"}, "message":{"shape":"String"} }, - "documentation":"

The request payload exceeded the Invoke request body JSON input limit. For more information, see Limits.

", + "documentation":"

The request payload exceeded the Invoke request body JSON input limit. For more information, see Limits.

", "error":{"httpStatusCode":413}, "exception":true }, @@ -2669,7 +2667,7 @@ "Type":{"shape":"String"}, "Message":{"shape":"String"} }, - "documentation":"

The operation conflicts with the resource's availability. For example, you attempted to update an EventSoure Mapping in CREATING, or tried to delete a EventSoure mapping currently in the UPDATING state.

", + "documentation":"

The operation conflicts with the resource's availability. For example, you attempted to update an EventSource Mapping in CREATING, or tried to delete an EventSource mapping currently in the UPDATING state.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -2784,13 +2782,13 @@ "members":{ "Resource":{ "shape":"FunctionArn", - "documentation":"

The ARN (Amazon Resource Name) of the Lambda function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

", + "documentation":"

The function's Amazon Resource Name (ARN).

", "location":"uri", "locationName":"ARN" }, "Tags":{ "shape":"Tags", - "documentation":"

The list of tags (key-value pairs) you are assigning to the Lambda function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

" + "documentation":"

A list of tags to apply to the function.

" } } }, @@ -2828,7 +2826,7 @@ "message":{"shape":"String"}, "Reason":{"shape":"ThrottleReason"} }, - "documentation":"

Request throughput limit exceeded

", + "documentation":"

Request throughput limit exceeded.

", "error":{"httpStatusCode":429}, "exception":true }, @@ -2882,13 +2880,13 @@ "members":{ "Resource":{ "shape":"FunctionArn", - "documentation":"

The ARN (Amazon Resource Name) of the function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

", + "documentation":"

The function's Amazon Resource Name (ARN).

", "location":"uri", "locationName":"ARN" }, "TagKeys":{ "shape":"TagKeyList", - "documentation":"

The list of tag keys to be deleted from the function. For more information, see Tagging Lambda Functions in the AWS Lambda Developer Guide.

", + "documentation":"

A list of tag keys to remove from the function.

", "location":"querystring", "locationName":"tagKeys" } @@ -2903,31 +2901,31 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "Name":{ "shape":"Alias", - "documentation":"

The alias name.

", + "documentation":"

The name of the alias.

", "location":"uri", "locationName":"Name" }, "FunctionVersion":{ "shape":"Version", - "documentation":"

Using this parameter you can change the Lambda function version to which the alias points.

" + "documentation":"

The function version that the alias invokes.

" }, "Description":{ "shape":"Description", - "documentation":"

You can change the description of the alias using this parameter.

" + "documentation":"

A description of the alias.

" }, "RoutingConfig":{ "shape":"AliasRoutingConfiguration", - "documentation":"

Specifies an additional version your alias can point to, allowing you to dictate what percentage of traffic will invoke each version. For more information, see Traffic Shifting Using Aliases.

" + "documentation":"

The routing configuration of the alias.

" }, "RevisionId":{ "shape":"String", - "documentation":"

An optional value you can use to ensure you are updating the latest update of the function version or alias. If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias.

" + "documentation":"

Only update the alias if the revision ID matches the ID that's specified. Use this option to avoid modifying an alias that has changed since you last read it.

" } } }, @@ -2961,37 +2959,37 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - my-function.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "ZipFile":{ "shape":"Blob", - "documentation":"

The contents of your zip file containing your deployment package. If you are using the web API directly, the contents of the zip file must be base64-encoded. If you are using the AWS SDKs or the AWS CLI, the SDKs or CLI will do the encoding for you. For more information about creating a .zip file, see Execution Permissions.

" + "documentation":"

The base64-encoded contents of the deployment package. AWS SDK and AWS CLI clients handle the encoding for you.

" }, "S3Bucket":{ "shape":"S3Bucket", - "documentation":"

Amazon S3 bucket name where the .zip file containing your deployment package is stored. This bucket must reside in the same AWS Region where you are creating the Lambda function.

" + "documentation":"

An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.

" }, "S3Key":{ "shape":"S3Key", - "documentation":"

The Amazon S3 object (the deployment package) key name you want to upload.

" + "documentation":"

The Amazon S3 key of the deployment package.

" }, "S3ObjectVersion":{ "shape":"S3ObjectVersion", - "documentation":"

The Amazon S3 object (the deployment package) version you want to upload.

" + "documentation":"

For versioned objects, the version of the deployment package object to use.

" }, "Publish":{ "shape":"Boolean", - "documentation":"

This boolean parameter can be used to request AWS Lambda to update the Lambda function and publish a version as an atomic operation.

" + "documentation":"

Set to true to publish a new version of the function after updating the code. This has the same effect as calling PublishVersion separately.

" }, "DryRun":{ "shape":"Boolean", - "documentation":"

This boolean parameter can be used to test your request to AWS Lambda to update the Lambda function and publish a version as an atomic operation. It will do all necessary computation and validation of your code but will not upload it or a publish a version. Each time this operation is invoked, the CodeSha256 hash value of the provided code will also be computed and returned in the response.

" + "documentation":"

Set to true to validate the request parameters and access permissions without modifying the function code.

" }, "RevisionId":{ "shape":"String", - "documentation":"

An optional value you can use to ensure you are updating the latest update of the function version or alias. If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you to retrieve the latest function version or alias RevisionID using either using using either GetFunction or GetAlias.

" + "documentation":"

Only update the function if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.

" } } }, @@ -3001,49 +2999,49 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - my-function.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.

  • Partial ARN - 123456789012:function:my-function.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", "location":"uri", "locationName":"FunctionName" }, "Role":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that Lambda will assume when it executes your function.

" + "documentation":"

The Amazon Resource Name (ARN) of the function's execution role.

" }, "Handler":{ "shape":"Handler", - "documentation":"

The function that Lambda calls to begin executing your function. For Node.js, it is the module-name.export value in your function.

" + "documentation":"

The name of the method within your code that Lambda calls to execute your function. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Programming Model.

" }, "Description":{ "shape":"Description", - "documentation":"

A short user-defined function description. AWS Lambda does not use this value. Assign a meaningful description as you see fit.

" + "documentation":"

A description of the function.

" }, "Timeout":{ "shape":"Timeout", - "documentation":"

The amount of time that Lambda allows a function to run before terminating it. The default is 3 seconds. The maximum allowed value is 900 seconds.

" + "documentation":"

The amount of time that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds.

" }, "MemorySize":{ "shape":"MemorySize", - "documentation":"

The amount of memory, in MB, your Lambda function is given. AWS Lambda uses this memory size to infer the amount of CPU allocated to your function. Your function use-case determines your CPU and memory requirements. For example, a database operation might need less memory compared to an image processing function. The default value is 128 MB. The value must be a multiple of 64 MB.

" + "documentation":"

The amount of memory that your function has access to. Increasing the function's memory also increases its CPU allocation. The default value is 128 MB. The value must be a multiple of 64 MB.

" }, "VpcConfig":{ "shape":"VpcConfig", - "documentation":"

Specify security groups and subnets in a VPC to which your Lambda function needs access.

" + "documentation":"

For network connectivity to AWS resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can only access resources and the internet through that VPC. For more information, see VPC Settings.

" }, "Environment":{ "shape":"Environment", - "documentation":"

The parent object that contains your environment's configuration settings.

" + "documentation":"

Environment variables that are accessible from function code during execution.

" }, "Runtime":{ "shape":"Runtime", - "documentation":"

The runtime version for the function.

" + "documentation":"

The identifier of the function's runtime.

" }, "DeadLetterConfig":{ "shape":"DeadLetterConfig", - "documentation":"

A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead Letter Queues.

" + "documentation":"

A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead Letter Queues.

" }, "KMSKeyArn":{ "shape":"KMSKeyArn", - "documentation":"

The Amazon Resource Name (ARN) of the KMS key used to encrypt your function's environment variables. If you elect to use the AWS Lambda default service key, pass in an empty string (\"\") for this parameter.

" + "documentation":"

The ARN of the AWS Key Management Service (AWS KMS) key that's used to encrypt your function's environment variables. If it's not provided, AWS Lambda uses a default service key.

" }, "TracingConfig":{ "shape":"TracingConfig", @@ -3051,11 +3049,11 @@ }, "RevisionId":{ "shape":"String", - "documentation":"

An optional value you can use to ensure you are updating the latest update of the function version or alias. If the RevisionID you pass doesn't match the latest RevisionId of the function or alias, it will fail with an error message, advising you to retrieve the latest function version or alias RevisionID using either GetFunction or GetAlias.

" + "documentation":"

Only update the function if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.

" }, "Layers":{ "shape":"LayerList", - "documentation":"

A list of function layers to add to the function's execution environment.

" + "documentation":"

A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.

" } } }, @@ -3077,7 +3075,7 @@ "documentation":"

A list of VPC security group IDs.

" } }, - "documentation":"

The VPC security groups and subnets attached to a Lambda function.

" + "documentation":"

The VPC security groups and subnets that are attached to a Lambda function.

" }, "VpcConfigResponse":{ "type":"structure", @@ -3095,7 +3093,7 @@ "documentation":"

The ID of the VPC.

" } }, - "documentation":"

The VPC security groups and subnets attached to a Lambda function.

" + "documentation":"

The VPC security groups and subnets that are attached to a Lambda function.

" }, "VpcId":{"type":"string"}, "Weight":{ @@ -3104,5 +3102,5 @@ "min":0.0 } }, - "documentation":"AWS Lambda

Overview

This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides additional information. For the service overview, see What is AWS Lambda, and for information about how the service works, see AWS Lambda: How it Works in the AWS Lambda Developer Guide.

" + "documentation":"AWS Lambda

Overview

This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides additional information. For the service overview, see What is AWS Lambda, and for information about how the service works, see AWS Lambda: How it Works in the AWS Lambda Developer Guide.

" } diff --git a/botocore/data/license-manager/2018-08-01/paginators-1.json b/botocore/data/license-manager/2018-08-01/paginators-1.json index ea142457..03a3ca4d 100644 --- a/botocore/data/license-manager/2018-08-01/paginators-1.json +++ b/botocore/data/license-manager/2018-08-01/paginators-1.json @@ -1,3 +1,34 @@ { - "pagination": {} + "pagination": { + "ListAssociationsForLicenseConfiguration": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LicenseConfigurationAssociations" + }, + "ListLicenseConfigurations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LicenseConfigurations" + }, + "ListLicenseSpecificationsForResource": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LicenseSpecifications" + }, + "ListResourceInventory": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ResourceInventoryList" + }, + "ListUsageForLicenseConfiguration": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LicenseConfigurationUsageList" + } + } } diff --git a/botocore/data/lightsail/2016-11-28/paginators-1.json b/botocore/data/lightsail/2016-11-28/paginators-1.json index 7c9c475f..fbea9383 100644 --- a/botocore/data/lightsail/2016-11-28/paginators-1.json +++ b/botocore/data/lightsail/2016-11-28/paginators-1.json @@ -44,6 +44,61 @@ "input_token": "pageToken", "output_token": "nextPageToken", "result_key": "staticIps" + }, + "GetCloudFormationStackRecords": { + "input_token": "pageToken", + "output_token": "nextPageToken", + "result_key": "cloudFormationStackRecords" + }, + "GetDiskSnapshots": { + "input_token": "pageToken", + "output_token": "nextPageToken", + "result_key": "diskSnapshots" + }, + "GetDisks": { + "input_token": "pageToken", + "output_token": "nextPageToken", + 
"result_key": "disks" + }, + "GetExportSnapshotRecords": { + "input_token": "pageToken", + "output_token": "nextPageToken", + "result_key": "exportSnapshotRecords" + }, + "GetLoadBalancers": { + "input_token": "pageToken", + "output_token": "nextPageToken", + "result_key": "loadBalancers" + }, + "GetRelationalDatabaseBlueprints": { + "input_token": "pageToken", + "output_token": "nextPageToken", + "result_key": "blueprints" + }, + "GetRelationalDatabaseBundles": { + "input_token": "pageToken", + "output_token": "nextPageToken", + "result_key": "bundles" + }, + "GetRelationalDatabaseEvents": { + "input_token": "pageToken", + "output_token": "nextPageToken", + "result_key": "relationalDatabaseEvents" + }, + "GetRelationalDatabaseParameters": { + "input_token": "pageToken", + "output_token": "nextPageToken", + "result_key": "parameters" + }, + "GetRelationalDatabaseSnapshots": { + "input_token": "pageToken", + "output_token": "nextPageToken", + "result_key": "relationalDatabaseSnapshots" + }, + "GetRelationalDatabases": { + "input_token": "pageToken", + "output_token": "nextPageToken", + "result_key": "relationalDatabases" } } } diff --git a/botocore/data/lightsail/2016-11-28/service-2.json b/botocore/data/lightsail/2016-11-28/service-2.json index 86ad9ae5..565311c5 100644 --- a/botocore/data/lightsail/2016-11-28/service-2.json +++ b/botocore/data/lightsail/2016-11-28/service-2.json @@ -219,7 +219,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a snapshot of a block storage disk. You can use snapshots for backups, to make copies of disks, and to save data before shutting down a Lightsail instance.

You can take a snapshot of an attached disk that is in use; however, snapshots only capture data that has been written to your disk at the time the snapshot command is issued. This may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the disk long enough to take a snapshot, your snapshot should be complete. Nevertheless, if you cannot pause all file writes to the disk, you should unmount the disk from within the Lightsail instance, issue the create disk snapshot command, and then remount the disk to ensure a consistent and complete snapshot. You may remount and use your disk while the snapshot status is pending.

The create disk snapshot operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a snapshot of a block storage disk. You can use snapshots for backups, to make copies of disks, and to save data before shutting down a Lightsail instance.

You can take a snapshot of an attached disk that is in use; however, snapshots only capture data that has been written to your disk at the time the snapshot command is issued. This may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the disk long enough to take a snapshot, your snapshot should be complete. Nevertheless, if you cannot pause all file writes to the disk, you should unmount the disk from within the Lightsail instance, issue the create disk snapshot command, and then remount the disk to ensure a consistent and complete snapshot. You may remount and use your disk while the snapshot status is pending.

You can also use this operation to create a snapshot of an instance's system volume. You might want to do this, for example, to recover data from the system volume of a botched instance or to create a backup of the system volume like you would for a block storage disk. To create a snapshot of a system volume, just define the instance name parameter when issuing the snapshot command, and a snapshot of the defined instance's system volume will be created. After the snapshot is available, you can create a block storage disk from the snapshot and attach it to a running instance to access the data on the disk.

The create disk snapshot operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" }, "CreateDomain":{ "name":"CreateDomain", @@ -732,7 +732,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Exports a Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the create cloud formation stack operation to create new Amazon EC2 instances.

Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.

The export snapshot operation supports tag-based access control via resource tags applied to the resource identified by sourceSnapshotName. For more information, see the Lightsail Dev Guide.

Use the get instance snapshots or get disk snapshots operations to get a list of snapshots that you can export to Amazon EC2.

" + "documentation":"

Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the create cloud formation stack operation to create new Amazon EC2 instances.

Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.

The export snapshot operation supports tag-based access control via resource tags applied to the resource identified by sourceSnapshotName. For more information, see the Lightsail Dev Guide.

Use the get instance snapshots or get disk snapshots operations to get a list of snapshots that you can export to Amazon EC2.

" }, "GetActiveNames":{ "name":"GetActiveNames", @@ -1663,7 +1663,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Restarts a specific instance. When your Amazon Lightsail instance is finished rebooting, Lightsail assigns a new public IP address. To use the same IP address after restarting, create a static IP address and attach it to the instance.

The reboot instance operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Restarts a specific instance.

The reboot instance operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" }, "RebootRelationalDatabase":{ "name":"RebootRelationalDatabase", @@ -1720,7 +1720,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Starts a specific Amazon Lightsail instance from a stopped state. To restart an instance, use the reboot instance operation.

The start instance operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Starts a specific Amazon Lightsail instance from a stopped state. To restart an instance, use the reboot instance operation.

When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Lightsail Dev Guide.

The start instance operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" }, "StartRelationalDatabase":{ "name":"StartRelationalDatabase", @@ -1758,7 +1758,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Stops a specific Amazon Lightsail instance that is currently running.

The stop instance operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Stops a specific Amazon Lightsail instance that is currently running.

When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Lightsail Dev Guide.

The stop instance operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" }, "StopRelationalDatabase":{ "name":"StopRelationalDatabase", @@ -2435,19 +2435,20 @@ }, "CreateDiskSnapshotRequest":{ "type":"structure", - "required":[ - "diskName", - "diskSnapshotName" - ], + "required":["diskSnapshotName"], "members":{ "diskName":{ "shape":"ResourceName", - "documentation":"

The unique name of the source disk (e.g., my-source-disk).

" + "documentation":"

The unique name of the source disk (e.g., Disk-Virginia-1).

This parameter cannot be defined together with the instance name parameter. The disk name and instance name parameters are mutually exclusive.

" }, "diskSnapshotName":{ "shape":"ResourceName", "documentation":"

The name of the destination disk snapshot (e.g., my-disk-snapshot) based on the source disk.

" }, + "instanceName":{ + "shape":"ResourceName", + "documentation":"

The unique name of the source instance (e.g., Amazon_Linux-512MB-Virginia-1). When this is defined, a snapshot of the instance's system volume is created.

This parameter cannot be defined together with the disk name parameter. The instance name and disk name parameters are mutually exclusive.

" + }, "tags":{ "shape":"TagList", "documentation":"

The tag keys and optional values to add to the resource during create.

To tag a resource after it has been created, see the tag resource operation.

" @@ -3391,11 +3392,19 @@ }, "fromDiskName":{ "shape":"ResourceName", - "documentation":"

The unique name of the source disk from which you are creating the disk snapshot.

" + "documentation":"

The unique name of the source disk from which the disk snapshot was created.

" }, "fromDiskArn":{ "shape":"NonEmptyString", - "documentation":"

The Amazon Resource Name (ARN) of the source disk from which you are creating the disk snapshot.

" + "documentation":"

The Amazon Resource Name (ARN) of the source disk from which the disk snapshot was created.

" + }, + "fromInstanceName":{ + "shape":"ResourceName", + "documentation":"

The unique name of the source instance from which the disk (system volume) snapshot was created.

" + }, + "fromInstanceArn":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) of the source instance from which the disk (system volume) snapshot was created.

" } }, "documentation":"

Describes a block storage disk snapshot.

" @@ -4888,7 +4897,7 @@ }, "portInfoSource":{ "shape":"PortInfoSourceType", - "documentation":"

The port configuration to use for the new Amazon EC2 instance.

The following configuration options are available:

  • DEFAULT — Use the default firewall settings from the image.

  • INSTANCE — Use the firewall settings from the source Lightsail instance.

  • NONE — Default to Amazon EC2.

" + "documentation":"

The port configuration to use for the new Amazon EC2 instance.

The following configuration options are available:

  • DEFAULT — Use the default firewall settings from the image.

  • INSTANCE — Use the firewall settings from the source Lightsail instance.

  • NONE — Default to Amazon EC2.

  • CLOSED — All ports closed.

" }, "userData":{ "shape":"string", @@ -6029,7 +6038,8 @@ "enum":[ "DEFAULT", "INSTANCE", - "NONE" + "NONE", + "CLOSED" ] }, "PortList":{ diff --git a/botocore/data/logs/2014-03-28/paginators-1.json b/botocore/data/logs/2014-03-28/paginators-1.json index 57449c57..c5c1645f 100644 --- a/botocore/data/logs/2014-03-28/paginators-1.json +++ b/botocore/data/logs/2014-03-28/paginators-1.json @@ -38,6 +38,24 @@ "events", "searchedLogStreams" ] + }, + "DescribeExportTasks": { + "input_token": "nextToken", + "limit_key": "limit", + "output_token": "nextToken", + "result_key": "exportTasks" + }, + "DescribeQueries": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "queries" + }, + "DescribeResourcePolicies": { + "input_token": "nextToken", + "limit_key": "limit", + "output_token": "nextToken", + "result_key": "resourcePolicies" } } } diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index ab66bf17..c86d7916 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -541,7 +541,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group to query, the query string to use, and the time to query.

For more information, see CloudWatch Logs Insights Query Syntax.

" + "documentation":"

Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query, and the query string to use.

For more information, see CloudWatch Logs Insights Query Syntax.

" }, "StopQuery":{ "name":"StopQuery", @@ -2243,11 +2243,11 @@ }, "startTime":{ "shape":"Timestamp", - "documentation":"

The time to start the query. Specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC.

" + "documentation":"

The beginning of the time range to query. Specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC.

" }, "endTime":{ "shape":"Timestamp", - "documentation":"

The time to end this query, if it is still running. Specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC.

" + "documentation":"

The end of the time range to query. Specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC.

" }, "queryString":{ "shape":"QueryString", diff --git a/botocore/data/macie/2017-12-19/paginators-1.json b/botocore/data/macie/2017-12-19/paginators-1.json index ea142457..f7b6a2c2 100644 --- a/botocore/data/macie/2017-12-19/paginators-1.json +++ b/botocore/data/macie/2017-12-19/paginators-1.json @@ -1,3 +1,16 @@ { - "pagination": {} + "pagination": { + "ListMemberAccounts": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "memberAccounts" + }, + "ListS3Resources": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "s3Resources" + } + } } diff --git a/botocore/data/marketplace-entitlement/2017-01-11/paginators-1.json b/botocore/data/marketplace-entitlement/2017-01-11/paginators-1.json index ea142457..8dbf525e 100644 --- a/botocore/data/marketplace-entitlement/2017-01-11/paginators-1.json +++ b/botocore/data/marketplace-entitlement/2017-01-11/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "GetEntitlements": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Entitlements" + } + } } diff --git a/botocore/data/mediaconnect/2018-11-14/paginators-1.json b/botocore/data/mediaconnect/2018-11-14/paginators-1.json index ce5b17e3..bfc962af 100644 --- a/botocore/data/mediaconnect/2018-11-14/paginators-1.json +++ b/botocore/data/mediaconnect/2018-11-14/paginators-1.json @@ -5,6 +5,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Flows" + }, + "ListEntitlements": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Entitlements" } } } diff --git a/botocore/data/mediaconnect/2018-11-14/service-2.json b/botocore/data/mediaconnect/2018-11-14/service-2.json index d375646d..3ce3d24a 100644 --- a/botocore/data/mediaconnect/2018-11-14/service-2.json +++ 
b/botocore/data/mediaconnect/2018-11-14/service-2.json @@ -250,6 +250,32 @@ } ], "documentation" : "Displays a list of flows that are associated with this account. This request returns a paginated result." }, + "ListTagsForResource" : { + "name" : "ListTagsForResource", + "http" : { + "method" : "GET", + "requestUri" : "/tags/{resourceArn}", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListTagsForResourceRequest" + }, + "output" : { + "shape" : "ListTagsForResourceResponse", + "documentation" : "AWS Elemental MediaConnect listed the tags associated with the resource." + }, + "errors" : [ { + "shape" : "NotFoundException", + "documentation" : "The requested resource was not found" + }, { + "shape" : "BadRequestException", + "documentation" : "The client performed an invalid request" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "Internal service error" + } ], + "documentation" : "Lists all tags associated with the resource." + }, "RemoveFlowOutput" : { "name" : "RemoveFlowOutput", "http" : { @@ -390,6 +416,50 @@ } ], "documentation" : "Stops a flow." }, + "TagResource" : { + "name" : "TagResource", + "http" : { + "method" : "POST", + "requestUri" : "/tags/{resourceArn}", + "responseCode" : 204 + }, + "input" : { + "shape" : "TagResourceRequest" + }, + "errors" : [ { + "shape" : "NotFoundException", + "documentation" : "The requested resource was not found" + }, { + "shape" : "BadRequestException", + "documentation" : "The client performed an invalid request" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "Internal service error" + } ], + "documentation" : "Associates the specified tags to a resource. If the request does not mention an existing tag associated with the resource, that tag is not changed." 
+ }, + "UntagResource" : { + "name" : "UntagResource", + "http" : { + "method" : "DELETE", + "requestUri" : "/tags/{resourceArn}", + "responseCode" : 204 + }, + "input" : { + "shape" : "UntagResourceRequest" + }, + "errors" : [ { + "shape" : "NotFoundException", + "documentation" : "The requested resource was not found" + }, { + "shape" : "BadRequestException", + "documentation" : "The client performed an invalid request" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "Internal service error" + } ], + "documentation" : "Deletes the specified tags from a resource." + }, "UpdateFlowEntitlement" : { "name" : "UpdateFlowEntitlement", "http" : { @@ -1012,6 +1082,28 @@ } } }, + "ListTagsForResourceRequest" : { + "type" : "structure", + "members" : { + "ResourceArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "resourceArn", + "documentation" : "The Amazon Resource Name (ARN) of the resource that you want to view tags for." + } + }, + "required" : [ "ResourceArn" ] + }, + "ListTagsForResourceResponse" : { + "type" : "structure", + "members" : { + "Tags" : { + "shape" : "__mapOf__string", + "locationName" : "tags", + "documentation" : "A map from tag keys to values. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters." + } + } + }, "ListedEntitlement" : { "type" : "structure", "members" : { @@ -1419,6 +1511,24 @@ } } }, + "TagResourceRequest" : { + "type" : "structure", + "members" : { + "ResourceArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "resourceArn", + "documentation" : "The Amazon Resource Name (ARN) of the resource that you want to add tags to." + }, + "Tags" : { + "shape" : "__mapOf__string", + "locationName" : "tags", + "documentation" : "A map from tag keys to values. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters." 
+ } + }, + "documentation" : "The tags to add to the resource. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.", + "required" : [ "ResourceArn", "Tags" ] + }, "TooManyRequestsException" : { "type" : "structure", "members" : { @@ -1467,6 +1577,24 @@ "documentation" : "Attributes related to the transport stream that are used in a source or output.", "required" : [ "Protocol" ] }, + "UntagResourceRequest" : { + "type" : "structure", + "members" : { + "ResourceArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "resourceArn", + "documentation" : "The Amazon Resource Name (ARN) of the resource that you want to remove tags from." + }, + "TagKeys" : { + "shape" : "__listOf__string", + "location" : "querystring", + "locationName" : "tagKeys", + "documentation" : "The keys of the tags to be removed." + } + }, + "required" : [ "TagKeys", "ResourceArn" ] + }, "UpdateEncryption" : { "type" : "structure", "members" : { @@ -1747,6 +1875,15 @@ "__long" : { "type" : "long" }, + "__mapOf__string" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "__string" + } + }, "__string" : { "type" : "string" }, diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 4c2c3529..cb523b49 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -92,7 +92,7 @@ "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], - "documentation": "Permanently remove a job from a queue. Once you have canceled a job, you can't start it again. You can't delete a running job." + "documentation": "Permanently cancel a job. Once you have canceled a job, you can't start it again." 
}, "CreateJob": { "name": "CreateJob", @@ -1093,7 +1093,7 @@ "Bitrate": { "shape": "__integerMin6000Max1024000", "locationName": "bitrate", - "documentation": "Average bitrate in bits/second. Defaults and valid values depend on rate control mode and profile." + "documentation": "Average bitrate in bits/second. The set of valid values for this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, 1024000. The value you set is also constrained by the values you choose for Profile (codecProfile), Bitrate control mode (codingMode), and Sample rate (sampleRate). Default values depend on Bitrate control mode and Profile." }, "CodecProfile": { "shape": "AacCodecProfile", @@ -1234,9 +1234,31 @@ }, "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AC3." }, + "AccelerationMode": { + "type": "string", + "documentation": "Acceleration configuration for the job.", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "AccelerationSettings": { + "type": "structure", + "members": { + "Mode": { + "shape": "AccelerationMode", + "locationName": "mode", + "documentation": "Acceleration configuration for the job." + } + }, + "documentation": "Acceleration settings for job execution.", + "required": [ + "Mode" + ] + }, "AfdSignaling": { "type": "string", - "documentation": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling (AfdSignaling) to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. 
* Choose Auto to calculate output AFD values based on the input AFD scaler data.", + "documentation": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling (AfdSignaling) to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data.", "enum": [ "NONE", "AUTO", @@ -1277,7 +1299,7 @@ }, "AntiAlias": { "type": "string", - "documentation": "Enable Anti-alias (AntiAlias) to enhance sharp edges in video output when your input resolution is much larger than your output resolution. Default is enabled.", + "documentation": "You no longer need to specify the anti-alias filter. It's now automatically applied to all outputs. This property is deprecated.", "enum": [ "DISABLED", "ENABLED" @@ -1626,6 +1648,11 @@ "locationName": "fontResolution", "documentation": "Font resolution in DPI (dots per inch); default is 96 dpi.\nAll burn-in and DVB-Sub font settings must match." }, + "FontScript": { + "shape": "FontScript", + "locationName": "fontScript", + "documentation": "Provide the font script, using an ISO 15924 script code, if the LanguageCode is not sufficient for determining the script type. Where LanguageCode or CustomLanguageCode is sufficient, use \"AUTOMATIC\" or leave unset. This is used to help determine the appropriate font for rendering burn-in captions." 
+ }, "FontSize": { "shape": "__integerMin0Max96", "locationName": "fontSize", @@ -1764,7 +1791,7 @@ "CustomLanguageCode": { "shape": "__stringMin3Max3PatternAZaZ3", "locationName": "customLanguageCode", - "documentation": "Indicates the language of the caption output track, using the ISO 639-2 or ISO 639-3 three-letter language code" + "documentation": "Indicates the language of the caption output track, using the ISO 639-2 or ISO 639-3 three-letter language code. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information to choose the font language for rendering the captions text." }, "DestinationSettings": { "shape": "CaptionDestinationSettings", @@ -1773,7 +1800,7 @@ "LanguageCode": { "shape": "LanguageCode", "locationName": "languageCode", - "documentation": "Indicates the language of the caption output track." + "documentation": "Specify the language of this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information to choose the font language for rendering the captions text." }, "LanguageDescription": { "shape": "__string", @@ -1789,7 +1816,7 @@ "CustomLanguageCode": { "shape": "__stringMin3Max3PatternAZaZ3", "locationName": "customLanguageCode", - "documentation": "Indicates the language of the caption output track, using the ISO 639-2 or ISO 639-3 three-letter language code" + "documentation": "Indicates the language of the caption output track, using the ISO 639-2 or ISO 639-3 three-letter language code. For most captions output formats, the encoder puts this language information in the output captions metadata. 
If your output captions format is DVB-Sub or Burn in, the encoder uses this language information to choose the font language for rendering the captions text." }, "DestinationSettings": { "shape": "CaptionDestinationSettings", @@ -1798,7 +1825,7 @@ "LanguageCode": { "shape": "LanguageCode", "locationName": "languageCode", - "documentation": "Indicates the language of the caption output track." + "documentation": "Specify the language of this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information to choose the font language for rendering the captions text." }, "LanguageDescription": { "shape": "__string", @@ -1823,6 +1850,10 @@ "shape": "DvbSubDestinationSettings", "locationName": "dvbSubDestinationSettings" }, + "EmbeddedDestinationSettings": { + "shape": "EmbeddedDestinationSettings", + "locationName": "embeddedDestinationSettings" + }, "SccDestinationSettings": { "shape": "SccDestinationSettings", "locationName": "sccDestinationSettings" @@ -1840,7 +1871,7 @@ }, "CaptionDestinationType": { "type": "string", - "documentation": "Type of Caption output, including Burn-In, Embedded (with or without SCTE20), SCC, SMI, SRT, TTML, WebVTT, DVB-Sub, Teletext.", + "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, DVB-sub, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED) to create an output that complies with the SCTE-43 spec. 
To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20).", "enum": [ "BURN_IN", "DVB_SUB", @@ -1901,6 +1932,10 @@ "TeletextSourceSettings": { "shape": "TeletextSourceSettings", "locationName": "teletextSourceSettings" + }, + "TrackSourceSettings": { + "shape": "TrackSourceSettings", + "locationName": "trackSourceSettings" } }, "documentation": "Source settings (SourceSettings) contains the group of settings for captions in the input." @@ -1919,7 +1954,8 @@ "SRT", "SMI", "TELETEXT", - "NULL_SOURCE" + "NULL_SOURCE", + "IMSC" ] }, "ChannelMapping": { @@ -2138,7 +2174,8 @@ }, "Hdr10Metadata": { "shape": "Hdr10Metadata", - "locationName": "hdr10Metadata" + "locationName": "hdr10Metadata", + "documentation": "Use the HDR master display (Hdr10Metadata) settings to correct HDR metadata or to provide missing metadata. Note that these settings are not color correction." }, "Hue": { "shape": "__integerMinNegative180Max180", @@ -2261,6 +2298,11 @@ "CreateJobRequest": { "type": "structure", "members": { + "AccelerationSettings": { + "shape": "AccelerationSettings", + "locationName": "accelerationSettings", + "documentation": "This is a beta feature. If you are interested in using this feature, please contact AWS customer support." + }, "BillingTagsSource": { "shape": "BillingTagsSource", "locationName": "billingTagsSource" @@ -2290,6 +2332,11 @@ "shape": "JobSettings", "locationName": "settings" }, + "StatusUpdateIntervalInSecs": { + "shape": "__integerMin10Max600", + "locationName": "statusUpdateIntervalInSecs", + "documentation": "Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch Events. Set the interval, in seconds, between status updates. MediaConvert sends an update at this interval from the time the service begins processing your job to the time it completes the transcode or encounters an error." 
+ }, "UserMetadata": { "shape": "__mapOf__string", "locationName": "userMetadata", @@ -2313,6 +2360,11 @@ "CreateJobTemplateRequest": { "type": "structure", "members": { + "AccelerationSettings": { + "shape": "AccelerationSettings", + "locationName": "accelerationSettings", + "documentation": "This is a beta feature. If you are interested in using this feature please contact AWS customer support." + }, "Category": { "shape": "__string", "locationName": "category", @@ -2337,6 +2389,11 @@ "shape": "JobTemplateSettings", "locationName": "settings" }, + "StatusUpdateIntervalInSecs": { + "shape": "__integerMin10Max600", + "locationName": "statusUpdateIntervalInSecs", + "documentation": "Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch Events. Set the interval, in seconds, between status updates. MediaConvert sends an update at this interval from the time the service begins processing your job to the time it completes the transcode or encounters an error." + }, "Tags": { "shape": "__mapOf__string", "locationName": "tags", @@ -2780,6 +2837,11 @@ "locationName": "fontResolution", "documentation": "Font resolution in DPI (dots per inch); default is 96 dpi.\nAll burn-in and DVB-Sub font settings must match." }, + "FontScript": { + "shape": "FontScript", + "locationName": "fontScript", + "documentation": "Provide the font script, using an ISO 15924 script code, if the LanguageCode is not sufficient for determining the script type. Where LanguageCode or CustomLanguageCode is sufficient, use \"AUTOMATIC\" or leave unset. This is used to help determine the appropriate font for rendering DVB-Sub captions." 
+ }, "FontSize": { "shape": "__integerMin0Max96", "locationName": "fontSize", @@ -3143,6 +3205,17 @@ "DISABLED" ] }, + "EmbeddedDestinationSettings": { + "type": "structure", + "members": { + "Destination608ChannelNumber": { + "shape": "__integerMin1Max4", + "locationName": "destination608ChannelNumber", + "documentation": "Ignore this setting unless your input captions are SCC format and your output container is MXF. With this combination of input captions format and output container, you can optionally use this setting to replace the input channel number with the track number that you specify. Specify a different number for each output captions track. If you don't specify an output track number, the system uses the input channel number for the output channel number. This setting applies to each output individually. You can optionally combine two captions channels in your output. The two output channel numbers can be one of the following pairs: 1,3; 2,4; 1,4; or 2,3." + } + }, + "documentation": "Settings specific to embedded/ancillary caption outputs, including 608/708 Channel destination number." + }, "EmbeddedSourceSettings": { "type": "structure", "members": { @@ -3174,6 +3247,49 @@ }, "documentation": "Describes an account-specific API endpoint." }, + "EsamManifestConfirmConditionNotification": { + "type": "structure", + "members": { + "MccXml": { + "shape": "__stringPatternSNManifestConfirmConditionNotificationNS", + "locationName": "mccXml", + "documentation": "Provide your ESAM ManifestConfirmConditionNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the Manifest Conditioning instructions in the message that you supply." + } + }, + "documentation": "ESAM ManifestConfirmConditionNotification defined by OC-SP-ESAM-API-I03-131025." 
+ }, + "EsamSettings": { + "type": "structure", + "members": { + "ManifestConfirmConditionNotification": { + "shape": "EsamManifestConfirmConditionNotification", + "locationName": "manifestConfirmConditionNotification", + "documentation": "Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the manifest conditioning instructions that you provide in the setting MCC XML (mccXml)." + }, + "ResponseSignalPreroll": { + "shape": "__integerMin0Max30000", + "locationName": "responseSignalPreroll", + "documentation": "Specifies the stream distance, in milliseconds, between the SCTE 35 messages that the transcoder places and the splice points that they refer to. If the time between the start of the asset and the SCTE-35 message is less than this value, then the transcoder places the SCTE-35 marker at the beginning of the stream." + }, + "SignalProcessingNotification": { + "shape": "EsamSignalProcessingNotification", + "locationName": "signalProcessingNotification", + "documentation": "Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the signal processing instructions that you provide in the setting SCC XML (sccXml)." + } + }, + "documentation": "Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion, you can ignore these settings." + }, + "EsamSignalProcessingNotification": { + "type": "structure", + "members": { + "SccXml": { + "shape": "__stringPatternSNSignalProcessingNotificationNS", + "locationName": "sccXml", + "documentation": "Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the signal processing instructions in the message that you supply. Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. 
If you want the service to place SCTE-35 markers at the insertion points you specify in the XML document, you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either specify an ESAM XML document or enable SCTE-35 passthrough. You can't do both." + } + }, + "documentation": "ESAM SignalProcessingNotification data defined by OC-SP-ESAM-API-I03-131025." + }, "ExceptionBody": { "type": "structure", "members": { @@ -3240,6 +3356,15 @@ }, "documentation": "Settings for File-based Captions in Source" }, + "FontScript": { + "type": "string", + "documentation": "Provide the font script, using an ISO 15924 script code, if the LanguageCode is not sufficient for determining the script type. Where LanguageCode or CustomLanguageCode is sufficient, use \"AUTOMATIC\" or leave unset.", + "enum": [ + "AUTOMATIC", + "HANS", + "HANT" + ] + }, "ForbiddenException": { "type": "structure", "members": { @@ -3386,7 +3511,7 @@ }, "H264CodecLevel": { "type": "string", - "documentation": "H.264 Level.", + "documentation": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto (AUTO).", "enum": [ "AUTO", "LEVEL_1", @@ -3453,7 +3578,7 @@ }, "H264FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the framerate for this output. If you want to keep the same framerate as the input video, choose Follow source. If you want to do framerate conversion, choose a framerate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your framerate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the framerate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. 
Choose SPECIFIED if you want the service to use the framerate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -3461,7 +3586,7 @@ }, "H264FramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during framerate conversion.", + "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -3600,12 +3725,12 @@ "FramerateDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "framerateDenominator", - "documentation": "When you use the API for transcode jobs that use framerate conversion, specify the framerate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use framerate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." 
+ "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." }, "FramerateNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "framerateNumerator", - "documentation": "Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 fps." + "documentation": "Frame rate numerator - frame rate is a fraction, e.g. 24000 / 1001 = 23.976 fps." }, "GopBReference": { "shape": "H264GopBReference", @@ -3852,7 +3977,7 @@ }, "H265FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the framerate for this output. If you want to keep the same framerate as the input video, choose Follow source. If you want to do framerate conversion, choose a framerate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your framerate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the framerate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Choose SPECIFIED if you want the service to use the framerate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. 
If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -3860,7 +3985,7 @@ }, "H265FramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during framerate conversion.", + "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -3996,12 +4121,12 @@ "FramerateDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "framerateDenominator", - "documentation": "Framerate denominator." + "documentation": "Frame rate denominator." }, "FramerateNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "framerateNumerator", - "documentation": "Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 fps." + "documentation": "Frame rate numerator - frame rate is a fraction, e.g. 24000 / 1001 = 23.976 fps." }, "GopBReference": { "shape": "H265GopBReference", @@ -4125,7 +4250,8 @@ }, "WriteMp4PackagingType": { "shape": "H265WriteMp4PackagingType", - "locationName": "writeMp4PackagingType" + "locationName": "writeMp4PackagingType", + "documentation": "Use this setting only for outputs encoded with H.265 that are in CMAF or DASH output groups. 
If you include writeMp4PackagingType in your JSON job specification for other outputs, your video might not work properly with downstream systems and video players. If the location of parameter set NAL units don't matter in your workflow, ignore this setting. The service defaults to marking your output as HEV1. Choose HVC1 to mark your output as HVC1. This makes your output compliant with this specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service stores parameter set NAL units in the sample headers but not in the samples directly. Keep the default HEV1 to mark your output as HEV1. For these outputs, the service writes parameter set NAL units directly into the samples." } }, "documentation": "Settings for H265 codec" @@ -4189,7 +4315,7 @@ }, "H265WriteMp4PackagingType": { "type": "string", - "documentation": "If HVC1, output that is H.265 will be marked as HVC1 and adhere to the ISO-IECJTC1-SC29_N13798_Text_ISOIEC_FDIS_14496-15_3rd_E spec which states that parameter set NAL units will be stored in the sample headers but not in the samples directly. If HEV1, then H.265 will be marked as HEV1 and parameter set NAL units will be written into the samples.", + "documentation": "Use this setting only for outputs encoded with H.265 that are in CMAF or DASH output groups. If you include writeMp4PackagingType in your JSON job specification for other outputs, your video might not work properly with downstream systems and video players. If the location of parameter set NAL units don't matter in your workflow, ignore this setting. The service defaults to marking your output as HEV1. Choose HVC1 to mark your output as HVC1. This makes your output compliant with this specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service stores parameter set NAL units in the sample headers but not in the samples directly. Keep the default HEV1 to mark your output as HEV1. 
For these outputs, the service writes parameter set NAL units directly into the samples.", "enum": [ "HVC1", "HEV1" @@ -4259,7 +4385,7 @@ "documentation": "HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction." } }, - "documentation": "Use the HDR master display (Hdr10Metadata) settings to correct HDR metadata or to provide missing metadata. These values vary depending on the input video and must be provided by a color grader. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that these settings are not color correction. Note that if you are creating HDR outputs inside of an HLS CMAF package, to comply with the Apple specification, you must use the HVC1 for H.265 setting." + "documentation": "Use the \"HDR master display information\" (Hdr10Metadata) settings to correct HDR metadata or to provide missing metadata. These values vary depending on the input video and must be provided by a color grader. Range is 0 to 50,000; each increment represents 0.00002 in CIE1931 color coordinate. Note that these settings are not color correction. Note that if you are creating HDR outputs inside of an HLS CMAF package, to comply with the Apple specification, you must use the following settings. Set \"MP4 packaging type\" (writeMp4PackagingType) to HVC1 (HVC1). Set \"Profile\" (H265Settings > codecProfile) to Main10/High (MAIN10_HIGH). Set \"Level\" (H265Settings > codecLevel) to 5 (LEVEL_5)." 
}, "HlsAdMarkers": { "type": "string", @@ -4352,6 +4478,10 @@ "shape": "HlsInitializationVectorInManifest", "locationName": "initializationVectorInManifest" }, + "OfflineEncrypted": { + "shape": "HlsOfflineEncrypted", + "locationName": "offlineEncrypted" + }, "SpekeKeyProvider": { "shape": "SpekeKeyProvider", "locationName": "spekeKeyProvider" @@ -4525,6 +4655,14 @@ "INTEGER" ] }, + "HlsOfflineEncrypted": { + "type": "string", + "documentation": "Enable this setting to insert the EXT-X-SESSION-KEY element into the master playlist. This allows for offline Apple HLS FairPlay content protection.", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, "HlsOutputSelection": { "type": "string", "documentation": "Indicates whether the .m3u8 manifest file should be generated for this HLS output group.", @@ -4620,7 +4758,7 @@ "documentation": "Specify the images that you want to overlay on your video. The images must be PNG or TGA files." } }, - "documentation": "Enable the Image inserter (ImageInserter) feature to include a graphic overlay on your video. Enable or disable this feature for each input or output individually. This setting is disabled by default." + "documentation": "Enable the image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each input or output individually. This setting is disabled by default." }, "Input": { "type": "structure", @@ -4647,16 +4785,16 @@ "DecryptionSettings": { "shape": "InputDecryptionSettings", "locationName": "decryptionSettings", - "documentation": "If the input file is encrypted, decryption settings to decrypt the media file" + "documentation": "Settings for decrypting any input files that are encrypted." 
}, "DenoiseFilter": { "shape": "InputDenoiseFilter", "locationName": "denoiseFilter" }, "FileInput": { - "shape": "__stringPatternS3MM2VVMMPPEEGGAAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MM", + "shape": "__stringPatternS3MM2VVMMPPEEGGAAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL", "locationName": "fileInput", - "documentation": "Use Input (fileInput) to define the source file used in the transcode job. There can be multiple inputs in a job. These inputs are concantenated, in the order they are specified in the job, to create the output." + "documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs that contain assets referenced by the CPL." }, "FilterEnable": { "shape": "InputFilterEnable", @@ -4670,7 +4808,7 @@ "ImageInserter": { "shape": "ImageInserter", "locationName": "imageInserter", - "documentation": "Enable the Image inserter (ImageInserter) feature to include a graphic overlay on your video. Enable or disable this feature for each input individually. This setting is disabled by default." + "documentation": "Enable the image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each input individually. 
This setting is disabled by default." }, "InputClippings": { "shape": "__listOfInputClipping", @@ -4686,6 +4824,11 @@ "shape": "InputPsiControl", "locationName": "psiControl" }, + "SupplementalImps": { + "shape": "__listOf__stringPatternS3ASSETMAPXml", + "locationName": "supplementalImps", + "documentation": "Provide a list of any necessary supplemental IMPs. You need supplemental IMPs if the CPL that you're using for your input is in an incomplete IMP. Specify either the supplemental IMP directories with a trailing slash or the ASSETMAP.xml files. For example [\"s3://bucket/ov/\", \"s3://bucket/vf2/ASSETMAP.xml\"]. You don't need to specify the IMP that contains your input CPL, because the service automatically detects it." + }, "TimecodeSource": { "shape": "InputTimecodeSource", "locationName": "timecodeSource" @@ -4771,6 +4914,17 @@ "USE_PSI" ] }, + "InputRotate": { + "type": "string", + "documentation": "Use Rotate (InputRotate) to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata.", + "enum": [ + "DEGREE_0", + "DEGREES_90", + "DEGREES_180", + "DEGREES_270", + "AUTO" + ] + }, "InputTemplate": { "type": "structure", "members": { @@ -4809,7 +4963,7 @@ "ImageInserter": { "shape": "ImageInserter", "locationName": "imageInserter", - "documentation": "Enable the Image inserter (ImageInserter) feature to include a graphic overlay on your video. 
Enable or disable this feature for each input individually. This setting is disabled by default." + "documentation": "Enable the image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each input individually. This setting is disabled by default." }, "InputClippings": { "shape": "__listOfInputClipping", @@ -4851,12 +5005,12 @@ "Duration": { "shape": "__integerMin0Max2147483647", "locationName": "duration", - "documentation": "Set the time, in milliseconds, for the image to remain on the output video." + "documentation": "Specify the time, in milliseconds, for the image to remain on the output video. This duration includes fade-in time but not fade-out time." }, "FadeIn": { "shape": "__integerMin0Max2147483647", "locationName": "fadeIn", - "documentation": "Set the length of time, in milliseconds, between the Start time that you specify for the image insertion and the time that the image appears at full opacity. Full opacity is the level that you specify for the opacity setting. If you don't specify a value for Fade-in, the image will appear abruptly at the overlay start time." + "documentation": "Specify the length of time, in milliseconds, between the Start time that you specify for the image insertion and the time that the image appears at full opacity. Full opacity is the level that you specify for the opacity setting. If you don't specify a value for Fade-in, the image will appear abruptly at the overlay start time." }, "FadeOut": { "shape": "__integerMin0Max2147483647", @@ -4871,17 +5025,17 @@ "ImageInserterInput": { "shape": "__stringMin14PatternS3BmpBMPPngPNGTgaTGA", "locationName": "imageInserterInput", - "documentation": "Use Image location (imageInserterInput) to specify the Amazon S3 location of the image to be inserted into the output. Use a PNG or TGA file that fits inside the video frame." + "documentation": "Specify the Amazon S3 location of the image that you want to overlay on the video. 
Use a PNG or TGA file." }, "ImageX": { "shape": "__integerMin0Max2147483647", "locationName": "imageX", - "documentation": "Use Left (ImageX) to set the distance, in pixels, between the inserted image and the left edge of the video frame. Required for any image overlay that you specify." + "documentation": "Specify the distance, in pixels, between the inserted image and the left edge of the video frame. Required for any image overlay that you specify." }, "ImageY": { "shape": "__integerMin0Max2147483647", "locationName": "imageY", - "documentation": "Use Top (ImageY) to set the distance, in pixels, between the overlaid image and the top edge of the video frame. Required for any image overlay that you specify." + "documentation": "Specify the distance, in pixels, between the overlaid image and the top edge of the video frame. Required for any image overlay that you specify." }, "Layer": { "shape": "__integerMin0Max99", @@ -4896,7 +5050,7 @@ "StartTime": { "shape": "__stringPattern01D20305D205D", "locationName": "startTime", - "documentation": "Use Start time (StartTime) to specify the video timecode when the image is inserted in the output. This must be in timecode (HH:MM:SS:FF or HH:MM:SS;FF) format." + "documentation": "Specify the timecode of the frame that you want the overlay to first appear on. This must be in timecode (HH:MM:SS:FF or HH:MM:SS;FF) format. Remember to take into account your timecode source settings." }, "Width": { "shape": "__integerMin0Max2147483647", @@ -4904,7 +5058,7 @@ "documentation": "Specify the width of the inserted image in pixels. If you specify a value that's larger than the video resolution width, the service will crop your overlaid image to fit. To use the native width of the image, keep this setting blank." } }, - "documentation": "Settings that specify how your overlay appears." + "documentation": "Settings that specify how your still graphic overlay appears." 
}, "InternalServerErrorException": { "type": "structure", @@ -4923,6 +5077,11 @@ "Job": { "type": "structure", "members": { + "AccelerationSettings": { + "shape": "AccelerationSettings", + "locationName": "accelerationSettings", + "documentation": "Acceleration settings for job execution." + }, "Arn": { "shape": "__string", "locationName": "arn", @@ -4980,6 +5139,11 @@ "shape": "JobStatus", "locationName": "status" }, + "StatusUpdateIntervalInSecs": { + "shape": "__integerMin10Max600", + "locationName": "statusUpdateIntervalInSecs", + "documentation": "Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch Events. Set the interval, in seconds, between status updates. MediaConvert sends an update at this interval from the time the service begins processing your job to the time it completes the transcode or encounters an error." + }, "Timing": { "shape": "Timing", "locationName": "timing" @@ -5009,6 +5173,11 @@ "locationName": "availBlanking", "documentation": "Settings for ad avail blanking. Video can be blanked or overlaid with an image, and audio muted during SCTE-35 triggered ad avails." }, + "Esam": { + "shape": "EsamSettings", + "locationName": "esam", + "documentation": "Settings for Event Signaling And Messaging (ESAM)." + }, "Inputs": { "shape": "__listOfInput", "locationName": "inputs", @@ -5054,6 +5223,11 @@ "JobTemplate": { "type": "structure", "members": { + "AccelerationSettings": { + "shape": "AccelerationSettings", + "locationName": "accelerationSettings", + "documentation": "Acceleration settings for job execution." + }, "Arn": { "shape": "__string", "locationName": "arn", @@ -5093,6 +5267,11 @@ "shape": "JobTemplateSettings", "locationName": "settings" }, + "StatusUpdateIntervalInSecs": { + "shape": "__integerMin10Max600", + "locationName": "statusUpdateIntervalInSecs", + "documentation": "Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch Events. 
Set the interval, in seconds, between status updates. MediaConvert sends an update at this interval from the time the service begins processing your job to the time it completes the transcode or encounters an error." + }, "Type": { "shape": "Type", "locationName": "type", @@ -5127,6 +5306,11 @@ "locationName": "availBlanking", "documentation": "Settings for ad avail blanking. Video can be blanked or overlaid with an image, and audio muted during SCTE-35 triggered ad avails." }, + "Esam": { + "shape": "EsamSettings", + "locationName": "esam", + "documentation": "Settings for Event Signaling And Messaging (ESAM)." + }, "Inputs": { "shape": "__listOfInputTemplate", "locationName": "inputs", @@ -5604,6 +5788,14 @@ "EXCLUDE" ] }, + "M2tsForceTsVideoEbpOrder": { + "type": "string", + "documentation": "Keep the default value (DEFAULT) unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force (FORCE).", + "enum": [ + "FORCE", + "DEFAULT" + ] + }, "M2tsNielsenId3": { "type": "string", "documentation": "If INSERT, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output.", @@ -5628,6 +5820,17 @@ "CBR" ] }, + "M2tsScte35Esam": { + "type": "structure", + "members": { + "Scte35EsamPid": { + "shape": "__integerMin32Max8182", + "locationName": "scte35EsamPid", + "documentation": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream generated by ESAM." + } + }, + "documentation": "Settings for SCTE-35 signals from ESAM. Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." 
+ }, "M2tsScte35Source": { "type": "string", "documentation": "Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from input to output.", @@ -5671,12 +5874,12 @@ "AudioPids": { "shape": "__listOf__integerMin32Max8182", "locationName": "audioPids", - "documentation": "Packet Identifier (PID) of the elementary audio stream(s) in the transport stream. Multiple values are accepted, and can be entered in ranges and/or by comma separation." + "documentation": "Specify the packet identifiers (PIDs) for any elementary audio streams you include in this output. Specify multiple PIDs as a JSON array. Default is the range 482-492." }, "Bitrate": { "shape": "__integerMin0Max2147483647", "locationName": "bitrate", - "documentation": "The output bitrate of the transport stream in bits per second. Setting to 0 lets the muxer automatically determine the appropriate bitrate. Other common values are 3750000, 7500000, and 15000000." + "documentation": "Specify the output bitrate of the transport stream in bits per second. Setting to 0 lets the muxer automatically determine the appropriate bitrate. Other common values are 3750000, 7500000, and 15000000." }, "BufferModel": { "shape": "M2tsBufferModel", @@ -5693,7 +5896,7 @@ "DvbSubPids": { "shape": "__listOf__integerMin32Max8182", "locationName": "dvbSubPids", - "documentation": "Packet Identifier (PID) for input source DVB Subtitle data to this output. Multiple values are accepted, and can be entered in ranges and/or by comma separation." + "documentation": "Specify the packet identifiers (PIDs) for DVB subtitle data included in this output. Specify multiple PIDs as a JSON array. Default is the range 460-479." }, "DvbTdtSettings": { "shape": "DvbTdtSettings", @@ -5702,7 +5905,7 @@ "DvbTeletextPid": { "shape": "__integerMin32Max8182", "locationName": "dvbTeletextPid", - "documentation": "Packet Identifier (PID) for input source DVB Teletext data to this output." 
+ "documentation": "Specify the packet identifier (PID) for DVB teletext data you include in this output. Default is 499." }, "EbpAudioInterval": { "shape": "M2tsEbpAudioInterval", @@ -5716,15 +5919,20 @@ "shape": "M2tsEsRateInPes", "locationName": "esRateInPes" }, + "ForceTsVideoEbpOrder": { + "shape": "M2tsForceTsVideoEbpOrder", + "locationName": "forceTsVideoEbpOrder", + "documentation": "Keep the default value (DEFAULT) unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force (FORCE)." + }, "FragmentTime": { "shape": "__doubleMin0", "locationName": "fragmentTime", - "documentation": "The length in seconds of each fragment. Only used with EBP markers." + "documentation": "The length, in seconds, of each fragment. Only used with EBP markers." }, "MaxPcrInterval": { "shape": "__integerMin0Max500", "locationName": "maxPcrInterval", - "documentation": "Maximum time in milliseconds between Program Clock References (PCRs) inserted into the transport stream." + "documentation": "Specify the maximum time, in milliseconds, between Program Clock References (PCRs) inserted into the transport stream." }, "MinEbpInterval": { "shape": "__integerMin0Max10000", @@ -5752,36 +5960,41 @@ "PcrPid": { "shape": "__integerMin32Max8182", "locationName": "pcrPid", - "documentation": "Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport stream. When no value is given, the encoder will assign the same value as the Video PID." + "documentation": "Specify the packet identifier (PID) for the program clock reference (PCR) in this output. If you do not specify a value, the service will use the value for Video PID (VideoPid)." }, "PmtInterval": { "shape": "__integerMin0Max1000", "locationName": "pmtInterval", - "documentation": "The number of milliseconds between instances of this table in the output transport stream." 
+ "documentation": "Specify the number of milliseconds between instances of the program map table (PMT) in the output transport stream." }, "PmtPid": { "shape": "__integerMin32Max8182", "locationName": "pmtPid", - "documentation": "Packet Identifier (PID) for the Program Map Table (PMT) in the transport stream." + "documentation": "Specify the packet identifier (PID) for the program map table (PMT) itself. Default is 480." }, "PrivateMetadataPid": { "shape": "__integerMin32Max8182", "locationName": "privateMetadataPid", - "documentation": "Packet Identifier (PID) of the private metadata stream in the transport stream." + "documentation": "Specify the packet identifier (PID) of the private metadata stream. Default is 503." }, "ProgramNumber": { "shape": "__integerMin0Max65535", "locationName": "programNumber", - "documentation": "The value of the program number field in the Program Map Table." + "documentation": "Use Program number (programNumber) to specify the program number used in the program map table (PMT) for this output. Default is 1. Program numbers and program map tables are parts of MPEG-2 transport stream containers, used for organizing data." }, "RateMode": { "shape": "M2tsRateMode", "locationName": "rateMode" }, + "Scte35Esam": { + "shape": "M2tsScte35Esam", + "locationName": "scte35Esam", + "documentation": "Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + }, "Scte35Pid": { "shape": "__integerMin32Max8182", "locationName": "scte35Pid", - "documentation": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream." + "documentation": "Specify the packet identifier (PID) of the SCTE-35 stream in the transport stream." 
}, "Scte35Source": { "shape": "M2tsScte35Source", @@ -5798,25 +6011,25 @@ "SegmentationTime": { "shape": "__doubleMin0", "locationName": "segmentationTime", - "documentation": "The length in seconds of each segment. Required unless markers is set to _none_." + "documentation": "Specify the length, in seconds, of each segment. Required unless markers is set to _none_." }, "TimedMetadataPid": { "shape": "__integerMin32Max8182", "locationName": "timedMetadataPid", - "documentation": "Packet Identifier (PID) of the timed metadata stream in the transport stream." + "documentation": "Specify the packet identifier (PID) for timed metadata in this output. Default is 502." }, "TransportStreamId": { "shape": "__integerMin0Max65535", "locationName": "transportStreamId", - "documentation": "The value of the transport stream ID field in the Program Map Table." + "documentation": "Specify the ID for the transport stream itself in the program map table for this output. Transport stream IDs and program map tables are parts of MPEG-2 transport stream containers, used for organizing data." }, "VideoPid": { "shape": "__integerMin32Max8182", "locationName": "videoPid", - "documentation": "Packet Identifier (PID) of the elementary video stream in the transport stream." + "documentation": "Specify the packet identifier (PID) of the elementary video stream in the transport stream." } }, - "documentation": "Settings for M2TS Container." + "documentation": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. 
Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." }, "M3u8NielsenId3": { "type": "string", @@ -5930,7 +6143,7 @@ "Framerate": { "shape": "MotionImageInsertionFramerate", "locationName": "framerate", - "documentation": "If your motion graphic asset is a .mov file, keep this setting unspecified. If your motion graphic asset is a series of .png files, specify the framerate of the overlay in frames per second, as a fraction. For example, specify 24 fps as 24/1. Make sure that the number of images in your series matches the framerate and your intended overlay duration. For example, if you want a 30-second overlay at 30 fps, you should have 900 .png images. This overlay framerate doesn't need to match the framerate of the underlying video." + "documentation": "If your motion graphic asset is a .mov file, keep this setting unspecified. If your motion graphic asset is a series of .png files, specify the frame rate of the overlay in frames per second, as a fraction. For example, specify 24 fps as 24/1. Make sure that the number of images in your series matches the frame rate and your intended overlay duration. For example, if you want a 30-second overlay at 30 fps, you should have 900 .png images. This overlay frame rate doesn't need to match the frame rate of the underlying video." }, "Input": { "shape": "__stringMin14Max1285PatternS3Mov09Png", @@ -5966,15 +6179,15 @@ "FramerateDenominator": { "shape": "__integerMin1Max17895697", "locationName": "framerateDenominator", - "documentation": "The bottom of the fraction that expresses your overlay framerate. For example, if your framerate is 24 fps, set this value to 1." + "documentation": "The bottom of the fraction that expresses your overlay frame rate. For example, if your frame rate is 24 fps, set this value to 1." 
}, "FramerateNumerator": { "shape": "__integerMin1Max2147483640", "locationName": "framerateNumerator", - "documentation": "The top of the fraction that expresses your overlay framerate. For example, if your framerate is 24 fps, set this value to 24." + "documentation": "The top of the fraction that expresses your overlay frame rate. For example, if your frame rate is 24 fps, set this value to 24." } }, - "documentation": "For motion overlays that don't have a built-in framerate, specify the framerate of the overlay in frames per second, as a fraction. For example, specify 24 fps as 24/1. The overlay framerate doesn't need to match the framerate of the underlying video." + "documentation": "For motion overlays that don't have a built-in frame rate, specify the frame rate of the overlay in frames per second, as a fraction. For example, specify 24 fps as 24/1. The overlay frame rate doesn't need to match the frame rate of the underlying video." }, "MotionImageInsertionMode": { "type": "string", @@ -6042,7 +6255,7 @@ }, "MovReference": { "type": "string", - "documentation": "A value of 'external' creates separate media files and the wrapper file (.mov) contains references to these media files. A value of 'self_contained' creates only a wrapper (.mov) file and this file contains all of the media.", + "documentation": "Always keep the default value (SELF_CONTAINED) for this setting.", "enum": [ "SELF_CONTAINED", "EXTERNAL" @@ -6181,7 +6394,7 @@ }, "Mpeg2FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the framerate for this output. If you want to keep the same framerate as the input video, choose Follow source. If you want to do framerate conversion, choose a framerate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your framerate as a fraction. 
If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the framerate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Choose SPECIFIED if you want the service to use the framerate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -6189,7 +6402,7 @@ }, "Mpeg2FramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during framerate conversion.", + "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -6293,12 +6506,12 @@ "FramerateDenominator": { "shape": "__integerMin1Max1001", "locationName": "framerateDenominator", - "documentation": "Framerate denominator." + "documentation": "Frame rate denominator." 
}, "FramerateNumerator": { "shape": "__integerMin24Max60000", "locationName": "framerateNumerator", - "documentation": "Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 fps." + "documentation": "Frame rate numerator - frame rate is a fraction, e.g. 24000 / 1001 = 23.976 fps." }, "GopClosedCadence": { "shape": "__integerMin0Max2147483647", @@ -6479,7 +6692,7 @@ "FragmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "fragmentLength", - "documentation": "Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in seconds. Fragment length must be compatible with GOP size and framerate." + "documentation": "Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in seconds. Fragment length must be compatible with GOP size and frame rate." }, "ManifestEncoding": { "shape": "MsSmoothManifestEncoding", @@ -6867,7 +7080,7 @@ }, "ProresFramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the framerate for this output. If you want to keep the same framerate as the input video, choose Follow source. If you want to do framerate conversion, choose a framerate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your framerate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the framerate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Choose SPECIFIED if you want the service to use the framerate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. 
If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -6875,7 +7088,7 @@ }, "ProresFramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during framerate conversion.", + "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -6918,12 +7131,12 @@ "FramerateDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "framerateDenominator", - "documentation": "Framerate denominator." + "documentation": "Frame rate denominator." }, "FramerateNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "framerateNumerator", - "documentation": "When you use the API for transcode jobs that use framerate conversion, specify the framerate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator." + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. 
In this example, use 24000 for the value of FramerateNumerator." }, "InterlaceMode": { "shape": "ProresInterlaceMode", @@ -7201,7 +7414,7 @@ }, "ScalingBehavior": { "type": "string", - "documentation": "Applies only if your input aspect ratio is different from your output aspect ratio. Enable Stretch to output (StretchToOutput) to have the service stretch your video image to fit. Leave this setting disabled to allow the service to letterbox your video instead. This setting overrides any positioning value you specify elsewhere in the job.", + "documentation": "Applies only if your input aspect ratio is different from your output aspect ratio. Choose \"Stretch to output\" to have the service stretch your video image to fit. Keep the setting \"Default\" to allow the service to letterbox your video instead. This setting overrides any positioning value you specify elsewhere in the job.", "enum": [ "DEFAULT", "STRETCH_TO_OUTPUT" @@ -7209,7 +7422,7 @@ }, "SccDestinationFramerate": { "type": "string", - "documentation": "Set Framerate (SccDestinationFramerate) to make sure that the captions and the video are synchronized in the output. Specify a framerate that matches the framerate of the associated video. If the video framerate is 29.97, choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe (FRAMERATE_29_97_NON_DROPFRAME).", + "documentation": "Set Framerate (SccDestinationFramerate) to make sure that the captions and the video are synchronized in the output. Specify a frame rate that matches the frame rate of the associated video. 
If the video frame rate is 29.97, choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe (FRAMERATE_29_97_NON_DROPFRAME).", "enum": [ "FRAMERATE_23_97", "FRAMERATE_24", @@ -7231,7 +7444,7 @@ "type": "structure", "members": { "CertificateArn": { - "shape": "__stringPatternArnAwsAcm", + "shape": "__stringPatternArnAwsUsGovAcm", "locationName": "certificateArn", "documentation": "Optional AWS Certificate Manager ARN for a certificate to send to the keyprovider. The certificate holds a key used by the keyprovider to encrypt the keys in its response." }, @@ -7366,7 +7579,7 @@ "Anchor": { "shape": "__stringPattern010920405090509092", "locationName": "anchor", - "documentation": "If you use an editing platform that relies on an anchor timecode, use Anchor Timecode (Anchor) to specify a timecode that will match the input video frame to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF). This setting ignores framerate conversion. System behavior for Anchor Timecode varies depending on your setting for Source (TimecodeSource). * If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART), the first input frame is the specified value in Start Timecode (Start). Anchor Timecode (Anchor) and Start Timecode (Start) are used calculate output timecode. * If Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the first frame is 00:00:00:00. * If Source (TimecodeSource) is set to Embedded (EMBEDDED), the first frame is the timecode value on the first input frame of the input." + "documentation": "If you use an editing platform that relies on an anchor timecode, use Anchor Timecode (Anchor) to specify a timecode that will match the input video frame to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF). This setting ignores frame rate conversion. 
System behavior for Anchor Timecode varies depending on your setting for Source (TimecodeSource). * If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART), the first input frame is the specified value in Start Timecode (Start). Anchor Timecode (Anchor) and Start Timecode (Start) are used calculate output timecode. * If Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the first frame is 00:00:00:00. * If Source (TimecodeSource) is set to Embedded (EMBEDDED), the first frame is the timecode value on the first input frame of the input." }, "Source": { "shape": "TimecodeSource", @@ -7448,6 +7661,17 @@ }, "documentation": "Too many requests have been sent in too short of a time. The service limits the rate at which it will accept requests." }, + "TrackSourceSettings": { + "type": "structure", + "members": { + "TrackNumber": { + "shape": "__integerMin1Max2147483647", + "locationName": "trackNumber", + "documentation": "Use this setting to select a single captions track from a source. Track numbers correspond to the order in the captions source file. For IMF sources, track numbering is based on the order that the captions appear in the CPL. For example, use 1 to select the captions asset that is listed first in the CPL. To include more than one captions track in your job outputs, create multiple input captions selectors. Specify one track per selector." + } + }, + "documentation": "Settings specific to caption sources that are specfied by track number. Sources include IMSC in IMF." + }, "TtmlDestinationSettings": { "type": "structure", "members": { @@ -7500,6 +7724,11 @@ "UpdateJobTemplateRequest": { "type": "structure", "members": { + "AccelerationSettings": { + "shape": "AccelerationSettings", + "locationName": "accelerationSettings", + "documentation": "This is a beta feature. If you are interested in using this feature, please contact AWS customer support." 
+ }, "Category": { "shape": "__string", "locationName": "category", @@ -7524,6 +7753,11 @@ "Settings": { "shape": "JobTemplateSettings", "locationName": "settings" + }, + "StatusUpdateIntervalInSecs": { + "shape": "__integerMin10Max600", + "locationName": "statusUpdateIntervalInSecs", + "documentation": "Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch Events. Set the interval, in seconds, between status updates. MediaConvert sends an update at this interval from the time the service begins processing your job to the time it completes the transcode or encounters an error." } }, "required": [ @@ -7665,7 +7899,8 @@ }, "AntiAlias": { "shape": "AntiAlias", - "locationName": "antiAlias" + "locationName": "antiAlias", + "documentation": "You no longer need to specify the anti-alias filter. It's now automatically applied to all outputs. This property is deprecated." }, "CodecSettings": { "shape": "VideoCodecSettings", @@ -7678,7 +7913,7 @@ "Crop": { "shape": "Rectangle", "locationName": "crop", - "documentation": "Applies only if your input aspect ratio is different from your output aspect ratio. Use Input cropping rectangle (Crop) to specify the video area the service will include in the output. This will crop the input source, causing video pixels to be removed on encode. Do not use this setting if you have enabled Stretch to output (stretchToOutput) in your output settings." + "documentation": "Applies only if your input aspect ratio is different from your output aspect ratio. Use Input cropping rectangle (Crop) to specify the video area the service will include in the output. This will crop the input source, causing video pixels to be removed on encode. If you crop your input frame size to smaller than your output frame size, make sure to specify the behavior you want in your output setting \"Scaling behavior\"." 
}, "DropFrameTimecode": { "shape": "DropFrameTimecode", @@ -7710,7 +7945,7 @@ "Sharpness": { "shape": "__integerMin0Max100", "locationName": "sharpness", - "documentation": "Use Sharpness (Sharpness)setting to specify the strength of anti-aliasing. This setting changes the width of the anti-alias filter kernel used for scaling. Sharpness only applies if your output resolution is different from your input resolution, and if you set Anti-alias (AntiAlias) to ENABLED. 0 is the softest setting, 100 the sharpest, and 50 recommended for most content." + "documentation": "Use Sharpness (Sharpness) setting to specify the strength of anti-aliasing. This setting changes the width of the anti-alias filter kernel used for scaling. Sharpness only applies if your output resolution is different from your input resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended for most content." }, "TimecodeInsertion": { "shape": "VideoTimecodeInsertion", @@ -7800,13 +8035,17 @@ "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "programNumber", "documentation": "Selects a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported." + }, + "Rotate": { + "shape": "InputRotate", + "locationName": "rotate" } }, "documentation": "Selector for video." }, "VideoTimecodeInsertion": { "type": "string", - "documentation": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input framerate is identical to the output framerate. To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion) to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. 
To change this default behavior, adjust the settings under Timecode configuration (TimecodeConfig). In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings (InputTimecodeSource) does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration (TimecodeSource) does.", + "documentation": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion) to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration (TimecodeConfig). In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings (InputTimecodeSource) does not affect the timecodes that are inserted in the output. 
Source under Job settings > Timecode configuration (TimecodeSource) does.", "enum": [ "DISABLED", "PIC_TIMING_SEI" @@ -7934,6 +8173,11 @@ "min": 0, "max": 30 }, + "__integerMin0Max30000": { + "type": "integer", + "min": 0, + "max": 30000 + }, "__integerMin0Max3600": { "type": "integer", "min": 0, @@ -8014,6 +8258,11 @@ "min": 10, "max": 48 }, + "__integerMin10Max600": { + "type": "long", + "min": 10, + "max": 600 + }, "__integerMin16Max24": { "type": "integer", "min": 16, @@ -8365,6 +8614,12 @@ "shape": "__stringPattern09aFAF809aFAF409aFAF409aFAF409aFAF12" } }, + "__listOf__stringPatternS3ASSETMAPXml": { + "type": "list", + "member": { + "shape": "__stringPatternS3ASSETMAPXml" + } + }, "__long": { "type": "long" }, @@ -8511,9 +8766,9 @@ "type": "string", "pattern": "^[A-Za-z0-9]{32}$" }, - "__stringPatternArnAwsAcm": { + "__stringPatternArnAwsUsGovAcm": { "type": "string", - "pattern": "^arn:aws:acm:" + "pattern": "^arn:aws(-us-gov)?:acm:" }, "__stringPatternDD": { "type": "string", @@ -8531,14 +8786,26 @@ "type": "string", "pattern": "^s3:\\/\\/" }, - "__stringPatternS3MM2VVMMPPEEGGAAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MM": { + "__stringPatternS3ASSETMAPXml": { "type": "string", - "pattern": "^(s3:\\/\\/)([^\\/]+\\/)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM])))$" + "pattern": 
"^s3:\\/\\/.*\\/(ASSETMAP.xml)?$" }, "__stringPatternS3MM2VVMMPPEEGGAAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { "type": "string", "pattern": "^(s3:\\/\\/)([^\\/]+\\/)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))$" }, + "__stringPatternS3MM2VVMMPPEEGGAAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { + "type": "string", + "pattern": "^(s3:\\/\\/)([^\\/]+\\/)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))$" + }, + "__stringPatternSNManifestConfirmConditionNotificationNS": { + "type": "string", 
+ "pattern": "^\\s*<(.|\\n)*ManifestConfirmConditionNotification(.|\\n)*>\\s*$" + }, + "__stringPatternSNSignalProcessingNotificationNS": { + "type": "string", + "pattern": "^\\s*<(.|\\n)*SignalProcessingNotification(.|\\n)*>\\s*$" + }, "__stringPatternWS": { "type": "string", "pattern": "^[\\w\\s]*$" diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index affd624c..8747b240 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -196,6 +196,36 @@ ], "documentation": "Creates a Input Security Group" }, + "CreateTags": { + "name": "CreateTags", + "http": { + "method": "POST", + "requestUri": "/prod/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "CreateTagsRequest" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "The arn was not found." + }, + { + "shape": "BadRequestException", + "documentation": "This request was invalid" + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal Service Error" + }, + { + "shape": "ForbiddenException", + "documentation": "Access was denied" + } + ], + "documentation": "Create tags for a resource" + }, "DeleteChannel": { "name": "DeleteChannel", "http": { @@ -392,6 +422,36 @@ ], "documentation": "Delete an expired reservation." }, + "DeleteTags": { + "name": "DeleteTags", + "http": { + "method": "DELETE", + "requestUri": "/prod/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteTagsRequest" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "The arn was not found." 
+ }, + { + "shape": "BadRequestException", + "documentation": "This request was invalid" + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal Service Error" + }, + { + "shape": "ForbiddenException", + "documentation": "Access was denied" + } + ], + "documentation": "Removes tags for a resource" + }, "DescribeChannel": { "name": "DescribeChannel", "http": { @@ -878,6 +938,40 @@ ], "documentation": "List purchased reservations." }, + "ListTagsForResource": { + "name": "ListTagsForResource", + "http": { + "method": "GET", + "requestUri": "/prod/tags/{resource-arn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceRequest" + }, + "output": { + "shape": "ListTagsForResourceResponse", + "documentation": "An array of tags" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "The arn was not found" + }, + { + "shape": "BadRequestException", + "documentation": "This request was invalid" + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal Service Error" + }, + { + "shape": "ForbiddenException", + "documentation": "Access was denied" + } + ], + "documentation": "Produces list of tags that have been created for a resource" + }, "PurchaseOffering": { "name": "PurchaseOffering", "http": { @@ -1405,7 +1499,7 @@ "Destination": { "shape": "OutputLocationRef", "locationName": "destination", - "documentation": "A directory and base filename where archive files should be written. If the base filename portion of the URI is left blank, the base filename of the first input will be automatically inserted." + "documentation": "A directory and base filename where archive files should be written." }, "RolloverInterval": { "shape": "__integerMin1", @@ -2350,6 +2444,11 @@ "State": { "shape": "ChannelState", "locationName": "state" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." 
} }, "documentation": "Placeholder documentation for Channel" @@ -2515,6 +2614,11 @@ "shape": "__string", "locationName": "roleArn", "documentation": "An optional Amazon Resource Name (ARN) of the role to assume when running the Channel." + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." } }, "documentation": "Placeholder documentation for CreateChannel" @@ -2566,6 +2670,11 @@ "shape": "__string", "locationName": "roleArn", "documentation": "An optional Amazon Resource Name (ARN) of the role to assume when running the Channel." + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." } }, "documentation": "A request to create a channel" @@ -2629,9 +2738,18 @@ "locationName": "sources", "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.\n" }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." + }, "Type": { "shape": "InputType", "locationName": "type" + }, + "Vpc": { + "shape": "InputVpcRequest", + "locationName": "vpc" } }, "documentation": "Placeholder documentation for CreateInput" @@ -2675,9 +2793,18 @@ "locationName": "sources", "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.\n" }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." 
+ }, "Type": { "shape": "InputType", "locationName": "type" + }, + "Vpc": { + "shape": "InputVpcRequest", + "locationName": "vpc" } }, "documentation": "The name of the input" @@ -2705,6 +2832,11 @@ "CreateInputSecurityGroupRequest": { "type": "structure", "members": { + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." + }, "WhitelistRules": { "shape": "__listOfInputWhitelistRuleCidr", "locationName": "whitelistRules", @@ -2733,6 +2865,24 @@ }, "documentation": "Placeholder documentation for CreateInputSecurityGroupResultModel" }, + "CreateTagsRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "required": [ + "ResourceArn" + ], + "documentation": "Placeholder documentation for CreateTagsRequest" + }, "DeleteChannelRequest": { "type": "structure", "members": { @@ -2807,6 +2957,11 @@ "State": { "shape": "ChannelState", "locationName": "state" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." 
} }, "documentation": "Placeholder documentation for DeleteChannelResponse" @@ -2959,6 +3114,27 @@ }, "documentation": "Placeholder documentation for DeleteReservationResponse" }, + "DeleteTagsRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn" + }, + "TagKeys": { + "shape": "__listOf__string", + "location": "querystring", + "locationName": "tagKeys", + "documentation": "An array of tag keys to delete" + } + }, + "required": [ + "TagKeys", + "ResourceArn" + ], + "documentation": "Placeholder documentation for DeleteTagsRequest" + }, "DescribeChannelRequest": { "type": "structure", "members": { @@ -3033,6 +3209,11 @@ "State": { "shape": "ChannelState", "locationName": "state" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." } }, "documentation": "Placeholder documentation for DescribeChannelResponse" @@ -3093,7 +3274,7 @@ "SecurityGroups": { "shape": "__listOf__string", "locationName": "securityGroups", - "documentation": "A list of IDs for all the security groups attached to the input." + "documentation": "A list of IDs for all the Input Security Groups attached to the input." }, "Sources": { "shape": "__listOfInputSource", @@ -3104,6 +3285,11 @@ "shape": "InputState", "locationName": "state" }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." + }, "Type": { "shape": "InputType", "locationName": "type" @@ -3149,6 +3335,11 @@ "locationName": "state", "documentation": "The current state of the Input Security Group." }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." 
+ }, "WhitelistRules": { "shape": "__listOfInputWhitelistRule", "locationName": "whitelistRules", @@ -4063,6 +4254,45 @@ }, "documentation": "Placeholder documentation for ForbiddenException" }, + "FrameCaptureGroupSettings": { + "type": "structure", + "members": { + "Destination": { + "shape": "OutputLocationRef", + "locationName": "destination", + "documentation": "The destination for the frame capture files. Either the URI for an Amazon S3 bucket and object, plus a file name prefix (for example, s3ssl://sportsDelivery/highlights/20180820/curling_) or the URI for a MediaStore container, plus a file name prefix (for example, mediastoressl://sportsDelivery/20180820/curling_). The final file names consist of the prefix from the destination field (for example, \"curling_\") + name modifier + the counter (5 digits, starting from 00001) + extension (which is always .jpg). For example, curlingLow.00001.jpg" + } + }, + "documentation": "Frame Capture Group Settings", + "required": [ + "Destination" + ] + }, + "FrameCaptureOutputSettings": { + "type": "structure", + "members": { + "NameModifier": { + "shape": "__string", + "locationName": "nameModifier", + "documentation": "Required if the output group contains more than one output. This modifier forms part of the output file name." + } + }, + "documentation": "Frame Capture Output Settings" + }, + "FrameCaptureSettings": { + "type": "structure", + "members": { + "CaptureInterval": { + "shape": "__integerMin1Max3600", + "locationName": "captureInterval", + "documentation": "The frequency, in seconds, for capturing frames for inclusion in the output. For example, \"10\" means capture a frame every 10 seconds." + } + }, + "documentation": "Frame Capture Settings", + "required": [ + "CaptureInterval" + ] + }, "GatewayTimeoutException": { "type": "structure", "members": { @@ -4323,12 +4553,12 @@ "documentation": "This field indicates how the output video frame rate is specified. 
If \"specified\" is selected then the output video frame rate is determined by framerateNumerator and framerateDenominator, else if \"initializeFromSource\" is selected then the output video frame rate will be set equal to the input video frame rate of the first input." }, "FramerateDenominator": { - "shape": "__integer", + "shape": "__integerMin1", "locationName": "framerateDenominator", "documentation": "Framerate denominator." }, "FramerateNumerator": { - "shape": "__integer", + "shape": "__integerMin1", "locationName": "framerateNumerator", "documentation": "Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 fps." }, @@ -4710,6 +4940,11 @@ "locationName": "hlsCdnSettings", "documentation": "Parameters that control interactions with the CDN." }, + "IFrameOnlyPlaylists": { + "shape": "IFrameOnlyPlaylistType", + "locationName": "iFrameOnlyPlaylists", + "documentation": "If enabled, writes out I-Frame only playlists in addition to media playlists." + }, "IndexNSegments": { "shape": "__integerMin3", "locationName": "indexNSegments", @@ -5088,6 +5323,14 @@ }, "documentation": "Placeholder documentation for HlsWebdavSettings" }, + "IFrameOnlyPlaylistType": { + "type": "string", + "documentation": "When set to \"standard\", an I-Frame only playlist will be written out for each video output in the output group. This I-Frame only playlist will contain byte range offsets pointing to the I-frame(s) in each segment.", + "enum": [ + "DISABLED", + "STANDARD" + ] + }, "Input": { "type": "structure", "members": { @@ -5129,7 +5372,7 @@ "SecurityGroups": { "shape": "__listOf__string", "locationName": "securityGroups", - "documentation": "A list of IDs for all the security groups attached to the input." + "documentation": "A list of IDs for all the Input Security Groups attached to the input." 
}, "Sources": { "shape": "__listOfInputSource", @@ -5140,6 +5383,11 @@ "shape": "InputState", "locationName": "state" }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." + }, "Type": { "shape": "InputType", "locationName": "type" @@ -5230,6 +5478,10 @@ "shape": "__string", "locationName": "url", "documentation": "This represents the endpoint that the customer stream will be\npushed to.\n" + }, + "Vpc": { + "shape": "InputDestinationVpc", + "locationName": "vpc" } }, "documentation": "The settings for a PUSH type input." @@ -5245,6 +5497,22 @@ }, "documentation": "Endpoint settings for a PUSH type input." }, + "InputDestinationVpc": { + "type": "structure", + "members": { + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The availability zone of the Input destination.\n" + }, + "NetworkInterfaceId": { + "shape": "__string", + "locationName": "networkInterfaceId", + "documentation": "The network interface ID of the Input destination in the VPC.\n" + } + }, + "documentation": "The properties for a VPC type input destination." + }, "InputFilter": { "type": "string", "enum": [ @@ -5391,6 +5659,11 @@ "locationName": "state", "documentation": "The current state of the Input Security Group." }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." + }, "WhitelistRules": { "shape": "__listOfInputWhitelistRule", "locationName": "whitelistRules", @@ -5412,6 +5685,11 @@ "InputSecurityGroupWhitelistRequest": { "type": "structure", "members": { + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." 
+ }, "WhitelistRules": { "shape": "__listOfInputWhitelistRuleCidr", "locationName": "whitelistRules", @@ -5580,6 +5858,25 @@ ], "documentation": "Placeholder documentation for InputType" }, + "InputVpcRequest": { + "type": "structure", + "members": { + "SecurityGroupIds": { + "shape": "__listOf__string", + "locationName": "securityGroupIds", + "documentation": "A list of up to 5 EC2 VPC security group IDs to attach to the Input VPC network interfaces.\nRequires subnetIds. If none are specified then the VPC default security group will be used.\n" + }, + "SubnetIds": { + "shape": "__listOf__string", + "locationName": "subnetIds", + "documentation": "A list of 2 VPC subnet IDs from the same VPC.\nSubnet IDs must be mapped to two unique availability zones (AZ).\n" + } + }, + "documentation": "Settings for a private VPC Input.\nWhen this property is specified, the input destination addresses will be created in a VPC rather than with public Internet addresses.\nThis property requires setting the roleArn property on Input creation.\nNot compatible with the inputSecurityGroups property.\n", + "required": [ + "SubnetIds" + ] + }, "InputWhitelistRule": { "type": "structure", "members": { @@ -5976,6 +6273,30 @@ }, "documentation": "ListReservations response" }, + "ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn" + } + }, + "required": [ + "ResourceArn" + ], + "documentation": "Placeholder documentation for ListTagsForResourceRequest" + }, + "ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "documentation": "Placeholder documentation for ListTagsForResourceResponse" + }, "LogLevel": { "type": "string", "documentation": "The log level the user wants for their channel.", @@ -6873,6 +7194,10 @@ "shape": "ArchiveGroupSettings", "locationName": "archiveGroupSettings" }, + 
"FrameCaptureGroupSettings": { + "shape": "FrameCaptureGroupSettings", + "locationName": "frameCaptureGroupSettings" + }, "HlsGroupSettings": { "shape": "HlsGroupSettings", "locationName": "hlsGroupSettings" @@ -6909,6 +7234,10 @@ "shape": "ArchiveOutputSettings", "locationName": "archiveOutputSettings" }, + "FrameCaptureOutputSettings": { + "shape": "FrameCaptureOutputSettings", + "locationName": "frameCaptureOutputSettings" + }, "HlsOutputSettings": { "shape": "HlsOutputSettings", "locationName": "hlsOutputSettings" @@ -7965,6 +8294,11 @@ "State": { "shape": "ChannelState", "locationName": "state" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." } }, "documentation": "Placeholder documentation for StartChannelResponse" @@ -8137,10 +8471,35 @@ "State": { "shape": "ChannelState", "locationName": "state" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." } }, "documentation": "Placeholder documentation for StopChannelResponse" }, + "Tags": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "__string" + }, + "documentation": "Placeholder documentation for Tags" + }, + "TagsModel": { + "type": "structure", + "members": { + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + }, + "documentation": "Placeholder documentation for TagsModel" + }, "TeletextDestinationSettings": { "type": "structure", "members": { @@ -8526,6 +8885,11 @@ "locationName": "inputSecurityGroupId", "documentation": "The id of the Input Security Group to update." }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." 
+ }, "WhitelistRules": { "shape": "__listOfInputWhitelistRuleCidr", "locationName": "whitelistRules", @@ -8574,6 +8938,10 @@ "VideoCodecSettings": { "type": "structure", "members": { + "FrameCaptureSettings": { + "shape": "FrameCaptureSettings", + "locationName": "frameCaptureSettings" + }, "H264Settings": { "shape": "H264Settings", "locationName": "h264Settings" @@ -8592,7 +8960,7 @@ "Height": { "shape": "__integer", "locationName": "height", - "documentation": "Output video height (in pixels). Leave blank to use source video height. If left blank, width must also be unspecified." + "documentation": "Output video height, in pixels. Must be an even number. For most codecs, you can leave this field and width blank in order to use the height and width (resolution) from the source. Note, however, that leaving blank is not recommended. For the Frame Capture codec, height and width are required." }, "Name": { "shape": "__string", @@ -8602,22 +8970,22 @@ "RespondToAfd": { "shape": "VideoDescriptionRespondToAfd", "locationName": "respondToAfd", - "documentation": "Indicates how to respond to the AFD values in the input stream. Setting to \"respond\" causes input video to be clipped, depending on AFD value, input display aspect ratio and output display aspect ratio." + "documentation": "Indicates how to respond to the AFD values in the input stream. RESPOND causes input video to be clipped, depending on the AFD value, input display aspect ratio, and output display aspect ratio, and (except for FRAMECAPTURE codec) includes the values in the output. PASSTHROUGH (does not apply to FRAMECAPTURE codec) ignores the AFD values and includes the values in the output, so input video is not clipped. NONE ignores the AFD values and does not include the values through to the output, so input video is not clipped." 
}, "ScalingBehavior": { "shape": "VideoDescriptionScalingBehavior", "locationName": "scalingBehavior", - "documentation": "When set to \"stretchToOutput\", automatically configures the output position to stretch the video to the specified output resolution. This option will override any position value." + "documentation": "STRETCHTOOUTPUT configures the output position to stretch the video to the specified output resolution (height and width). This option will override any position value. DEFAULT may insert black boxes (pillar boxes or letter boxes) around the video to provide the specified output resolution." }, "Sharpness": { "shape": "__integerMin0Max100", "locationName": "sharpness", - "documentation": "Changes the width of the anti-alias filter kernel used for scaling. Only applies if scaling is being performed and antiAlias is set to true. 0 is the softest setting, 100 the sharpest, and 50 recommended for most content." + "documentation": "Changes the strength of the anti-alias filter used for scaling. 0 is the softest setting, 100 is the sharpest. A setting of 50 is recommended for most content." }, "Width": { "shape": "__integer", "locationName": "width", - "documentation": "Output video width (in pixels). Leave out to use source video width. If left out, height must also be left out. Display aspect ratio is always preserved by letterboxing or pillarboxing when necessary." + "documentation": "Output video width, in pixels. Must be an even number. For most codecs, you can leave this field and height blank in order to use the height and width (resolution) from the source. Note, however, that leaving blank is not recommended. For the Frame Capture codec, height and width are required." 
} }, "documentation": "Video settings for this stream.", @@ -8917,6 +9285,11 @@ "max": 32, "documentation": "Placeholder documentation for __integerMin1Max32" }, + "__integerMin1Max3600": { + "type": "integer", + "min": 1, + "max": 3600 + }, "__integerMin1Max4": { "type": "integer", "min": 1, diff --git a/botocore/data/mediapackage/2017-10-12/service-2.json b/botocore/data/mediapackage/2017-10-12/service-2.json index 2f8693b6..1ade7bda 100644 --- a/botocore/data/mediapackage/2017-10-12/service-2.json +++ b/botocore/data/mediapackage/2017-10-12/service-2.json @@ -799,6 +799,11 @@ "locationName": "encryption", "shape": "DashEncryption" }, + "ManifestLayout": { + "documentation": "Determines the position of some tags in the Media Presentation Description (MPD). When set to FULL, elements like SegmentTemplate and ContentProtection are included in each Representation. When set to COMPACT, duplicate elements are combined and presented at the AdaptationSet level.", + "locationName": "manifestLayout", + "shape": "ManifestLayout" + }, "ManifestWindowSeconds": { "documentation": "Time window (in seconds) contained in each manifest.", "locationName": "manifestWindowSeconds", @@ -829,6 +834,11 @@ "locationName": "segmentDurationSeconds", "shape": "__integer" }, + "SegmentTemplateFormat": { + "documentation": "Determines the type of SegmentTimeline included in the Media Presentation Description (MPD). When set to NUMBER_WITH_TIMELINE, a full timeline is presented in each SegmentTemplate, with $Number$ media URLs. 
When set to TIME_WITH_TIMELINE, a full timeline is presented in each SegmentTemplate, with $Time$ media URLs.", + "locationName": "segmentTemplateFormat", + "shape": "SegmentTemplateFormat" + }, "StreamSelection": { "locationName": "streamSelection", "shape": "StreamSelection" @@ -1312,6 +1322,13 @@ }, "type": "structure" }, + "ManifestLayout": { + "enum": [ + "FULL", + "COMPACT" + ], + "type": "string" + }, "MaxResults": { "max": 1000, "min": 1, @@ -1659,6 +1676,13 @@ }, "type": "structure" }, + "SegmentTemplateFormat": { + "enum": [ + "NUMBER_WITH_TIMELINE", + "TIME_WITH_TIMELINE" + ], + "type": "string" + }, "ServiceUnavailableException": { "documentation": "An unexpected error occurred.", "error": { diff --git a/botocore/data/mediastore-data/2017-09-01/paginators-1.json b/botocore/data/mediastore-data/2017-09-01/paginators-1.json index ea142457..7b1c0f7e 100644 --- a/botocore/data/mediastore-data/2017-09-01/paginators-1.json +++ b/botocore/data/mediastore-data/2017-09-01/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListItems": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + } + } } diff --git a/botocore/data/mediastore/2017-09-01/paginators-1.json b/botocore/data/mediastore/2017-09-01/paginators-1.json index ea142457..ed3ece02 100644 --- a/botocore/data/mediastore/2017-09-01/paginators-1.json +++ b/botocore/data/mediastore/2017-09-01/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListContainers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Containers" + } + } } diff --git a/botocore/data/mediastore/2017-09-01/service-2.json b/botocore/data/mediastore/2017-09-01/service-2.json index 822c3d42..bcf6e470 100644 --- a/botocore/data/mediastore/2017-09-01/service-2.json +++ b/botocore/data/mediastore/2017-09-01/service-2.json @@ -90,7 +90,7 @@ 
{"shape":"PolicyNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Removes an object lifecycle policy from a container.

" + "documentation":"

Removes an object lifecycle policy from a container. It takes up to 20 minutes for the change to take effect.

" }, "DescribeContainer":{ "name":"DescribeContainer", @@ -195,7 +195,7 @@ {"shape":"ContainerInUseException"}, {"shape":"InternalServerError"} ], - "documentation":"

Sets the cross-origin resource sharing (CORS) configuration on a container so that the container can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your AWS Elemental MediaStore container at my.example.container.com by using the browser's XMLHttpRequest capability.

To enable CORS on a container, you attach a CORS policy to the container. In the CORS policy, you configure rules that identify origins and the HTTP methods that can be executed on your container. The policy can contain up to 398,000 characters. You can add up to 100 rules to a CORS policy. If more than one rule applies, the service uses the first applicable rule listed.

" + "documentation":"

Sets the cross-origin resource sharing (CORS) configuration on a container so that the container can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your AWS Elemental MediaStore container at my.example.container.com by using the browser's XMLHttpRequest capability.

To enable CORS on a container, you attach a CORS policy to the container. In the CORS policy, you configure rules that identify origins and the HTTP methods that can be executed on your container. The policy can contain up to 398,000 characters. You can add up to 100 rules to a CORS policy. If more than one rule applies, the service uses the first applicable rule listed.

To learn more about CORS, see Cross-Origin Resource Sharing (CORS) in AWS Elemental MediaStore.

" }, "PutLifecyclePolicy":{ "name":"PutLifecyclePolicy", @@ -210,7 +210,37 @@ {"shape":"ContainerNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Writes an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy.

" + "documentation":"

Writes an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy. It takes up to 20 minutes for the change to take effect.

For information about how to construct an object lifecycle policy, see Components of an Object Lifecycle Policy.

" + }, + "StartAccessLogging":{ + "name":"StartAccessLogging", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartAccessLoggingInput"}, + "output":{"shape":"StartAccessLoggingOutput"}, + "errors":[ + {"shape":"ContainerInUseException"}, + {"shape":"ContainerNotFoundException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Starts access logging on the specified container. When you enable access logging on a container, MediaStore delivers access logs for objects stored in that container to Amazon CloudWatch Logs.

" + }, + "StopAccessLogging":{ + "name":"StopAccessLogging", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopAccessLoggingInput"}, + "output":{"shape":"StopAccessLoggingOutput"}, + "errors":[ + {"shape":"ContainerInUseException"}, + {"shape":"ContainerNotFoundException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Stops access logging on the specified container. When you stop access logging on a container, MediaStore stops sending access logs to Amazon CloudWatch Logs. These access logs are not saved and are not retrievable.

" } }, "shapes":{ @@ -254,6 +284,10 @@ "Status":{ "shape":"ContainerStatus", "documentation":"

The status of container creation or deletion. The status is one of the following: CREATING, ACTIVE, or DELETING. While the service is creating the container, the status is CREATING. When the endpoint is available, the status changes to ACTIVE.

" + }, + "AccessLoggingEnabled":{ + "shape":"ContainerAccessLoggingEnabled", + "documentation":"

The state of access logging on the container. This value is false by default, indicating that AWS Elemental MediaStore does not send access logs to Amazon CloudWatch Logs. When you enable access logging on the container, MediaStore changes this value to true, indicating that the service delivers access logs for objects stored in that container to CloudWatch Logs.

" } }, "documentation":"

This section describes operations that you can perform on an AWS Elemental MediaStore container.

" @@ -264,6 +298,7 @@ "min":1, "pattern":"arn:aws:mediastore:[a-z]+-[a-z]+-\\d:\\d{12}:container/\\w{1,255}" }, + "ContainerAccessLoggingEnabled":{"type":"boolean"}, "ContainerInUseException":{ "type":"structure", "members":{ @@ -609,7 +644,7 @@ }, "PaginationToken":{ "type":"string", - "max":255, + "max":1024, "min":1, "pattern":"[0-9A-Za-z=/+]+" }, @@ -687,6 +722,36 @@ "members":{ } }, + "StartAccessLoggingInput":{ + "type":"structure", + "required":["ContainerName"], + "members":{ + "ContainerName":{ + "shape":"ContainerName", + "documentation":"

The name of the container that you want to start access logging on.

" + } + } + }, + "StartAccessLoggingOutput":{ + "type":"structure", + "members":{ + } + }, + "StopAccessLoggingInput":{ + "type":"structure", + "required":["ContainerName"], + "members":{ + "ContainerName":{ + "shape":"ContainerName", + "documentation":"

The name of the container that you want to stop access logging on.

" + } + } + }, + "StopAccessLoggingOutput":{ + "type":"structure", + "members":{ + } + }, "TimeStamp":{"type":"timestamp"} }, "documentation":"

An AWS Elemental MediaStore container is a namespace that holds folders and objects. You use a container endpoint to create, read, and delete objects.

" diff --git a/botocore/data/mediatailor/2018-04-23/paginators-1.json b/botocore/data/mediatailor/2018-04-23/paginators-1.json index ea142457..993b5eaf 100644 --- a/botocore/data/mediatailor/2018-04-23/paginators-1.json +++ b/botocore/data/mediatailor/2018-04-23/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListPlaybackConfigurations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + } + } } diff --git a/botocore/data/mediatailor/2018-04-23/service-2.json b/botocore/data/mediatailor/2018-04-23/service-2.json index 43ba04c1..5f2e794c 100644 --- a/botocore/data/mediatailor/2018-04-23/service-2.json +++ b/botocore/data/mediatailor/2018-04-23/service-2.json @@ -26,7 +26,7 @@ "shape" : "DeletePlaybackConfigurationResponse" }, "errors" : [ ], - "documentation" : "

Deletes the configuration for the specified name.

" + "documentation" : "

Deletes the playback configuration for the specified name.

" }, "GetPlaybackConfiguration" : { "name" : "GetPlaybackConfiguration", @@ -39,10 +39,11 @@ "shape" : "GetPlaybackConfigurationRequest" }, "output" : { - "shape" : "GetPlaybackConfigurationResponse" + "shape" : "GetPlaybackConfigurationResponse", + "documentation" : "

Success.

" }, "errors" : [ ], - "documentation" : "

Returns the configuration for the specified name.

" + "documentation" : "

Returns the playback configuration for the specified name.

" }, "ListPlaybackConfigurations" : { "name" : "ListPlaybackConfigurations", @@ -55,10 +56,31 @@ "shape" : "ListPlaybackConfigurationsRequest" }, "output" : { - "shape" : "ListPlaybackConfigurationsResponse" + "shape" : "ListPlaybackConfigurationsResponse", + "documentation" : "

Success.

" }, "errors" : [ ], - "documentation" : "

Returns a list of the configurations defined in AWS Elemental MediaTailor. You can specify a max number of configurations to return at a time. The default max is 50. Results are returned in pagefuls. If AWS Elemental MediaTailor has more configurations than the specified max, it provides parameters in the response that you can use to retrieve the next pageful.

" + "documentation" : "

Returns a list of the playback configurations defined in AWS Elemental MediaTailor. You can specify a maximum number of configurations to return at a time. The default maximum is 50. Results are returned in pagefuls. If MediaTailor has more configurations than the specified maximum, it provides parameters in the response that you can use to retrieve the next pageful.

" + }, + "ListTagsForResource" : { + "name" : "ListTagsForResource", + "http" : { + "method" : "GET", + "requestUri" : "/tags/{ResourceArn}", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListTagsForResourceRequest" + }, + "output" : { + "shape" : "ListTagsForResourceResponse", + "documentation" : "

Success.

" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "

Invalid request parameters.

" + } ], + "documentation" : "

Returns a list of the tags assigned to the specified playback configuration resource.

" }, "PutPlaybackConfiguration" : { "name" : "PutPlaybackConfiguration", @@ -71,13 +93,54 @@ "shape" : "PutPlaybackConfigurationRequest" }, "output" : { - "shape" : "PutPlaybackConfigurationResponse" + "shape" : "PutPlaybackConfigurationResponse", + "documentation" : "

Success.

" }, "errors" : [ ], - "documentation" : "

Adds a new configuration to AWS Elemental MediaTailor.

" + "documentation" : "

Adds a new playback configuration to AWS Elemental MediaTailor.

" + }, + "TagResource" : { + "name" : "TagResource", + "http" : { + "method" : "POST", + "requestUri" : "/tags/{ResourceArn}", + "responseCode" : 204 + }, + "input" : { + "shape" : "TagResourceRequest" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "

Invalid request parameters.

" + } ], + "documentation" : "

Adds tags to the specified playback configuration resource. You can specify one or more tags to add.

" + }, + "UntagResource" : { + "name" : "UntagResource", + "http" : { + "method" : "DELETE", + "requestUri" : "/tags/{ResourceArn}", + "responseCode" : 204 + }, + "input" : { + "shape" : "UntagResourceRequest" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "

Invalid request parameters.

" + } ], + "documentation" : "

Removes tags from the specified playback configuration resource. You can specify one or more tags to remove.

" } }, "shapes" : { + "BadRequestException" : { + "type" : "structure", + "exception" : true, + "error" : { + "httpStatusCode" : 400 + }, + "documentation" : "

Invalid request parameters.

" + }, "CdnConfiguration" : { "type" : "structure", "members" : { @@ -107,24 +170,24 @@ "members" : { "ManifestEndpointPrefix" : { "shape" : "__string", - "documentation" : "

The URL that is used to initiate a playback session for devices that support DASH.

" + "documentation" : "

The URL generated by MediaTailor to initiate a playback session. The session uses server-side reporting. This setting is ignored in PUT operations.

" }, "MpdLocation" : { "shape" : "__string", - "documentation" : "

The setting that controls whether MediaTailor includes the Location tag in DASH Manifests. MediaTailor populates the Location tag with the URL for manifest update requests, to be used by players that don't support sticky redirects. Disable this if you have CDN routing rules set up for accessing MediaTailor manifests and you are either using client-side reporting or your players support sticky HTTP redirects. Valid values are DISABLED and EMT_DEFAULT. The EMT_DEFAULT setting enables the inclusion of the tag and is the default value.

" + "documentation" : "

The setting that controls whether MediaTailor includes the Location tag in DASH manifests. MediaTailor populates the Location tag with the URL for manifest update requests, to be used by players that don't support sticky redirects. Disable this if you have CDN routing rules set up for accessing MediaTailor manifests and you are either using client-side reporting or your players support sticky HTTP redirects. Valid values are DISABLED and EMT_DEFAULT. The EMT_DEFAULT setting enables the inclusion of the tag and is the default value.

" } }, - "documentation" : "

The configuration object for DASH content.

" + "documentation" : "

The configuration for DASH content.

" }, "DashConfigurationForPut" : { "type" : "structure", "members" : { "MpdLocation" : { "shape" : "__string", - "documentation" : "

The setting that controls whether MediaTailor includes the Location tag in DASH Manifests. MediaTailor populates the Location tag with the URL for manifest update requests, to be used by players that don't support sticky redirects. Disable this if you have CDN routing rules set up for accessing MediaTailor manifests and you are either using client-side reporting or your players support sticky HTTP redirects. Valid values are DISABLED and EMT_DEFAULT. The EMT_DEFAULT setting enables the inclusion of the tag and is the default value.

" + "documentation" : "

The setting that controls whether MediaTailor includes the Location tag in DASH manifests. MediaTailor populates the Location tag with the URL for manifest update requests, to be used by players that don't support sticky redirects. Disable this if you have CDN routing rules set up for accessing MediaTailor manifests and you are either using client-side reporting or your players support sticky HTTP redirects. Valid values are DISABLED and EMT_DEFAULT. The EMT_DEFAULT setting enables the inclusion of the tag and is the default value.

" } }, - "documentation" : "

The configuration object for DASH content.

" + "documentation" : "

The configuration for DASH PUT operations.

" }, "DeletePlaybackConfigurationRequest" : { "type" : "structure", @@ -133,7 +196,7 @@ "shape" : "__string", "location" : "uri", "locationName" : "Name", - "documentation" : "

The identifier for the configuration.

" + "documentation" : "

The identifier for the playback configuration.

" } }, "required" : [ "Name" ] @@ -153,7 +216,7 @@ "shape" : "__string", "location" : "uri", "locationName" : "Name", - "documentation" : "

The identifier for the configuration.

" + "documentation" : "

The identifier for the playback configuration.

" } }, "required" : [ "Name" ] @@ -163,7 +226,7 @@ "members" : { "AdDecisionServerUrl" : { "shape" : "__string", - "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25000 characters.

" + "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

" }, "CdnConfiguration" : { "shape" : "CdnConfiguration", @@ -171,7 +234,7 @@ }, "DashConfiguration" : { "shape" : "DashConfiguration", - "documentation" : "

The configuration object for DASH content.

" + "documentation" : "

The configuration for DASH content.

" }, "HlsConfiguration" : { "shape" : "HlsConfiguration", @@ -179,7 +242,11 @@ }, "Name" : { "shape" : "__string", - "documentation" : "

The identifier for the configuration.

" + "documentation" : "

The identifier for the playback configuration.

" + }, + "PlaybackConfigurationArn" : { + "shape" : "__string", + "documentation" : "

The Amazon Resource Name (ARN) for the playback configuration.

" }, "PlaybackEndpointPrefix" : { "shape" : "__string", @@ -191,11 +258,16 @@ }, "SlateAdUrl" : { "shape" : "__string", - "documentation" : "

URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because AWS Elemental MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + "documentation" : "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID playback configurations. For VPAID, the slate is required because MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + }, + "Tags" : { + "shape" : "__mapOf__string", + "locationName" : "tags", + "documentation" : "

The tags assigned to the playback configuration.

" }, "TranscodeProfileName" : { "shape" : "__string", - "documentation" : "

Associate this playbackConfiguration with a custom transcode profile, overriding MediaTailor's dynamic transcoding defaults. Do not include this field if you have not setup custom profiles with the MediaTailor service team.

" + "documentation" : "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

" }, "VideoContentSourceUrl" : { "shape" : "__string", @@ -209,19 +281,48 @@ "members" : { "AdDecisionServerUrl" : { "shape" : "__string", - "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25000 characters.

" + "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

" }, "CdnConfiguration" : { "shape" : "CdnConfiguration", "documentation" : "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

" }, + "DashConfiguration" : { + "shape" : "DashConfiguration", + "documentation" : "

The configuration for DASH content.

" + }, + "HlsConfiguration" : { + "shape" : "HlsConfiguration", + "documentation" : "

The configuration for HLS content.

" + }, "Name" : { "shape" : "__string", - "documentation" : "

The identifier for the configuration.

" + "documentation" : "

The identifier for the playback configuration.

" + }, + "PlaybackConfigurationArn" : { + "shape" : "__string", + "documentation" : "

The Amazon Resource Name (ARN) for the playback configuration.

" + }, + "PlaybackEndpointPrefix" : { + "shape" : "__string", + "documentation" : "

The URL that the player accesses to get a manifest from AWS Elemental MediaTailor. This session will use server-side reporting.

" + }, + "SessionInitializationEndpointPrefix" : { + "shape" : "__string", + "documentation" : "

The URL that the player uses to initialize a session that uses client-side reporting.

" }, "SlateAdUrl" : { "shape" : "__string", - "documentation" : "

URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because AWS Elemental MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + "documentation" : "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID playback configurations. For VPAID, the slate is required because MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + }, + "Tags" : { + "shape" : "__mapOf__string", + "locationName" : "tags", + "documentation" : "

The tags assigned to the playback configuration.

" + }, + "TranscodeProfileName" : { + "shape" : "__string", + "documentation" : "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

" }, "VideoContentSourceUrl" : { "shape" : "__string", @@ -242,7 +343,7 @@ "shape" : "__string", "location" : "querystring", "locationName" : "NextToken", - "documentation" : "

Pagination token returned by the GET list request when results overrun the meximum allowed. Use the token to fetch the next page of results.

" + "documentation" : "

Pagination token returned by the GET list request when results exceed the maximum allowed. Use the token to fetch the next page of results.

" } } }, @@ -251,11 +352,33 @@ "members" : { "Items" : { "shape" : "__listOfPlaybackConfigurations", - "documentation" : "

Array of playback configurations. This may be all of the available configurations or a subset, depending on the settings you provide and on the total number of configurations stored.

" + "documentation" : "

Array of playback configurations. This might be all the available configurations or a subset, depending on the settings that you provide and the total number of configurations stored.

" }, "NextToken" : { "shape" : "__string", - "documentation" : "

Pagination token returned by the GET list request when results overrun the meximum allowed. Use the token to fetch the next page of results.

" + "documentation" : "

Pagination token returned by the GET list request when results exceed the maximum allowed. Use the token to fetch the next page of results.

" + } + } + }, + "ListTagsForResourceRequest" : { + "type" : "structure", + "members" : { + "ResourceArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "ResourceArn", + "documentation" : "

The Amazon Resource Name (ARN) for the playback configuration. You can get this from the response to any playback configuration request.

" + } + }, + "required" : [ "ResourceArn" ] + }, + "ListTagsForResourceResponse" : { + "type" : "structure", + "members" : { + "Tags" : { + "shape" : "__mapOf__string", + "locationName" : "tags", + "documentation" : "

A comma-separated list of tag key:value pairs. For example: \n {\n \"Key1\": \"Value1\",\n \"Key2\": \"Value2\"\n }\n

" } } }, @@ -264,7 +387,7 @@ "members" : { "AdDecisionServerUrl" : { "shape" : "__string", - "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing you can provide a static VAST URL. The maximum length is 25000 characters.

" + "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing you can provide a static VAST URL. The maximum length is 25,000 characters.

" }, "CdnConfiguration" : { "shape" : "CdnConfiguration", @@ -272,19 +395,24 @@ }, "DashConfiguration" : { "shape" : "DashConfigurationForPut", - "documentation" : "

The configuration object for DASH content.

" + "documentation" : "

The configuration for DASH content.

" }, "Name" : { "shape" : "__string", - "documentation" : "

The identifier for the configuration.

" + "documentation" : "

The identifier for the playback configuration.

" }, "SlateAdUrl" : { "shape" : "__string", - "documentation" : "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because AWS Elemental MediaTailor provides it in the slots that are designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + "documentation" : "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because MediaTailor provides it in the slots that are designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + }, + "Tags" : { + "shape" : "__mapOf__string", + "locationName" : "tags", + "documentation" : "

The tags to assign to the playback configuration.

" }, "TranscodeProfileName" : { "shape" : "__string", - "documentation" : "

Associate this playbackConfiguration with a custom transcode profile, overriding MediaTailor's dynamic transcoding defaults. Do not include this field if you have not setup custom profiles with the MediaTailor service team.

" + "documentation" : "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

" }, "VideoContentSourceUrl" : { "shape" : "__string", @@ -297,7 +425,7 @@ "members" : { "AdDecisionServerUrl" : { "shape" : "__string", - "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25000 characters.

" + "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

" }, "CdnConfiguration" : { "shape" : "CdnConfiguration", @@ -305,7 +433,7 @@ }, "DashConfiguration" : { "shape" : "DashConfiguration", - "documentation" : "

The configuration object for DASH content.

" + "documentation" : "

The configuration for DASH content.

" }, "HlsConfiguration" : { "shape" : "HlsConfiguration", @@ -313,7 +441,11 @@ }, "Name" : { "shape" : "__string", - "documentation" : "

The identifier for the configuration.

" + "documentation" : "

The identifier for the playback configuration.

" + }, + "PlaybackConfigurationArn" : { + "shape" : "__string", + "documentation" : "

The Amazon Resource Name (ARN) for the playback configuration.

" }, "PlaybackEndpointPrefix" : { "shape" : "__string", @@ -325,11 +457,16 @@ }, "SlateAdUrl" : { "shape" : "__string", - "documentation" : "

URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because AWS Elemental MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + "documentation" : "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID playback configurations. For VPAID, the slate is required because MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + }, + "Tags" : { + "shape" : "__mapOf__string", + "locationName" : "tags", + "documentation" : "

The tags assigned to the playback configuration.

" }, "TranscodeProfileName" : { "shape" : "__string", - "documentation" : "

Associate this playbackConfiguration with a custom transcode profile, overriding MediaTailor's dynamic transcoding defaults. Do not include this field if you have not setup custom profiles with the MediaTailor service team.

" + "documentation" : "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

" }, "VideoContentSourceUrl" : { "shape" : "__string", @@ -337,6 +474,53 @@ } } }, + "TagResourceRequest" : { + "type" : "structure", + "members" : { + "ResourceArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "ResourceArn", + "documentation" : "

The Amazon Resource Name (ARN) for the playback configuration. You can get this from the response to any playback configuration request.

" + }, + "Tags" : { + "shape" : "__mapOf__string", + "locationName" : "tags", + "documentation" : "

A comma-separated list of tag key:value pairs. For example: \n {\n \"Key1\": \"Value1\",\n \"Key2\": \"Value2\"\n }\n

" + } + }, + "required" : [ "ResourceArn", "Tags" ] + }, + "TagsModel" : { + "type" : "structure", + "members" : { + "Tags" : { + "shape" : "__mapOf__string", + "locationName" : "tags", + "documentation" : "

A comma-separated list of tag key:value pairs. For example: \n {\n \"Key1\": \"Value1\",\n \"Key2\": \"Value2\"\n }\n

" + } + }, + "documentation" : "

A set of tags assigned to a resource.

", + "required" : [ "Tags" ] + }, + "UntagResourceRequest" : { + "type" : "structure", + "members" : { + "ResourceArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "ResourceArn", + "documentation" : "

The Amazon Resource Name (ARN) for the playback configuration. You can get this from the response to any playback configuration request.

" + }, + "TagKeys" : { + "shape" : "__listOf__string", + "location" : "querystring", + "locationName" : "tagKeys", + "documentation" : "

A comma-separated list of the tag keys to remove from the playback configuration.

" + } + }, + "required" : [ "ResourceArn", "TagKeys" ] + }, "__boolean" : { "type" : "boolean" }, @@ -352,9 +536,24 @@ "shape" : "PlaybackConfiguration" } }, + "__listOf__string" : { + "type" : "list", + "member" : { + "shape" : "__string" + } + }, "__long" : { "type" : "long" }, + "__mapOf__string" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "__string" + } + }, "__string" : { "type" : "string" }, @@ -363,7 +562,7 @@ "min": 1, "max": 100 }, - "__timestampIso8601" : { + "__timestampIso8601" : { "type" : "timestamp", "timestampFormat" : "iso8601" }, @@ -373,4 +572,4 @@ } }, "documentation" : "

Use the AWS Elemental MediaTailor SDK to configure scalable ad insertion for your live and VOD content. With AWS Elemental MediaTailor, you can serve targeted ads to viewers while maintaining broadcast quality in over-the-top (OTT) video applications. For information about using the service, including detailed information about the settings covered in this guide, see the AWS Elemental MediaTailor User Guide.

Through the SDK, you manage AWS Elemental MediaTailor configurations the same as you do through the console. For example, you specify ad insertion behavior and mapping information for the origin server and the ad decision server (ADS).

" -} +} \ No newline at end of file diff --git a/botocore/data/mgh/2017-05-31/paginators-1.json b/botocore/data/mgh/2017-05-31/paginators-1.json index ea142457..4a1efc37 100644 --- a/botocore/data/mgh/2017-05-31/paginators-1.json +++ b/botocore/data/mgh/2017-05-31/paginators-1.json @@ -1,3 +1,28 @@ { - "pagination": {} + "pagination": { + "ListCreatedArtifacts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "CreatedArtifactList" + }, + "ListDiscoveredResources": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DiscoveredResourceList" + }, + "ListMigrationTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "MigrationTaskSummaryList" + }, + "ListProgressUpdateStreams": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ProgressUpdateStreamSummaryList" + } + } } diff --git a/botocore/data/mq/2017-11-27/paginators-1.json b/botocore/data/mq/2017-11-27/paginators-1.json index ea142457..55160732 100644 --- a/botocore/data/mq/2017-11-27/paginators-1.json +++ b/botocore/data/mq/2017-11-27/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListBrokers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "BrokerSummaries" + } + } } diff --git a/botocore/data/neptune/2014-10-31/paginators-1.json b/botocore/data/neptune/2014-10-31/paginators-1.json index f1a247f7..a93bd84d 100644 --- a/botocore/data/neptune/2014-10-31/paginators-1.json +++ b/botocore/data/neptune/2014-10-31/paginators-1.json @@ -53,6 +53,36 @@ "limit_key": "MaxRecords", "output_token": "Marker", "result_key": "OrderableDBInstanceOptions" + }, + "DescribeDBClusterParameterGroups": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": 
"DBClusterParameterGroups" + }, + "DescribeDBClusterParameters": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "Parameters" + }, + "DescribeDBClusterSnapshots": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusterSnapshots" + }, + "DescribeDBClusters": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusters" + }, + "DescribePendingMaintenanceActions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "PendingMaintenanceActions" } } } diff --git a/botocore/data/opsworkscm/2016-11-01/paginators-1.json b/botocore/data/opsworkscm/2016-11-01/paginators-1.json index ea142457..3324f85e 100644 --- a/botocore/data/opsworkscm/2016-11-01/paginators-1.json +++ b/botocore/data/opsworkscm/2016-11-01/paginators-1.json @@ -1,3 +1,22 @@ { - "pagination": {} + "pagination": { + "DescribeBackups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Backups" + }, + "DescribeEvents": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ServerEvents" + }, + "DescribeServers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Servers" + } + } } diff --git a/botocore/data/opsworkscm/2016-11-01/service-2.json b/botocore/data/opsworkscm/2016-11-01/service-2.json index c4b4ba9d..a87707fc 100644 --- a/botocore/data/opsworkscm/2016-11-01/service-2.json +++ b/botocore/data/opsworkscm/2016-11-01/service-2.json @@ -248,7 +248,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Updates engine-specific attributes on a specified server. The server enters the MODIFYING state when this operation is in progress. Only one update can occur at a time. You can use this command to reset a Chef server's private key (CHEF_PIVOTAL_KEY), a Chef server's admin password (CHEF_DELIVERY_ADMIN_PASSWORD), or a Puppet server's admin password (PUPPET_ADMIN_PASSWORD).

This operation is asynchronous.

This operation can only be called for servers in HEALTHY or UNHEALTHY states. Otherwise, an InvalidStateException is raised. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are not valid.

" + "documentation":"

Updates engine-specific attributes on a specified server. The server enters the MODIFYING state when this operation is in progress. Only one update can occur at a time. You can use this command to reset a Chef server's public key (CHEF_PIVOTAL_KEY) or a Puppet server's admin password (PUPPET_ADMIN_PASSWORD).

This operation is asynchronous.

This operation can only be called for servers in HEALTHY or UNHEALTHY states. Otherwise, an InvalidStateException is raised. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are not valid.

" } }, "shapes":{ @@ -500,7 +500,7 @@ }, "EngineAttributes":{ "shape":"EngineAttributes", - "documentation":"

Optional engine attributes on a specified server.

Attributes accepted in a Chef createServer request:

  • CHEF_PIVOTAL_KEY: A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API. When no CHEF_PIVOTAL_KEY is set, one is generated and returned in the response.

  • CHEF_DELIVERY_ADMIN_PASSWORD: The password for the administrative user in the Chef Automate GUI. The password length is a minimum of eight characters, and a maximum of 32. The password can contain letters, numbers, and special characters (!/@#$%^&+=_). The password must contain at least one lower case letter, one upper case letter, one number, and one special character. When no CHEF_DELIVERY_ADMIN_PASSWORD is set, one is generated and returned in the response.

Attributes accepted in a Puppet createServer request:

  • PUPPET_ADMIN_PASSWORD: To work with the Puppet Enterprise console, a password must use ASCII characters.

" + "documentation":"

Optional engine attributes on a specified server.

Attributes accepted in a Chef createServer request:

  • CHEF_PIVOTAL_KEY: A base64-encoded RSA public key. The corresponding private key is required to access the Chef API. When no CHEF_PIVOTAL_KEY is set, a private key is generated and returned in the response.

  • CHEF_DELIVERY_ADMIN_PASSWORD: The password for the administrative user in the Chef Automate GUI. The password length is a minimum of eight characters, and a maximum of 32. The password can contain letters, numbers, and special characters (!/@#$%^&+=_). The password must contain at least one lower case letter, one upper case letter, one number, and one special character. When no CHEF_DELIVERY_ADMIN_PASSWORD is set, one is generated and returned in the response.

Attributes accepted in a Puppet createServer request:

  • PUPPET_ADMIN_PASSWORD: To work with the Puppet Enterprise console, a password must use ASCII characters.

  • PUPPET_R10K_REMOTE: The r10k remote is the URL of your control repository (for example, ssh://git@your.git-repo.com:user/control-repo.git). Specifying an r10k remote opens TCP port 8170.

  • PUPPET_R10K_PRIVATE_KEY: If you are using a private Git repository, add PUPPET_R10K_PRIVATE_KEY to specify an SSH URL and a PEM-encoded private SSH key.

" }, "BackupRetentionCount":{ "shape":"BackupRetentionCountDefinition", @@ -540,7 +540,7 @@ }, "SubnetIds":{ "shape":"Strings", - "documentation":"

The IDs of subnets in which to launch the server EC2 instance.

Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have \"Auto Assign Public IP\" enabled.

EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have \"Auto Assign Public IP\" enabled.

For more information about supported Amazon EC2 platforms, see Supported Platforms.

" + "documentation":"

The IDs of subnets in which to launch the server EC2 instance.

Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have \"Auto Assign Public IP\" enabled.

EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have \"Auto Assign Public IP\" enabled.

For more information about supported Amazon EC2 platforms, see Supported Platforms.

" }, "BackupId":{ "shape":"BackupId", @@ -788,15 +788,15 @@ "members":{ "ExportAttributeName":{ "shape":"String", - "documentation":"

The name of the export attribute. Currently supported export attribute is \"Userdata\" which exports a userdata script filled out with parameters provided in the InputAttributes list.

" + "documentation":"

The name of the export attribute. Currently, the supported export attribute is Userdata. This exports a user data script that includes parameters and values provided in the InputAttributes list.

" }, "ServerName":{ "shape":"ServerName", - "documentation":"

The name of the Server to which the attribute is being exported from

" + "documentation":"

The name of the server from which you are exporting the attribute.

" }, "InputAttributes":{ "shape":"EngineAttributes", - "documentation":"

The list of engine attributes. The list type is EngineAttribute. EngineAttribute is a pair of attribute name and value. For ExportAttributeName \"Userdata\", currently supported input attribute names are: - \"RunList\": For Chef, an ordered list of roles and/or recipes that are run in the exact order. For Puppet, this parameter is ignored. - \"OrganizationName\": For Chef, an organization name. AWS OpsWorks for Chef Server always creates the organization \"default\". For Puppet, this parameter is ignored. - \"NodeEnvironment\": For Chef, a node environment (eg. development, staging, onebox). For Puppet, this parameter is ignored. - \"NodeClientVersion\": For Chef, version of Chef Engine (3 numbers separated by dots, eg. \"13.8.5\"). If empty, it uses the latest one. For Puppet, this parameter is ignored.

" + "documentation":"

The list of engine attributes. The list type is EngineAttribute. An EngineAttribute list item is a pair that includes an attribute name and its value. For the Userdata ExportAttributeName, the following are supported engine attribute names.

  • RunList In Chef, a list of roles or recipes that are run in the specified order. In Puppet, this parameter is ignored.

  • OrganizationName In Chef, an organization name. AWS OpsWorks for Chef Automate always creates the organization default. In Puppet, this parameter is ignored.

  • NodeEnvironment In Chef, a node environment (for example, development, staging, or one-box). In Puppet, this parameter is ignored.

  • NodeClientVersion In Chef, the version of the Chef engine (three numbers separated by dots, such as 13.8.5). If this attribute is empty, OpsWorks for Chef Automate uses the most current version. In Puppet, this parameter is ignored.

" } } }, @@ -805,11 +805,11 @@ "members":{ "EngineAttribute":{ "shape":"EngineAttribute", - "documentation":"

The requested engine attribute pair with attribute name and value.

" + "documentation":"

The requested engine attribute pair with attribute name and value.

" }, "ServerName":{ "shape":"ServerName", - "documentation":"

The requested ServerName.

" + "documentation":"

The server name used in the request.

" } } }, @@ -876,7 +876,7 @@ "NodeAssociationStatusToken":{"type":"string"}, "NodeName":{ "type":"string", - "documentation":"

The node name that is used by chef-client or puppet-agentfor a new node. We recommend to use a unique FQDN as hostname. For more information, see the Chef or Puppet documentation.

", + "documentation":"

The node name that is used by chef-client or puppet-agent for a new node. We recommend that you use a unique FQDN as the hostname. For more information, see the Chef or Puppet documentation.

", "pattern":"^[\\-\\p{Alnum}_:.]+$" }, "ResourceAlreadyExistsException":{ diff --git a/botocore/data/organizations/2016-11-28/service-2.json b/botocore/data/organizations/2016-11-28/service-2.json index 6685059c..be51fc1e 100644 --- a/botocore/data/organizations/2016-11-28/service-2.json +++ b/botocore/data/organizations/2016-11-28/service-2.json @@ -34,7 +34,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"AccessDeniedForDependencyException"} ], - "documentation":"

Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.

This operation can be called only by the following principals when they also have the relevant IAM permissions:

  • Invitation to join or Approve all features request handshakes: only a principal from the member account.

    The user who calls the API for an invitation to join must have the organizations:AcceptHandshake permission. If you enabled all features in the organization, then the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

  • Enable all features final confirmation handshake: only a principal from the master account.

    For more information about invitations, see Inviting an AWS Account to Join Your Organization in the AWS Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that it is deleted.

" + "documentation":"

Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.

This operation can be called only by the following principals when they also have the relevant IAM permissions:

  • Invitation to join or Approve all features request handshakes: only a principal from the member account.

    The user who calls the API for an invitation to join must have the organizations:AcceptHandshake permission. If you enabled all features in the organization, then the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

  • Enable all features final confirmation handshake: only a principal from the master account.

    For more information about invitations, see Inviting an AWS Account to Join Your Organization in the AWS Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that it is deleted.

" }, "AttachPolicy":{ "name":"AttachPolicy", @@ -56,7 +56,7 @@ {"shape":"TargetNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy:

  • Service control policy (SCP) - An SCP specifies what permissions can be delegated to users in affected member accounts. The scope of influence for a policy depends on what you attach the policy to:

    • If you attach an SCP to a root, it affects all accounts in the organization.

    • If you attach an SCP to an OU, it affects all accounts in that OU and in any child OUs.

    • If you attach the policy directly to an account, then it affects only that account.

    SCPs essentially are permission \"filters\". When you attach one SCP to a higher level root or OU, and you also attach a different SCP to a child OU or to an account, the child policy can further restrict only the permissions that pass through the parent filter and are available to the child. An SCP that is attached to a child cannot grant a permission that is not already granted by the parent. For example, imagine that the parent SCP allows permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and G. The result is that the accounts affected by the child SCP are allowed to use only C, D, and E. They cannot use A or B because they were filtered out by the child OU. They also cannot use F and G because they were filtered out by the parent OU. They cannot be granted back by the child SCP; child SCPs can only filter the permissions they receive from the parent SCP.

    AWS Organizations attaches a default SCP named \"FullAWSAccess to every root, OU, and account. This default SCP allows all services and actions, enabling any new child OU or account to inherit the permissions of the parent root or OU. If you detach the default policy, you must replace it with a policy that specifies the permissions that you want to allow in that OU or account.

    For more information about how Organizations policies permissions work, see Using Service Control Policies in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" + "documentation":"

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy:

  • Service control policy (SCP) - An SCP specifies what permissions can be delegated to users in affected member accounts. The scope of influence for a policy depends on what you attach the policy to:

    • If you attach an SCP to a root, it affects all accounts in the organization.

    • If you attach an SCP to an OU, it affects all accounts in that OU and in any child OUs.

    • If you attach the policy directly to an account, then it affects only that account.

    SCPs are JSON policies that specify the maximum permissions for an organization or organizational unit (OU). When you attach one SCP to a higher level root or OU, and you also attach a different SCP to a child OU or to an account, the child policy can further restrict only the permissions that pass through the parent filter and are available to the child. An SCP that is attached to a child cannot grant a permission that is not already granted by the parent. For example, imagine that the parent SCP allows permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and G. The result is that the accounts affected by the child SCP are allowed to use only C, D, and E. They cannot use A or B because they were filtered out by the child OU. They also cannot use F and G because they were filtered out by the parent OU. They cannot be granted back by the child SCP; child SCPs can only filter the permissions they receive from the parent SCP.

    AWS Organizations attaches a default SCP named \"FullAWSAccess\" to every root, OU, and account. This default SCP allows all services and actions, enabling any new child OU or account to inherit the permissions of the parent root or OU. If you detach the default policy, you must replace it with a policy that specifies the permissions that you want to allow in that OU or account.

    For more information about how Organizations policy permissions work, see Using Service Control Policies in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" }, "CancelHandshake":{ "name":"CancelHandshake", @@ -96,7 +96,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

  • Use the OperationId response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

  • Check the AWS CloudTrail log for the CreateAccountResult event. For information on using AWS CloudTrail with Organizations, see Monitoring the Activity in Your Organization in the AWS Organizations User Guide.

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations will create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateAccount to create multiple temporary accounts is not recommended. You can only close an account from the Billing and Cost Management Console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

" + "documentation":"

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

  • Use the OperationId response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

  • Check the AWS CloudTrail log for the CreateAccountResult event. For information on using AWS CloudTrail with Organizations, see Monitoring the Activity in Your Organization in the AWS Organizations User Guide.

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations will create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management Console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

" }, "CreateOrganization":{ "name":"CreateOrganization", @@ -116,7 +116,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"AccessDeniedForDependencyException"} ], - "documentation":"

Creates an AWS organization. The account whose user is calling the CreateOrganization operation automatically becomes the master account of the new organization.

This operation must be called using credentials from the account that is to become the new organization's master account. The principal must also have the relevant IAM permissions.

By default (or if you set the FeatureSet parameter to ALL), the new organization is created with all features enabled and service control policies automatically enabled in the root. If you instead choose to create the organization supporting only the consolidated billing features by setting the FeatureSet parameter to \"CONSOLIDATED_BILLING\", then no policy types are enabled by default and you cannot use organization policies.

" + "documentation":"

Creates an AWS organization. The account whose user is calling the CreateOrganization operation automatically becomes the master account of the new organization.

This operation must be called using credentials from the account that is to become the new organization's master account. The principal must also have the relevant IAM permissions.

By default (or if you set the FeatureSet parameter to ALL), the new organization is created with all features enabled and service control policies automatically enabled in the root. If you instead choose to create the organization supporting only the consolidated billing features by setting the FeatureSet parameter to \"CONSOLIDATED_BILLING\", then no policy types are enabled by default and you cannot use organization policies.

" }, "CreateOrganizationalUnit":{ "name":"CreateOrganizationalUnit", @@ -137,7 +137,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Creates an organizational unit (OU) within a root or parent OU. An OU is a container for accounts that enables you to organize your accounts to apply policies according to your business requirements. The number of levels deep that you can nest OUs is dependent upon the policy types enabled for that root. For service control policies, the limit is five.

For more information about OUs, see Managing Organizational Units in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" + "documentation":"

Creates an organizational unit (OU) within a root or parent OU. An OU is a container for accounts that enables you to organize your accounts to apply policies according to your business requirements. The number of levels deep that you can nest OUs is dependent upon the policy types enabled for that root. For service control policies, the limit is five.

For more information about OUs, see Managing Organizational Units in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" }, "CreatePolicy":{ "name":"CreatePolicy", @@ -159,7 +159,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual AWS account.

For more information about policies and their use, see Managing Organization Policies.

This operation can be called only from the organization's master account.

" + "documentation":"

Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual AWS account.

For more information about policies and their use, see Managing Organization Policies.

This operation can be called only from the organization's master account.

" }, "DeclineHandshake":{ "name":"DeclineHandshake", @@ -361,7 +361,7 @@ {"shape":"TargetNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Detaches a policy from a target root, organizational unit (OU), or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate.

Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, then you must attach the replacement policy before you can remove the default one. This is the authorization strategy of whitelisting. If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), then you are using the authorization strategy of blacklisting.

This operation can be called only from the organization's master account.

" + "documentation":"

Detaches a policy from a target root, organizational unit (OU), or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate.

Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, then you must attach the replacement policy before you can remove the default one. This is the authorization strategy of whitelisting. If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), then you are using the authorization strategy of blacklisting.

This operation can be called only from the organization's master account.

" }, "DisableAWSServiceAccess":{ "name":"DisableAWSServiceAccess", @@ -437,7 +437,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Enables all features in an organization. This enables the use of organization policies that can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that AWS Organizations supports. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled. Calling this operation sends a handshake to every invited account in the organization. The feature set change can be finalized and the additional features enabled only after all administrators in the invited accounts approve the change by accepting the handshake.

After you enable all features, you can separately enable or disable individual policy types in a root using EnablePolicyType and DisablePolicyType. To see the status of policy types in a root, use ListRoots.

After all invited member accounts accept the handshake, you finalize the feature set change by accepting the handshake that contains \"Action\": \"ENABLE_ALL_FEATURES\". This completes the change.

After you enable all features in your organization, the master account in the organization can apply policies on all member accounts. These policies can restrict what users and even administrators in those accounts can do. The master account can apply policies that prevent accounts from leaving the organization. Ensure that your account administrators are aware of this.

This operation can be called only from the organization's master account.

" + "documentation":"

Enables all features in an organization. This enables the use of organization policies that can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that AWS Organizations supports. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled. Calling this operation sends a handshake to every invited account in the organization. The feature set change can be finalized and the additional features enabled only after all administrators in the invited accounts approve the change by accepting the handshake.

After you enable all features, you can separately enable or disable individual policy types in a root using EnablePolicyType and DisablePolicyType. To see the status of policy types in a root, use ListRoots.

After all invited member accounts accept the handshake, you finalize the feature set change by accepting the handshake that contains \"Action\": \"ENABLE_ALL_FEATURES\". This completes the change.

After you enable all features in your organization, the master account in the organization can apply policies on all member accounts. These policies can restrict what users and even administrators in those accounts can do. The master account can apply policies that prevent accounts from leaving the organization. Ensure that your account administrators are aware of this.

This operation can be called only from the organization's master account.

" }, "EnablePolicyType":{ "name":"EnablePolicyType", @@ -848,7 +848,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

", + "documentation":"

You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

", "exception":true }, "AccessDeniedForDependencyException":{ @@ -873,7 +873,7 @@ }, "Arn":{ "shape":"AccountArn", - "documentation":"

The Amazon Resource Name (ARN) of the account.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the account.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" }, "Email":{ "shape":"Email", @@ -1106,11 +1106,11 @@ }, "RoleName":{ "shape":"RoleName", - "documentation":"

(Optional)

The name of an IAM role that AWS Organizations automatically preconfigures in the new member account. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account.

If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

For more information about how to use this role to access the member account, see Accessing and Administering the Member Accounts in Your Organization in the AWS Organizations User Guide, and steps 2 and 3 in Tutorial: Delegate Access Across AWS Accounts Using IAM Roles in the IAM User Guide.

The regex pattern that is used to validate this parameter is a string of characters that can consist of uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

" + "documentation":"

(Optional)

The name of an IAM role that AWS Organizations automatically preconfigures in the new member account. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account.

If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

For more information about how to use this role to access the member account, see Accessing and Administering the Member Accounts in Your Organization in the AWS Organizations User Guide, and steps 2 and 3 in Tutorial: Delegate Access Across AWS Accounts Using IAM Roles in the IAM User Guide.

The regex pattern that is used to validate this parameter is a string of characters that can consist of uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

" }, "IamUserAccessToBilling":{ "shape":"IAMUserAccessToBilling", - "documentation":"

If set to ALLOW, the new account enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see Activating Access to the Billing and Cost Management Console in the AWS Billing and Cost Management User Guide.

If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

" + "documentation":"

If set to ALLOW, the new account enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see Activating Access to the Billing and Cost Management Console in the AWS Billing and Cost Management User Guide.

If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

" } } }, @@ -1190,7 +1190,7 @@ "members":{ "FeatureSet":{ "shape":"OrganizationFeatureSet", - "documentation":"

Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality.

  • CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the master account. For more information, see Consolidated billing in the AWS Organizations User Guide.

  • ALL: In addition to all the features supported by the consolidated billing feature set, the master account can also apply any type of policy to any member account in the organization. For more information, see All features in the AWS Organizations User Guide.

" + "documentation":"

Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality.

  • CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the master account. For more information, see Consolidated billing in the AWS Organizations User Guide.

  • ALL: In addition to all the features supported by the consolidated billing feature set, the master account can also apply any type of policy to any member account in the organization. For more information, see All features in the AWS Organizations User Guide.

" } } }, @@ -1240,7 +1240,7 @@ "members":{ "Content":{ "shape":"PolicyContent", - "documentation":"

The policy content to add to the new policy. For example, if you create a service control policy (SCP), this string must be JSON text that specifies the permissions that admins in attached accounts can delegate to their users, groups, and roles. For more information about the SCP syntax, see Service Control Policy Syntax in the AWS Organizations User Guide.

" + "documentation":"

The policy content to add to the new policy. For example, if you create a service control policy (SCP), this string must be JSON text that specifies the permissions that admins in attached accounts can delegate to their users, groups, and roles. For more information about the SCP syntax, see Service Control Policy Syntax in the AWS Organizations User Guide.

" }, "Description":{ "shape":"PolicyDescription", @@ -1607,7 +1607,7 @@ }, "Arn":{ "shape":"HandshakeArn", - "documentation":"

The Amazon Resource Name (ARN) of a handshake.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of a handshake.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" }, "Parties":{ "shape":"HandshakeParties", @@ -2018,7 +2018,7 @@ "members":{ "Filter":{ "shape":"HandshakeFilter", - "documentation":"

Filters the handshakes that you want included in the response. The default is all types. Use the ActionType element to limit the output to only a specified type, such as INVITE, ENABLE-FULL-CONTROL, or APPROVE-FULL-CONTROL. Alternatively, for the ENABLE-FULL-CONTROL handshake that generates a separate child handshake for each member account, you can specify ParentHandshakeId to see only the handshakes that were generated by that parent request.

" + "documentation":"

Filters the handshakes that you want included in the response. The default is all types. Use the ActionType element to limit the output to only a specified type, such as INVITE, ENABLE_ALL_FEATURES, or APPROVE_ALL_FEATURES. Alternatively, for the ENABLE_ALL_FEATURES handshake that generates a separate child handshake for each member account, you can specify ParentHandshakeId to see only the handshakes that were generated by that parent request.

" }, "NextToken":{ "shape":"NextToken", @@ -2266,7 +2266,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

The provided policy document doesn't meet the requirements of the specified policy type. For example, the syntax might be incorrect. For details about service control policy syntax, see Service Control Policy Syntax in the AWS Organizations User Guide.

", + "documentation":"

The provided policy document doesn't meet the requirements of the specified policy type. For example, the syntax might be incorrect. For details about service control policy syntax, see Service Control Policy Syntax in the AWS Organizations User Guide.

", "exception":true }, "MasterCannotLeaveOrganizationException":{ @@ -2315,15 +2315,15 @@ }, "Arn":{ "shape":"OrganizationArn", - "documentation":"

The Amazon Resource Name (ARN) of an organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of an organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" }, "FeatureSet":{ "shape":"OrganizationFeatureSet", - "documentation":"

Specifies the functionality that currently is available to the organization. If set to \"ALL\", then all features are enabled and policies can be applied to accounts in the organization. If set to \"CONSOLIDATED_BILLING\", then only consolidated billing functionality is available. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

" + "documentation":"

Specifies the functionality that currently is available to the organization. If set to \"ALL\", then all features are enabled and policies can be applied to accounts in the organization. If set to \"CONSOLIDATED_BILLING\", then only consolidated billing functionality is available. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

" }, "MasterAccountArn":{ "shape":"AccountArn", - "documentation":"

The Amazon Resource Name (ARN) of the account that is designated as the master account for the organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the account that is designated as the master account for the organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" }, "MasterAccountId":{ "shape":"AccountId", @@ -2372,7 +2372,7 @@ }, "Arn":{ "shape":"OrganizationalUnitArn", - "documentation":"

The Amazon Resource Name (ARN) of this OU.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of this OU.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" }, "Name":{ "shape":"OrganizationalUnitName", @@ -2524,7 +2524,7 @@ }, "Arn":{ "shape":"PolicyArn", - "documentation":"

The Amazon Resource Name (ARN) of the policy.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the policy.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" }, "Name":{ "shape":"PolicyName", @@ -2558,7 +2558,7 @@ }, "Arn":{ "shape":"GenericArn", - "documentation":"

The Amazon Resource Name (ARN) of the policy target.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the policy target.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" }, "Name":{ "shape":"TargetName", @@ -2592,7 +2592,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

You can't use the specified policy type with the feature set currently enabled for this organization. For example, you can enable SCPs only after you enable all features in the organization. For more information, see Enabling and Disabling a Policy Type on a Root in the AWS Organizations User Guide.

", + "documentation":"

You can't use the specified policy type with the feature set currently enabled for this organization. For example, you can enable SCPs only after you enable all features in the organization. For more information, see Enabling and Disabling a Policy Type on a Root in the AWS Organizations User Guide.

", "exception":true }, "PolicyTypeNotEnabledException":{ @@ -2600,7 +2600,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

The specified policy type isn't currently enabled in this root. You can't attach policies of the specified type to entities in a root until you enable that type in the root. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

", + "documentation":"

The specified policy type isn't currently enabled in this root. You can't attach policies of the specified type to entities in a root until you enable that type in the root. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

", "exception":true }, "PolicyTypeStatus":{ @@ -2652,7 +2652,7 @@ }, "Arn":{ "shape":"RootArn", - "documentation":"

The Amazon Resource Name (ARN) of the root.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the root.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" }, "Name":{ "shape":"RootName", @@ -2740,7 +2740,7 @@ "Type":{"shape":"ExceptionType"}, "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

You've sent too many requests in too short a period of time. The limit helps protect against denial-of-service attacks. Try again later.

", + "documentation":"

You've sent too many requests in too short a period of time. The limit helps protect against denial-of-service attacks. Try again later.

For information on limits that affect Organizations, see Limits of AWS Organizations in the AWS Organizations User Guide.

", "exception":true }, "UpdateOrganizationalUnitRequest":{ @@ -2784,7 +2784,7 @@ }, "Content":{ "shape":"PolicyContent", - "documentation":"

If provided, the new content for the policy. The text must be correctly formatted JSON that complies with the syntax for the policy's type. For more information, see Service Control Policy Syntax in the AWS Organizations User Guide.

" + "documentation":"

If provided, the new content for the policy. The text must be correctly formatted JSON that complies with the syntax for the policy's type. For more information, see Service Control Policy Syntax in the AWS Organizations User Guide.

" } } }, @@ -2798,5 +2798,5 @@ } } }, - "documentation":"AWS Organizations API Reference

AWS Organizations is a web service that enables you to consolidate your multiple AWS accounts into an organization and centrally manage your accounts and their resources.

This guide provides descriptions of the Organizations API. For more information about using this service, see the AWS Organizations User Guide.

API Version

This version of the Organizations API Reference documents the Organizations API version 2016-11-28.

As an alternative to using the API directly, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, and more). The SDKs provide a convenient way to create programmatic access to AWS Organizations. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the AWS SDKs to make programmatic API calls to Organizations. However, you also can use the Organizations Query API to make direct calls to the Organizations web service. To learn more about the Organizations Query API, see Making Query Requests in the AWS Organizations User Guide. Organizations supports GET and POST requests for all actions. That is, the API does not require you to use GET for some actions and POST for others. However, GET requests are subject to the limitation size of a URL. Therefore, for operations that require larger sizes, use a POST request.

Signing Requests

When you send HTTP requests to AWS, you must sign the requests so that AWS can identify who sent them. You sign requests with your AWS access key, which consists of an access key ID and a secret access key. We strongly recommend that you do not create an access key for your root account. Anyone who has the access key for your root account has unrestricted access to all the resources in your account. Instead, create an access key for an IAM user account that has administrative privileges. As another option, use AWS Security Token Service to generate temporary security credentials, and use those credentials to sign requests.

To sign requests, we recommend that you use Signature Version 4. If you have an existing application that uses Signature Version 2, you do not have to update it to use Signature Version 4. However, some operations now require Signature Version 4. The documentation for operations that require version 4 indicate this requirement.

When you use the AWS Command Line Interface (AWS CLI) or one of the AWS SDKs to make requests to AWS, these tools automatically sign the requests for you with the access key that you specify when you configure the tools.

In this release, each organization can have only one root. In a future release, a single organization will support multiple roots.

Support and Feedback for AWS Organizations

We welcome your feedback. Send your comments to feedback-awsorganizations@amazon.com or post your feedback and questions in the AWS Organizations support forum. For more information about the AWS support forums, see Forums Help.

Endpoint to Call When Using the CLI or the AWS API

For the current release of Organizations, you must specify the us-east-1 region for all AWS API and CLI calls. You can do this in the CLI by using these parameters and commands:

  • Use the following parameter with each command to specify both the endpoint and its region:

    --endpoint-url https://organizations.us-east-1.amazonaws.com

  • Use the default endpoint, but configure your default region with this command:

    aws configure set default.region us-east-1

  • Use the following parameter with each command to specify the endpoint:

    --region us-east-1

For the various SDKs used to call the APIs, see the documentation for the SDK of interest to learn how to direct the requests to a specific endpoint. For more information, see Regions and Endpoints in the AWS General Reference.

How examples are presented

The JSON returned by the AWS Organizations service as response to your requests is returned as a single long string without line breaks or formatting whitespace. Both line breaks and whitespace are included in the examples in this guide to improve readability. When example input parameters also would result in long strings that would extend beyond the screen, we insert line breaks to enhance readability. You should always submit the input as a single JSON text string.

Recording API Requests

AWS Organizations supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information collected by AWS CloudTrail, you can determine which requests were successfully made to Organizations, who made the request, when it was made, and so on. For more about AWS Organizations and its support for AWS CloudTrail, see Logging AWS Organizations Events with AWS CloudTrail in the AWS Organizations User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

" + "documentation":"AWS Organizations API Reference

AWS Organizations is a web service that enables you to consolidate your multiple AWS accounts into an organization and centrally manage your accounts and their resources.

This guide provides descriptions of the Organizations API. For more information about using this service, see the AWS Organizations User Guide.

API Version

This version of the Organizations API Reference documents the Organizations API version 2016-11-28.

As an alternative to using the API directly, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, and more). The SDKs provide a convenient way to create programmatic access to AWS Organizations. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the AWS SDKs to make programmatic API calls to Organizations. However, you also can use the Organizations Query API to make direct calls to the Organizations web service. To learn more about the Organizations Query API, see Making Query Requests in the AWS Organizations User Guide. Organizations supports GET and POST requests for all actions. That is, the API does not require you to use GET for some actions and POST for others. However, GET requests are subject to the limitation size of a URL. Therefore, for operations that require larger sizes, use a POST request.

Signing Requests

When you send HTTP requests to AWS, you must sign the requests so that AWS can identify who sent them. You sign requests with your AWS access key, which consists of an access key ID and a secret access key. We strongly recommend that you do not create an access key for your root account. Anyone who has the access key for your root account has unrestricted access to all the resources in your account. Instead, create an access key for an IAM user account that has administrative privileges. As another option, use AWS Security Token Service to generate temporary security credentials, and use those credentials to sign requests.

To sign requests, we recommend that you use Signature Version 4. If you have an existing application that uses Signature Version 2, you do not have to update it to use Signature Version 4. However, some operations now require Signature Version 4. The documentation for operations that require version 4 indicate this requirement.

When you use the AWS Command Line Interface (AWS CLI) or one of the AWS SDKs to make requests to AWS, these tools automatically sign the requests for you with the access key that you specify when you configure the tools.

In this release, each organization can have only one root. In a future release, a single organization will support multiple roots.

Support and Feedback for AWS Organizations

We welcome your feedback. Send your comments to feedback-awsorganizations@amazon.com or post your feedback and questions in the AWS Organizations support forum. For more information about the AWS support forums, see Forums Help.

Endpoint to Call When Using the CLI or the AWS API

For the current release of Organizations, you must specify the us-east-1 region for all AWS API and CLI calls. You can do this in the CLI by using these parameters and commands:

  • Use the following parameter with each command to specify both the endpoint and its region:

    --endpoint-url https://organizations.us-east-1.amazonaws.com

  • Use the default endpoint, but configure your default region with this command:

    aws configure set default.region us-east-1

  • Use the following parameter with each command to specify the endpoint:

    --region us-east-1

For the various SDKs used to call the APIs, see the documentation for the SDK of interest to learn how to direct the requests to a specific endpoint. For more information, see Regions and Endpoints in the AWS General Reference.

How examples are presented

The JSON returned by the AWS Organizations service as response to your requests is returned as a single long string without line breaks or formatting whitespace. Both line breaks and whitespace are included in the examples in this guide to improve readability. When example input parameters also would result in long strings that would extend beyond the screen, we insert line breaks to enhance readability. You should always submit the input as a single JSON text string.

Recording API Requests

AWS Organizations supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information collected by AWS CloudTrail, you can determine which requests were successfully made to Organizations, who made the request, when it was made, and so on. For more about AWS Organizations and its support for AWS CloudTrail, see Logging AWS Organizations Events with AWS CloudTrail in the AWS Organizations User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

" } diff --git a/botocore/data/pinpoint-email/2018-07-26/paginators-1.json b/botocore/data/pinpoint-email/2018-07-26/paginators-1.json index ea142457..f2693b19 100644 --- a/botocore/data/pinpoint-email/2018-07-26/paginators-1.json +++ b/botocore/data/pinpoint-email/2018-07-26/paginators-1.json @@ -1,3 +1,34 @@ { - "pagination": {} + "pagination": { + "GetDedicatedIps": { + "input_token": "NextToken", + "limit_key": "PageSize", + "output_token": "NextToken", + "result_key": "DedicatedIps" + }, + "ListConfigurationSets": { + "input_token": "NextToken", + "limit_key": "PageSize", + "output_token": "NextToken", + "result_key": "ConfigurationSets" + }, + "ListDedicatedIpPools": { + "input_token": "NextToken", + "limit_key": "PageSize", + "output_token": "NextToken", + "result_key": "DedicatedIpPools" + }, + "ListDeliverabilityTestReports": { + "input_token": "NextToken", + "limit_key": "PageSize", + "output_token": "NextToken", + "result_key": "DeliverabilityTestReports" + }, + "ListEmailIdentities": { + "input_token": "NextToken", + "limit_key": "PageSize", + "output_token": "NextToken", + "result_key": "EmailIdentities" + } + } } diff --git a/botocore/data/pinpoint-sms-voice/2018-09-05/service-2.json b/botocore/data/pinpoint-sms-voice/2018-09-05/service-2.json new file mode 100644 index 00000000..7938a2ce --- /dev/null +++ b/botocore/data/pinpoint-sms-voice/2018-09-05/service-2.json @@ -0,0 +1,677 @@ +{ + "metadata" : { + "apiVersion" : "2018-09-05", + "endpointPrefix" : "sms-voice.pinpoint", + "signingName" : "sms-voice", + "serviceAbbreviation":"Pinpoint SMS Voice", + "serviceFullName" : "Amazon Pinpoint SMS and Voice Service", + "serviceId" : "Pinpoint SMS Voice", + "protocol" : "rest-json", + "jsonVersion" : "1.1", + "uid" : "pinpoint-sms-voice-2018-09-05", + "signatureVersion" : "v4" + }, + "operations" : { + "CreateConfigurationSet" : { + "name" : "CreateConfigurationSet", + "http" : { + "method" : "POST", + "requestUri" : "/v1/sms-voice/configuration-sets", + 
"responseCode" : 200 + }, + "input" : { + "shape" : "CreateConfigurationSetRequest" + }, + "output" : { + "shape" : "CreateConfigurationSetResponse", + "documentation" : "CreateConfigurationSetResponse" + }, + "errors" : [ { + "shape" : "TooManyRequestsException", + "documentation" : "TooManyRequestsException" + }, { + "shape" : "BadRequestException", + "documentation" : "BadRequestException" + }, { + "shape" : "LimitExceededException", + "documentation" : "LimitExceededException" + }, { + "shape" : "InternalServiceErrorException", + "documentation" : "InternalServiceErrorException" + }, { + "shape" : "AlreadyExistsException", + "documentation" : "AlreadyExistsException" + } ], + "documentation" : "Create a new configuration set. After you create the configuration set, you can add one or more event destinations to it." + }, + "CreateConfigurationSetEventDestination" : { + "name" : "CreateConfigurationSetEventDestination", + "http" : { + "method" : "POST", + "requestUri" : "/v1/sms-voice/configuration-sets/{ConfigurationSetName}/event-destinations", + "responseCode" : 200 + }, + "input" : { + "shape" : "CreateConfigurationSetEventDestinationRequest" + }, + "output" : { + "shape" : "CreateConfigurationSetEventDestinationResponse", + "documentation" : "CreateConfigurationSetEventDestinationResponse" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "BadRequestException" + }, { + "shape" : "LimitExceededException", + "documentation" : "LimitExceededException" + }, { + "shape" : "InternalServiceErrorException", + "documentation" : "InternalServiceErrorException" + }, { + "shape" : "NotFoundException", + "documentation" : "NotFoundException" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "TooManyRequestsException" + }, { + "shape" : "AlreadyExistsException", + "documentation" : "AlreadyExistsException" + } ], + "documentation" : "Create a new event destination in a configuration set." 
+ }, + "DeleteConfigurationSet" : { + "name" : "DeleteConfigurationSet", + "http" : { + "method" : "DELETE", + "requestUri" : "/v1/sms-voice/configuration-sets/{ConfigurationSetName}", + "responseCode" : 200 + }, + "input" : { + "shape" : "DeleteConfigurationSetRequest" + }, + "output" : { + "shape" : "DeleteConfigurationSetResponse", + "documentation" : "DeleteConfigurationSetResponse" + }, + "errors" : [ { + "shape" : "NotFoundException", + "documentation" : "NotFoundException" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "TooManyRequestsException" + }, { + "shape" : "BadRequestException", + "documentation" : "BadRequestException" + }, { + "shape" : "InternalServiceErrorException", + "documentation" : "InternalServiceErrorException" + } ], + "documentation" : "Deletes an existing configuration set." + }, + "DeleteConfigurationSetEventDestination" : { + "name" : "DeleteConfigurationSetEventDestination", + "http" : { + "method" : "DELETE", + "requestUri" : "/v1/sms-voice/configuration-sets/{ConfigurationSetName}/event-destinations/{EventDestinationName}", + "responseCode" : 200 + }, + "input" : { + "shape" : "DeleteConfigurationSetEventDestinationRequest" + }, + "output" : { + "shape" : "DeleteConfigurationSetEventDestinationResponse", + "documentation" : "DeleteConfigurationSetEventDestinationResponse" + }, + "errors" : [ { + "shape" : "NotFoundException", + "documentation" : "NotFoundException" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "TooManyRequestsException" + }, { + "shape" : "BadRequestException", + "documentation" : "BadRequestException" + }, { + "shape" : "InternalServiceErrorException", + "documentation" : "InternalServiceErrorException" + } ], + "documentation" : "Deletes an event destination in a configuration set." 
+ }, + "GetConfigurationSetEventDestinations" : { + "name" : "GetConfigurationSetEventDestinations", + "http" : { + "method" : "GET", + "requestUri" : "/v1/sms-voice/configuration-sets/{ConfigurationSetName}/event-destinations", + "responseCode" : 200 + }, + "input" : { + "shape" : "GetConfigurationSetEventDestinationsRequest" + }, + "output" : { + "shape" : "GetConfigurationSetEventDestinationsResponse", + "documentation" : "GetConfigurationSetEventDestinationsResponse" + }, + "errors" : [ { + "shape" : "NotFoundException", + "documentation" : "NotFoundException" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "TooManyRequestsException" + }, { + "shape" : "BadRequestException", + "documentation" : "BadRequestException" + }, { + "shape" : "InternalServiceErrorException", + "documentation" : "InternalServiceErrorException" + } ], + "documentation" : "Obtain information about an event destination, including the types of events it reports, the Amazon Resource Name (ARN) of the destination, and the name of the event destination." + }, + "SendVoiceMessage" : { + "name" : "SendVoiceMessage", + "http" : { + "method" : "POST", + "requestUri" : "/v1/sms-voice/voice/message", + "responseCode" : 200 + }, + "input" : { + "shape" : "SendVoiceMessageRequest" + }, + "output" : { + "shape" : "SendVoiceMessageResponse", + "documentation" : "SendVoiceMessageResponse" + }, + "errors" : [ { + "shape" : "TooManyRequestsException", + "documentation" : "TooManyRequestsException" + }, { + "shape" : "BadRequestException", + "documentation" : "BadRequestException" + }, { + "shape" : "InternalServiceErrorException", + "documentation" : "InternalServiceErrorException" + } ], + "documentation" : "Create a new voice message and send it to a recipient's phone number." 
+ }, + "UpdateConfigurationSetEventDestination" : { + "name" : "UpdateConfigurationSetEventDestination", + "http" : { + "method" : "PUT", + "requestUri" : "/v1/sms-voice/configuration-sets/{ConfigurationSetName}/event-destinations/{EventDestinationName}", + "responseCode" : 200 + }, + "input" : { + "shape" : "UpdateConfigurationSetEventDestinationRequest" + }, + "output" : { + "shape" : "UpdateConfigurationSetEventDestinationResponse", + "documentation" : "UpdateConfigurationSetEventDestinationResponse" + }, + "errors" : [ { + "shape" : "NotFoundException", + "documentation" : "NotFoundException" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "TooManyRequestsException" + }, { + "shape" : "BadRequestException", + "documentation" : "BadRequestException" + }, { + "shape" : "InternalServiceErrorException", + "documentation" : "InternalServiceErrorException" + } ], + "documentation" : "Update an event destination in a configuration set. An event destination is a location that you publish information about your voice calls to. For example, you can log an event to an Amazon CloudWatch destination when a call fails." + } + }, + "shapes" : { + "AlreadyExistsException" : { + "type" : "structure", + "documentation" : "The resource specified in your request already exists.", + "members" : { + "Message" : { + "shape" : "String" + } + }, + "exception" : true, + "error" : { + "httpStatusCode" : 409 + } + }, + "BadRequestException" : { + "type" : "structure", + "documentation" : "The input you provided is invalid.", + "members" : { + "Message" : { + "shape" : "String" + } + }, + "exception" : true, + "error" : { + "httpStatusCode" : 400 + } + }, + "Boolean" : { + "type" : "boolean" + }, + "CallInstructionsMessageType" : { + "type" : "structure", + "members" : { + "Text" : { + "shape" : "NonEmptyString", + "documentation" : "The language to use when delivering the message. For a complete list of supported languages, see the Amazon Polly Developer Guide." 
+ } + }, + "documentation" : "An object that defines a message that contains text formatted using Amazon Pinpoint Voice Instructions markup.", + "required" : [ ] + }, + "CloudWatchLogsDestination" : { + "type" : "structure", + "members" : { + "IamRoleArn" : { + "shape" : "String", + "documentation" : "The Amazon Resource Name (ARN) of an Amazon Identity and Access Management (IAM) role that is able to write event data to an Amazon CloudWatch destination." + }, + "LogGroupArn" : { + "shape" : "String", + "documentation" : "The name of the Amazon CloudWatch Log Group that you want to record events in." + } + }, + "documentation" : "An object that contains information about an event destination that sends data to Amazon CloudWatch Logs.", + "required" : [ ] + }, + "CreateConfigurationSetEventDestinationRequest" : { + "type" : "structure", + "members" : { + "ConfigurationSetName" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "ConfigurationSetName", + "documentation" : "ConfigurationSetName" + }, + "EventDestination" : { + "shape" : "EventDestinationDefinition" + }, + "EventDestinationName" : { + "shape" : "NonEmptyString", + "documentation" : "A name that identifies the event destination." + } + }, + "documentation" : "Create a new event destination in a configuration set.", + "required" : [ "ConfigurationSetName" ] + }, + "CreateConfigurationSetEventDestinationResponse" : { + "type" : "structure", + "members" : { }, + "documentation" : "An empty object that indicates that the event destination was created successfully." + }, + "CreateConfigurationSetRequest" : { + "type" : "structure", + "members" : { + "ConfigurationSetName" : { + "shape" : "WordCharactersWithDelimiters", + "documentation" : "The name that you want to give the configuration set." + } + }, + "documentation" : "A request to create a new configuration set." 
+ }, + "CreateConfigurationSetResponse" : { + "type" : "structure", + "members" : { }, + "documentation" : "An empty object that indicates that the configuration set was successfully created." + }, + "DeleteConfigurationSetEventDestinationRequest" : { + "type" : "structure", + "members" : { + "ConfigurationSetName" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "ConfigurationSetName", + "documentation" : "ConfigurationSetName" + }, + "EventDestinationName" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "EventDestinationName", + "documentation" : "EventDestinationName" + } + }, + "required" : [ "EventDestinationName", "ConfigurationSetName" ] + }, + "DeleteConfigurationSetEventDestinationResponse" : { + "type" : "structure", + "members" : { }, + "documentation" : "An empty object that indicates that the event destination was deleted successfully." + }, + "DeleteConfigurationSetRequest" : { + "type" : "structure", + "members" : { + "ConfigurationSetName" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "ConfigurationSetName", + "documentation" : "ConfigurationSetName" + } + }, + "required" : [ "ConfigurationSetName" ] + }, + "DeleteConfigurationSetResponse" : { + "type" : "structure", + "members" : { }, + "documentation" : "An empty object that indicates that the configuration set was deleted successfully." + }, + "EventDestination" : { + "type" : "structure", + "members" : { + "CloudWatchLogsDestination" : { + "shape" : "CloudWatchLogsDestination" + }, + "Enabled" : { + "shape" : "Boolean", + "documentation" : "Indicates whether or not the event destination is enabled. If the event destination is enabled, then Amazon Pinpoint sends response data to the specified event destination." 
+ }, + "KinesisFirehoseDestination" : { + "shape" : "KinesisFirehoseDestination" + }, + "MatchingEventTypes" : { + "shape" : "EventTypes" + }, + "Name" : { + "shape" : "String", + "documentation" : "A name that identifies the event destination configuration." + }, + "SnsDestination" : { + "shape" : "SnsDestination" + } + }, + "documentation" : "An object that defines an event destination." + }, + "EventDestinationDefinition" : { + "type" : "structure", + "members" : { + "CloudWatchLogsDestination" : { + "shape" : "CloudWatchLogsDestination" + }, + "Enabled" : { + "shape" : "Boolean", + "documentation" : "Indicates whether or not the event destination is enabled. If the event destination is enabled, then Amazon Pinpoint sends response data to the specified event destination." + }, + "KinesisFirehoseDestination" : { + "shape" : "KinesisFirehoseDestination" + }, + "MatchingEventTypes" : { + "shape" : "EventTypes" + }, + "SnsDestination" : { + "shape" : "SnsDestination" + } + }, + "documentation" : "An object that defines a single event destination.", + "required" : [ ] + }, + "EventDestinations" : { + "type" : "list", + "documentation" : "An array of EventDestination objects. Each EventDestination object includes ARNs and other information that define an event destination.", + "member" : { + "shape" : "EventDestination" + } + }, + "EventType" : { + "type" : "string", + "documentation" : "The types of events that are sent to the event destination.", + "enum" : [ "INITIATED_CALL", "RINGING", "ANSWERED", "COMPLETED_CALL", "BUSY", "FAILED", "NO_ANSWER" ] + }, + "EventTypes" : { + "type" : "list", + "documentation" : "An array of EventDestination objects. 
Each EventDestination object includes ARNs and other information that define an event destination.", + "member" : { + "shape" : "EventType" + } + }, + "GetConfigurationSetEventDestinationsRequest" : { + "type" : "structure", + "members" : { + "ConfigurationSetName" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "ConfigurationSetName", + "documentation" : "ConfigurationSetName" + } + }, + "required" : [ "ConfigurationSetName" ] + }, + "GetConfigurationSetEventDestinationsResponse" : { + "type" : "structure", + "members" : { + "EventDestinations" : { + "shape" : "EventDestinations" + } + }, + "documentation" : "An object that contains information about an event destination." + }, + "InternalServiceErrorException" : { + "type" : "structure", + "members" : { + "Message" : { + "shape" : "String" + } + }, + "documentation" : "The API encountered an unexpected error and couldn't complete the request. You might be able to successfully issue the request again in the future.", + "exception" : true, + "error" : { + "httpStatusCode" : 500 + } + }, + "KinesisFirehoseDestination" : { + "type" : "structure", + "members" : { + "DeliveryStreamArn" : { + "shape" : "String", + "documentation" : "The Amazon Resource Name (ARN) of an IAM role that can write data to an Amazon Kinesis Data Firehose stream." + }, + "IamRoleArn" : { + "shape" : "String", + "documentation" : "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose destination that you want to use in the event destination." 
+ } + }, + "documentation" : "An object that contains information about an event destination that sends data to Amazon Kinesis Data Firehose.", + "required" : [ ] + }, + "LimitExceededException" : { + "type" : "structure", + "documentation" : "There are too many instances of the specified resource type.", + "exception" : true, + "members" : { + "Message" : { + "shape" : "String" + } + }, + "error" : { + "httpStatusCode" : 412 + } + }, + "NonEmptyString" : { + "type" : "string" + }, + "NotFoundException" : { + "type" : "structure", + "documentation" : "The resource you attempted to access doesn't exist.", + "exception" : true, + "members" : { + "Message" : { + "shape" : "String" + } + }, + "error" : { + "httpStatusCode" : 404 + } + }, + "PlainTextMessageType" : { + "type" : "structure", + "members" : { + "LanguageCode" : { + "shape" : "String", + "documentation" : "The language to use when delivering the message. For a complete list of supported languages, see the Amazon Polly Developer Guide." + }, + "Text" : { + "shape" : "NonEmptyString", + "documentation" : "The plain (not SSML-formatted) text to deliver to the recipient." + }, + "VoiceId" : { + "shape" : "String", + "documentation" : "The name of the voice that you want to use to deliver the message. For a complete list of supported voices, see the Amazon Polly Developer Guide." + } + }, + "documentation" : "An object that defines a message that contains unformatted text.", + "required" : [ ] + }, + "SSMLMessageType" : { + "type" : "structure", + "members" : { + "LanguageCode" : { + "shape" : "String", + "documentation" : "The language to use when delivering the message. For a complete list of supported languages, see the Amazon Polly Developer Guide." + }, + "Text" : { + "shape" : "NonEmptyString", + "documentation" : "The SSML-formatted text to deliver to the recipient." + }, + "VoiceId" : { + "shape" : "String", + "documentation" : "The name of the voice that you want to use to deliver the message. 
For a complete list of supported voices, see the Amazon Polly Developer Guide." + } + }, + "documentation" : "An object that defines a message that contains SSML-formatted text.", + "required" : [ ] + }, + "SendVoiceMessageRequest" : { + "type" : "structure", + "members" : { + "CallerId" : { + "shape" : "String", + "documentation" : "The phone number that appears on recipients' devices when they receive the message." + }, + "ConfigurationSetName" : { + "shape" : "WordCharactersWithDelimiters", + "documentation" : "The name of the configuration set that you want to use to send the message." + }, + "Content" : { + "shape" : "VoiceMessageContent" + }, + "DestinationPhoneNumber" : { + "shape" : "NonEmptyString", + "documentation" : "The phone number that you want to send the voice message to." + }, + "OriginationPhoneNumber" : { + "shape" : "NonEmptyString", + "documentation" : "The phone number that Amazon Pinpoint should use to send the voice message. This isn't necessarily the phone number that appears on recipients' devices when they receive the message, because you can specify a CallerId parameter in the request." + } + }, + "documentation" : "SendVoiceMessageRequest" + }, + "SendVoiceMessageResponse" : { + "type" : "structure", + "members" : { + "MessageId" : { + "shape" : "String", + "documentation" : "A unique identifier for the voice message." + } + }, + "documentation" : "An object that that contains the Message ID of a Voice message that was sent successfully." + }, + "SnsDestination" : { + "type" : "structure", + "members" : { + "TopicArn" : { + "shape" : "String", + "documentation" : "The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish events to." 
+ } + }, + "documentation" : "An object that contains information about an event destination that sends data to Amazon SNS.", + "required" : [ ] + }, + "String" : { + "type" : "string" + }, + "TooManyRequestsException" : { + "type" : "structure", + "documentation" : "You've issued too many requests to the resource. Wait a few minutes, and then try again.", + "exception" : true, + "members" : { + "Message" : { + "shape" : "String" + } + }, + "error" : { + "httpStatusCode" : 429 + } + }, + "UpdateConfigurationSetEventDestinationRequest" : { + "type" : "structure", + "members" : { + "ConfigurationSetName" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "ConfigurationSetName", + "documentation" : "ConfigurationSetName" + }, + "EventDestination" : { + "shape" : "EventDestinationDefinition" + }, + "EventDestinationName" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "EventDestinationName", + "documentation" : "EventDestinationName" + } + }, + "documentation" : "UpdateConfigurationSetEventDestinationRequest", + "required" : [ "EventDestinationName", "ConfigurationSetName" ] + }, + "UpdateConfigurationSetEventDestinationResponse" : { + "type" : "structure", + "members" : { }, + "documentation" : "An empty object that indicates that the event destination was updated successfully." + }, + "VoiceMessageContent" : { + "type" : "structure", + "members" : { + "CallInstructionsMessage" : { + "shape" : "CallInstructionsMessageType" + }, + "PlainTextMessage" : { + "shape" : "PlainTextMessageType" + }, + "SSMLMessage" : { + "shape" : "SSMLMessageType" + } + }, + "documentation" : "An object that contains a voice message and information about the recipient that you want to send it to." 
+ }, + "WordCharactersWithDelimiters" : { + "type" : "string" + }, + "__boolean" : { + "type" : "boolean" + }, + "__double" : { + "type" : "double" + }, + "__integer" : { + "type" : "integer" + }, + "__long" : { + "type" : "long" + }, + "__string" : { + "type" : "string" + }, + "__timestampIso8601" : { + "type" : "timestamp", + "timestampFormat" : "iso8601" + }, + "__timestampUnix" : { + "type" : "timestamp", + "timestampFormat" : "unixTimestamp" + } + }, + "documentation" : "Pinpoint SMS and Voice Messaging public facing APIs" +} \ No newline at end of file diff --git a/botocore/data/pinpoint/2016-12-01/service-2.json b/botocore/data/pinpoint/2016-12-01/service-2.json index 5eae4b6f..e74763f2 100644 --- a/botocore/data/pinpoint/2016-12-01/service-2.json +++ b/botocore/data/pinpoint/2016-12-01/service-2.json @@ -1867,6 +1867,22 @@ } ], "documentation" : "Get a Voice Channel" }, + "ListTagsForResource" : { + "name" : "ListTagsForResource", + "http" : { + "method" : "GET", + "requestUri" : "/v1/tags/{resource-arn}", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListTagsForResourceRequest" + }, + "output" : { + "shape" : "ListTagsForResourceResponse", + "documentation" : "200 response" + }, + "errors" : [ ] + }, "PhoneNumberValidate" : { "name" : "PhoneNumberValidate", "http" : { @@ -2077,6 +2093,30 @@ } ], "documentation" : "Used to send a message to a list of users." 
}, + "TagResource" : { + "name" : "TagResource", + "http" : { + "method" : "POST", + "requestUri" : "/v1/tags/{resource-arn}", + "responseCode" : 204 + }, + "input" : { + "shape" : "TagResourceRequest" + }, + "errors" : [ ] + }, + "UntagResource" : { + "name" : "UntagResource", + "http" : { + "method" : "DELETE", + "requestUri" : "/v1/tags/{resource-arn}", + "responseCode" : 204 + }, + "input" : { + "shape" : "UntagResourceRequest" + }, + "errors" : [ ] + }, "UpdateAdmChannel" : { "name" : "UpdateAdmChannel", "http" : { @@ -3301,6 +3341,10 @@ "ApplicationResponse" : { "type" : "structure", "members" : { + "Arn" : { + "shape" : "__string", + "documentation" : "The arn for the application." + }, "Id" : { "shape" : "__string", "documentation" : "The unique application ID." @@ -3308,6 +3352,11 @@ "Name" : { "shape" : "__string", "documentation" : "The display name of the application." + }, + "tags": { + "shape" : "MapOf__string", + "locationName" : "tags", + "documentation" : "The Tags for the application." } }, "documentation" : "Application Response.", @@ -3631,6 +3680,10 @@ "shape" : "__string", "documentation" : "The ID of the application to which the campaign applies." }, + "Arn" : { + "shape" : "__string", + "documentation" : "The arn for the campaign." + }, "CreationDate" : { "shape" : "__string", "documentation" : "The date the campaign was created in ISO 8601 format." @@ -3691,6 +3744,11 @@ "shape" : "CampaignState", "documentation" : "The campaign status.\n\nAn A/B test campaign will have a status of COMPLETED only when all treatments have a status of COMPLETED." }, + "tags": { + "shape" : "MapOf__string", + "locationName" : "tags", + "documentation" : "The Tags for the campaign." + }, "TreatmentDescription" : { "shape" : "__string", "documentation" : "A custom description for the treatment." @@ -3837,6 +3895,11 @@ "Name" : { "shape" : "__string", "documentation" : "The display name of the application. Used in the Amazon Pinpoint console." 
+ }, + "tags": { + "shape" : "MapOf__string", + "locationName" : "tags", + "documentation" : "The Tags for the app." } }, "documentation" : "Application Request.", @@ -4901,6 +4964,18 @@ "Event" : { "type" : "structure", "members" : { + "AppPackageName" : { + "shape" : "__string", + "documentation" : "The package name associated with the app that's recording the event." + }, + "AppTitle" : { + "shape" : "__string", + "documentation" : "The title of the app that's recording the event." + }, + "AppVersionCode" : { + "shape" : "__string", + "documentation" : "The version number of the app that's recording the event." + }, "Attributes" : { "shape" : "MapOf__string", "documentation" : "Custom attributes that are associated with the event you're adding or updating." @@ -4917,6 +4992,10 @@ "shape" : "MapOf__double", "documentation" : "Custom metrics related to the event." }, + "SdkName" : { + "shape" : "__string", + "documentation" : "The name of the SDK that's being used to record the event." + }, "Session" : { "shape" : "Session", "documentation" : "Information about the session in which the event occurred." @@ -6447,6 +6526,27 @@ "type" : "string", "enum" : [ "CREATED", "INITIALIZING", "PROCESSING", "COMPLETING", "COMPLETED", "FAILING", "FAILED" ] }, + "ListTagsForResourceRequest" : { + "type" : "structure", + "members" : { + "ResourceArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "resource-arn" + } + }, + "required" : [ "ResourceArn" ] + }, + "ListTagsForResourceResponse" : { + "type" : "structure", + "members" : { + "TagsModel" : { + "shape" : "TagsModel" + } + }, + "required" : [ "TagsModel" ], + "payload" : "TagsModel" + }, "Message" : { "type" : "structure", "members" : { @@ -7284,6 +7384,10 @@ "shape" : "__string", "documentation" : "The ID of the application that the segment applies to." }, + "Arn" : { + "shape" : "__string", + "documentation" : "The arn for the segment." 
+ }, "CreationDate" : { "shape" : "__string", "documentation" : "The date and time when the segment was created." @@ -7316,6 +7420,11 @@ "shape" : "SegmentType", "documentation" : "The segment type:\nDIMENSIONAL - A dynamic segment built from selection criteria based on endpoint data reported by your app. You create this type of segment by using the segment builder in the Amazon Pinpoint console or by making a POST request to the segments resource.\nIMPORT - A static segment built from an imported set of endpoint definitions. You create this type of segment by importing a segment in the Amazon Pinpoint console or by making a POST request to the jobs/import resource." }, + "tags": { + "shape" : "MapOf__string", + "locationName" : "tags", + "documentation" : "The Tags for the segment." + }, "Version" : { "shape" : "__integer", "documentation" : "The segment version number." @@ -7511,6 +7620,31 @@ "type" : "string", "enum" : [ "ALL", "ANY", "NONE" ] }, + "TagResourceRequest" : { + "type" : "structure", + "members" : { + "ResourceArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "resource-arn" + }, + "TagsModel" : { + "shape" : "TagsModel" + } + }, + "required" : [ "ResourceArn", "TagsModel" ], + "payload" : "TagsModel" + }, + "TagsModel" : { + "type" : "structure", + "members" : { + "tags": { + "shape" : "MapOf__string", + "locationName" : "tags" + } + }, + "required" : [ "tags" ] + }, "TooManyRequestsException" : { "type" : "structure", "members" : { @@ -7568,6 +7702,23 @@ "type" : "string", "enum" : [ "ALL", "ANY", "NONE" ] }, + "UntagResourceRequest" : { + "type" : "structure", + "members" : { + "ResourceArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "resource-arn" + }, + "TagKeys" : { + "shape" : "ListOf__string", + "location" : "querystring", + "locationName" : "tagKeys", + "documentation" : "The key(s) of tag to be deleted" + } + }, + "required" : [ "TagKeys", "ResourceArn" ] + }, "UpdateAdmChannelRequest" : { 
"type" : "structure", "members" : { @@ -8138,6 +8289,11 @@ "shape" : "__integer", "documentation" : "The version of the segment to which the campaign sends messages." }, + "tags": { + "shape" : "MapOf__string", + "locationName" : "tags", + "documentation" : "The Tags for the campaign." + }, "TreatmentDescription" : { "shape" : "__string", "documentation" : "A custom description for the treatment." @@ -8178,6 +8334,11 @@ "SegmentGroups" : { "shape" : "SegmentGroupList", "documentation" : "A segment group, which consists of zero or more source segments, plus dimensions that are applied to those source segments. Your request can only include one segment group. Your request can include either a SegmentGroups object or a Dimensions object, but not both." + }, + "tags": { + "shape" : "MapOf__string", + "locationName" : "tags", + "documentation" : "The Tags for the segments." } }, "documentation" : "Segment definition.", @@ -8462,4 +8623,4 @@ "timestampFormat" : "unixTimestamp" } } -} +} \ No newline at end of file diff --git a/botocore/data/polly/2016-06-10/paginators-1.json b/botocore/data/polly/2016-06-10/paginators-1.json index c24ff035..dc76e7c1 100644 --- a/botocore/data/polly/2016-06-10/paginators-1.json +++ b/botocore/data/polly/2016-06-10/paginators-1.json @@ -4,6 +4,17 @@ "input_token": "NextToken", "output_token": "NextToken", "result_key": "Voices" + }, + "ListLexicons": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Lexicons" + }, + "ListSpeechSynthesisTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SynthesisTasks" } } } diff --git a/botocore/data/ram/2018-01-04/paginators-1.json b/botocore/data/ram/2018-01-04/paginators-1.json index ea142457..ec438a09 100644 --- a/botocore/data/ram/2018-01-04/paginators-1.json +++ b/botocore/data/ram/2018-01-04/paginators-1.json @@ -1,3 +1,40 @@ { - "pagination": {} + "pagination": { + "GetResourcePolicies": { + 
"input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "policies" + }, + "GetResourceShareAssociations": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "resourceShareAssociations" + }, + "GetResourceShareInvitations": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "resourceShareInvitations" + }, + "GetResourceShares": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "resourceShares" + }, + "ListPrincipals": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "principals" + }, + "ListResources": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "resources" + } + } } diff --git a/botocore/data/rds-data/2018-08-01/service-2.json b/botocore/data/rds-data/2018-08-01/service-2.json index 9058c641..d4971a30 100644 --- a/botocore/data/rds-data/2018-08-01/service-2.json +++ b/botocore/data/rds-data/2018-08-01/service-2.json @@ -1,68 +1,262 @@ { - "version": "2.0", - "metadata": { - "apiVersion": "2018-08-01", - "endpointPrefix": "rds-data", - "jsonVersion": "1.1", - "protocol": "rest-json", - "serviceFullName": "AWS RDS DataService", - "serviceId": "RDS Data", - "signatureVersion": "v4", - "signingName": "rds-data", - "uid": "rds-data-2018-08-01" - }, - "documentation": "AWS RDS DataService provides Http Endpoint to query RDS databases.", - "operations": { - "ExecuteSql": { - "name": "ExecuteSql", - "http": { - "method": "POST", - "requestUri": "/ExecuteSql", - "responseCode": 200 - }, - "input": { - "shape": "ExecuteSqlRequest" - }, - "output": { - "shape": "ExecuteSqlResponse" + "version": "2.0", + "metadata": { + "apiVersion": "2018-08-01", + "endpointPrefix": "rds-data", + "jsonVersion": "1.1", + "protocol": "rest-json", + 
"serviceFullName": "AWS RDS DataService", + "serviceId": "RDS Data", + "signatureVersion": "v4", + "signingName": "rds-data", + "uid": "rds-data-2018-08-01" + }, + "documentation": "AWS RDS DataService provides Http Endpoint to query RDS databases.", + "operations": { + "ExecuteSql": { + "name": "ExecuteSql", + "http": { + "method": "POST", + "requestUri": "/ExecuteSql", + "responseCode": 200 + }, + "input": { + "shape": "ExecuteSqlRequest" + }, + "output": { + "shape": "ExecuteSqlResponse" }, "errors": [ - { - "shape": "BadRequestException" + { + "shape": "BadRequestException" }, { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "ServiceUnavailableError" + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "ServiceUnavailableError" + } + ], + "documentation": "Executes any SQL statement on the target database synchronously" } - ], - "documentation": "Executes any SQL statement on the target database synchronously" - } - }, - "shapes": { - "Boolean": { - "type": "boolean", - "box": true }, - "SqlStatementResult": { - "type": "structure", - "members": { - "numberOfRecordsUpdated": { - "shape": "Long", - "documentation": "Number of rows updated." 
- }, - "resultFrame": { - "shape": "ResultFrame", - "documentation": "ResultFrame returned by executing the sql statement" + "shapes": { + "Boolean": { + "type": "boolean", + "box": true + }, + "ForbiddenException": { + "type": "structure", + "members": { + "message": { + "shape": "String", + "documentation": "Error message" + } + }, + "documentation": "Access denied exception", + "exception": true, + "error": { + "code": "ForbiddenException", + "httpStatusCode": 403, + "senderFault": true } }, - "documentation": "SQL statement execution result" + "Value": { + "type": "structure", + "members": { + "arrayValues": { + "shape": "ArrayValues", + "documentation": "Arbitrarily nested arrays" + }, + "bigIntValue": { + "shape": "Long", + "documentation": "Long value" + }, + "bitValue": { + "shape": "Boolean", + "documentation": "Bit value" + }, + "blobValue": { + "shape": "Blob", + "documentation": "Blob value" + }, + "doubleValue": { + "shape": "Double", + "documentation": "Double value" + }, + "intValue": { + "shape": "Integer", + "documentation": "Integer value" + }, + "isNull": { + "shape": "Boolean", + "documentation": "Is column null" + }, + "realValue": { + "shape": "Float", + "documentation": "Float value" + }, + "stringValue": { + "shape": "String", + "documentation": "String value" + }, + "structValue": { + "shape": "StructValue", + "documentation": "Struct or UDT" + } + }, + "documentation": "Column value" + }, + "SqlStatementResults": { + "type": "list", + "member": { + "shape": "SqlStatementResult" + }, + "documentation": "SQL statement execution results" + }, + "ColumnMetadataList": { + "type": "list", + "member": { + "shape": "ColumnMetadata" + }, + "documentation": "List of Column metadata" + }, + "ResultFrame": { + "type": "structure", + "members": { + "records": { + "shape": "Records", + "documentation": "ResultSet Metadata." + }, + "resultSetMetadata": { + "shape": "ResultSetMetadata", + "documentation": "ResultSet Metadata." 
+ } + }, + "documentation": "Result Frame" + }, + "Long": { + "type": "long", + "box": true + }, + "Row": { + "type": "list", + "member": { + "shape": "Value" + }, + "documentation": "List of column values" + }, + "String": { + "type": "string" + }, + "ArrayValues": { + "type": "list", + "member": { + "shape": "Value" + }, + "documentation": "Array value" + }, + "Float": { + "type": "float", + "box": true + }, + "ExecuteSqlResponse": { + "type": "structure", + "required": [ + "sqlStatementResults" + ], + "members": { + "sqlStatementResults": { + "shape": "SqlStatementResults", + "documentation": "Results returned by executing the sql statement(s)" + } + }, + "documentation": "Execute SQL response" + }, + "SqlStatementResult": { + "type": "structure", + "members": { + "numberOfRecordsUpdated": { + "shape": "Long", + "documentation": "Number of rows updated." + }, + "resultFrame": { + "shape": "ResultFrame", + "documentation": "ResultFrame returned by executing the sql statement" + } + }, + "documentation": "SQL statement execution result" + }, + "ResultSetMetadata": { + "type": "structure", + "members": { + "columnCount": { + "shape": "Long", + "documentation": "Number of columns" + }, + "columnMetadata": { + "shape": "ColumnMetadataList", + "documentation": "List of columns and their types" + } + }, + "documentation": "List of columns and their types." 
+ }, + "Records": { + "type": "list", + "member": { + "shape": "Record" + }, + "documentation": "List of records" + }, + "ExecuteSqlRequest": { + "type": "structure", + "required": [ + "awsSecretStoreArn", + "dbClusterOrInstanceArn", + "sqlStatements" + ], + "members": { + "awsSecretStoreArn": { + "shape": "Arn", + "documentation": "ARN of the db credentials in AWS Secret Store or the friendly secret name" + }, + "database": { + "shape": "DbName", + "documentation": "Target DB name" + }, + "dbClusterOrInstanceArn": { + "shape": "Arn", + "documentation": "ARN of the target db cluster or instance" + }, + "schema": { + "shape": "DbName", + "documentation": "Target Schema name" + }, + "sqlStatements": { + "shape": "SqlStatement", + "documentation": "SQL statement(s) to be executed. Statements can be chained by using semicolons" + } + }, + "documentation": "Execute SQL Request" }, - "ForbiddenException": { + "Arn": { + "type": "string", + "max": 1024 + }, + "StructValue": { + "type": "structure", + "members": { + "attributes": { + "shape": "ArrayValues", + "documentation": "Struct or UDT" + } + }, + "documentation": "User Defined Type" + }, + "BadRequestException": { "type": "structure", "members": { "message": { @@ -70,308 +264,126 @@ "documentation": "Error message" } }, - "documentation": "Access denied exception", + "documentation": "Invalid Request exception", "exception": true, "error": { - "code": "ForbiddenException", - "httpStatusCode": 403, + "code": "BadRequestException", + "httpStatusCode": 400, "senderFault": true } }, - "Value": { - "type": "structure", - "members": { - "arrayValues": { - "shape": "ArrayValues", - "documentation": "Arbitrarily nested arrays" - }, - "bigIntValue": { - "shape": "Long", - "documentation": "Long value" - }, - "bitValue": { - "shape": "Boolean", - "documentation": "Bit value" - }, - "blobValue": { - "shape": "Blob", - "documentation": "Blob value" - }, - "doubleValue": { - "shape": "Double", - "documentation": "Double value" - 
}, - "intValue": { - "shape": "Integer", - "documentation": "Integer value" - }, - "isNull": { - "shape": "Boolean", - "documentation": "Is column null" - }, - "realValue": { - "shape": "Float", - "documentation": "Float value" - }, - "stringValue": { - "shape": "String", - "documentation": "String value" - }, - "structValue": { - "shape": "StructValue", - "documentation": "Struct or UDT" - } - }, - "documentation": "Column value" + "Blob": { + "type": "blob" }, - "SqlStatementResults": { - "type": "list", - "member": { - "shape": "SqlStatementResult" - }, - "documentation": "SQL statement execution results" + "SqlStatement": { + "type": "string", + "max": 65536 }, - "ColumnMetadataList": { - "type": "list", - "member": { - "shape": "ColumnMetadata" - }, - "documentation": "List of Column metadata" - }, - "ResultSetMetadata": { - "type": "structure", - "members": { - "columnCount": { - "shape": "Long", - "documentation": "Number of columns" - }, - "columnMetadata": { - "shape": "ColumnMetadataList", - "documentation": "List of columns and their types" - } - }, - "documentation": "List of columns and their types." - }, - "Records": { - "type": "list", - "member": { - "shape": "Record" - }, - "documentation": "List of records" - }, - "ResultFrame": { - "type": "structure", - "members": { - "records": { - "shape": "Records", - "documentation": "ResultSet Metadata." - }, - "resultSetMetadata": { - "shape": "ResultSetMetadata", - "documentation": "ResultSet Metadata." 
- } - }, - "documentation": "Result Frame" - }, - "ExecuteSqlRequest": { - "type": "structure", - "required": [ - "awsSecretStoreArn", - "dbClusterOrInstanceArn", - "sqlStatements" - ], - "members": { - "awsSecretStoreArn": { - "shape": "String", - "documentation": "ARN of the db credentials in AWS Secret Store or the friendly secret name" - }, - "database": { - "shape": "String", - "documentation": "Target DB name" - }, - "dbClusterOrInstanceArn": { - "shape": "String", - "documentation": "ARN of the target db cluster or instance" - }, - "schema": { - "shape": "String", - "documentation": "Target Schema name" - }, - "sqlStatements": { - "shape": "String", - "documentation": "SQL statement(s) to be executed. Statements can be chained by using semicolons" - } - }, - "documentation": "Execute SQL Request" - }, - "Long": { - "type": "long", + "Double": { + "type": "double", "box": true }, - "StructValue": { + "ServiceUnavailableError": { + "type": "structure", + "members": { }, + "documentation": "Internal service unavailable error", + "exception": true, + "error": { + "code": "ServiceUnavailableError", + "httpStatusCode": 503, + "fault": true + } + }, + "ColumnMetadata": { "type": "structure", "members": { - "attributes": { - "shape": "ArrayValues", - "documentation": "Struct or UDT" - } + "arrayBaseColumnType": { + "shape": "Integer", + "documentation": "Homogenous array base SQL type from java.sql.Types." 
}, - "documentation": "User Defined Type" - }, - "BadRequestException": { - "type": "structure", - "members": { - "message": { - "shape": "String", - "documentation": "Error message" - } - }, - "documentation": "Invalid Request exception", - "exception": true, - "error": { - "code": "BadRequestException", - "httpStatusCode": 400, - "senderFault": true + "isAutoIncrement": { + "shape": "Boolean", + "documentation": "Whether the designated column is automatically numbered" + }, + "isCaseSensitive": { + "shape": "Boolean", + "documentation": "Whether values in the designated column's case matters" + }, + "isCurrency": { + "shape": "Boolean", + "documentation": "Whether values in the designated column is a cash value" + }, + "isSigned": { + "shape": "Boolean", + "documentation": "Whether values in the designated column are signed numbers" + }, + "label": { + "shape": "String", + "documentation": "Usually specified by the SQL AS. If not specified, return column name." + }, + "name": { + "shape": "String", + "documentation": "Name of the column." + }, + "nullable": { + "shape": "Integer", + "documentation": "Indicates the nullability of values in the designated column. One of columnNoNulls (0), columnNullable (1), columnNullableUnknown (2)" + }, + "precision": { + "shape": "Integer", + "documentation": "Get the designated column's specified column size.For numeric data, this is the maximum precision. For character data, this is the length in characters. For datetime datatypes, this is the length in characters of the String representation (assuming the maximum allowed precision of the fractional seconds component). For binary data, this is the length in bytes. For the ROWID datatype, this is the length in bytes. 0 is returned for data types where the column size is not applicable." + }, + "scale": { + "shape": "Integer", + "documentation": "Designated column's number of digits to right of the decimal point. 0 is returned for data types where the scale is not applicable." 
+ }, + "schemaName": { + "shape": "String", + "documentation": "Designated column's table's schema" + }, + "tableName": { + "shape": "String", + "documentation": "Designated column's table name" + }, + "type": { + "shape": "Integer", + "documentation": "SQL type from java.sql.Types." + }, + "typeName": { + "shape": "String", + "documentation": "Database-specific type name." } }, - "Blob": { - "type": "blob" + "documentation": "Column Metadata" + }, + "Integer": { + "type": "integer", + "box": true + }, + "DbName": { + "type": "string", + "max": 64 + }, + "Record": { + "type": "structure", + "members": { + "values": { + "shape": "Row", + "documentation": "Record" + } }, - "Row": { - "type": "list", - "member": { - "shape": "Value" - }, - "documentation": "List of column values" - }, - "String": { - "type": "string" - }, - "ArrayValues": { - "type": "list", - "member": { - "shape": "Value" - }, - "documentation": "Array value" - }, - "Double": { - "type": "double", - "box": true - }, - "ServiceUnavailableError": { - "type": "structure", - "members": { }, - "documentation": "Internal service unavailable error", - "exception": true, - "error": { - "code": "ServiceUnavailableError", - "httpStatusCode": 503, - "fault": true - } - }, - "ColumnMetadata": { - "type": "structure", - "members": { - "arrayBaseColumnType": { - "shape": "Integer", - "documentation": "Homogenous array base SQL type from java.sql.Types." 
- }, - "isAutoIncrement": { - "shape": "Boolean", - "documentation": "Whether the designated column is automatically numbered" - }, - "isCaseSensitive": { - "shape": "Boolean", - "documentation": "Whether values in the designated column's case matters" - }, - "isCurrency": { - "shape": "Boolean", - "documentation": "Whether values in the designated column is a cash value" - }, - "isSigned": { - "shape": "Boolean", - "documentation": "Whether values in the designated column are signed numbers" - }, - "label": { - "shape": "String", - "documentation": "Usually specified by the SQL AS. If not specified, return column name." - }, - "name": { - "shape": "String", - "documentation": "Name of the column." - }, - "nullable": { - "shape": "Integer", - "documentation": "Indicates the nullability of values in the designated column. One of columnNoNulls (0), columnNullable (1), columnNullableUnknown (2)" - }, - "precision": { - "shape": "Integer", - "documentation": "Get the designated column's specified column size.For numeric data, this is the maximum precision. For character data, this is the length in characters. For datetime datatypes, this is the length in characters of the String representation (assuming the maximum allowed precision of the fractional seconds component). For binary data, this is the length in bytes. For the ROWID datatype, this is the length in bytes. 0 is returned for data types where the column size is not applicable." - }, - "scale": { - "shape": "Integer", - "documentation": "Designated column's number of digits to right of the decimal point. 0 is returned for data types where the scale is not applicable." - }, - "schemaName": { - "shape": "String", - "documentation": "Designated column's table's schema" - }, - "tableName": { - "shape": "String", - "documentation": "Designated column's table name" - }, - "type": { - "shape": "Integer", - "documentation": "SQL type from java.sql.Types." 
- }, - "typeName": { - "shape": "String", - "documentation": "Database-specific type name." - } - }, - "documentation": "Column Metadata" - }, - "Integer": { - "type": "integer", - "box": true - }, - "Float": { - "type": "float", - "box": true - }, - "Record": { - "type": "structure", - "members": { - "values": { - "shape": "Row", - "documentation": "Record" - } - }, - "documentation": "Row or Record" - }, - "InternalServerErrorException": { - "type": "structure", - "members": { }, - "documentation": "Internal service error", - "exception": true, - "error": { - "code": "InternalServerErrorException", - "httpStatusCode": 500, - "fault": true - } - }, - "ExecuteSqlResponse": { - "type": "structure", - "required": [ - "sqlStatementResults" - ], - "members": { - "sqlStatementResults": { - "shape": "SqlStatementResults", - "documentation": "Results returned by executing the sql statement(s)" - } - }, - "documentation": "Execute SQL response" - } - } - } + "documentation": "Row or Record" + }, + "InternalServerErrorException": { + "type": "structure", + "members": { }, + "documentation": "Internal service error", + "exception": true, + "error": { + "code": "InternalServerErrorException", + "httpStatusCode": 500, + "fault": true + } + } + } +} diff --git a/botocore/data/rds/2014-10-31/paginators-1.json b/botocore/data/rds/2014-10-31/paginators-1.json index f273f358..b920e8cb 100644 --- a/botocore/data/rds/2014-10-31/paginators-1.json +++ b/botocore/data/rds/2014-10-31/paginators-1.json @@ -168,6 +168,12 @@ "limit_key": "NumberOfLines", "more_results": "AdditionalDataPending", "result_key": "LogFileData" + }, + "DescribeDBClusterEndpoints": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusterEndpoints" } } } diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index ac33d1b7..a70d3ce3 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ 
b/botocore/data/rds/2014-10-31/service-2.json @@ -25,7 +25,22 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBClusterRoleQuotaExceededFault"} ], - "documentation":"

Associates an Identity and Access Management (IAM) role from an Aurora DB cluster. For more information, see Authorizing Amazon Aurora MySQL to Access Other AWS Services on Your Behalf in the Amazon Aurora User Guide.

" + "documentation":"

Associates an Identity and Access Management (IAM) role from an Amazon Aurora DB cluster. For more information, see Authorizing Amazon Aurora MySQL to Access Other AWS Services on Your Behalf in the Amazon Aurora User Guide.

" + }, + "AddRoleToDBInstance":{ + "name":"AddRoleToDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddRoleToDBInstanceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBInstanceRoleAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceRoleQuotaExceededFault"} + ], + "documentation":"

Associates an AWS Identity and Access Management (IAM) role with a DB instance.

" }, "AddSourceIdentifierToSubscription":{ "name":"AddSourceIdentifierToSubscription", @@ -1618,7 +1633,21 @@ {"shape":"DBClusterRoleNotFoundFault"}, {"shape":"InvalidDBClusterStateFault"} ], - "documentation":"

Disassociates an Identity and Access Management (IAM) role from an Aurora DB cluster. For more information, see Authorizing Amazon Aurora MySQL to Access Other AWS Services on Your Behalf in the Amazon Aurora User Guide.

" + "documentation":"

Disassociates an AWS Identity and Access Management (IAM) role from an Amazon Aurora DB cluster. For more information, see Authorizing Amazon Aurora MySQL to Access Other AWS Services on Your Behalf in the Amazon Aurora User Guide.

" + }, + "RemoveRoleFromDBInstance":{ + "name":"RemoveRoleFromDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveRoleFromDBInstanceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBInstanceRoleNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"} + ], + "documentation":"

Disassociates an AWS Identity and Access Management (IAM) role from a DB instance.

" }, "RemoveSourceIdentifierFromSubscription":{ "name":"RemoveSourceIdentifierFromSubscription", @@ -2037,6 +2066,28 @@ } } }, + "AddRoleToDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "RoleArn", + "FeatureName" + ], + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB instance to associate the IAM role with.

" + }, + "RoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to associate with the DB instance, for example arn:aws:iam::123456789012:role/AccessRole.

" + }, + "FeatureName":{ + "shape":"String", + "documentation":"

The name of the feature for the DB instance that the IAM role is to be associated with. For the list of supported feature names, see DBEngineVersion.

" + } + } + }, "AddSourceIdentifierToSubscriptionMessage":{ "type":"structure", "required":[ @@ -2271,6 +2322,7 @@ "type":"structure", "members":{ }, + "documentation":"

The backup policy was not found.

", "deprecated":true, "deprecatedMessage":"Please avoid using this fault", "error":{ @@ -4135,6 +4187,10 @@ "SupportedEngineModes":{ "shape":"EngineModeList", "documentation":"

A list of the supported DB engine modes.

" + }, + "SupportedFeatureNames":{ + "shape":"FeatureNameList", + "documentation":"

A list of features supported by the DB engine. Supported feature names include the following.

  • s3Import

" } }, "documentation":"

This data type is used as a response element in the action DescribeDBEngineVersions.

" @@ -4383,6 +4439,10 @@ "shape":"Boolean", "documentation":"

Indicates if the DB instance has deletion protection enabled. The database can't be deleted when this value is set to true. For more information, see Deleting a DB Instance.

" }, + "AssociatedRoles":{ + "shape":"DBInstanceRoles", + "documentation":"

The AWS Identity and Access Management (IAM) roles associated with the DB instance.

" + }, "ListenerEndpoint":{ "shape":"Endpoint", "documentation":"

Specifies the listener connection endpoint for SQL Server Always On.

" @@ -4580,6 +4640,67 @@ }, "exception":true }, + "DBInstanceRole":{ + "type":"structure", + "members":{ + "RoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that is associated with the DB instance.

" + }, + "FeatureName":{ + "shape":"String", + "documentation":"

The name of the feature associated with the AWS Identity and Access Management (IAM) role. For the list of supported feature names, see DBEngineVersion.

" + }, + "Status":{ + "shape":"String", + "documentation":"

Describes the state of association between the IAM role and the DB instance. The Status property returns one of the following values:

  • ACTIVE - the IAM role ARN is associated with the DB instance and can be used to access other AWS services on your behalf.

  • PENDING - the IAM role ARN is being associated with the DB instance.

  • INVALID - the IAM role ARN is associated with the DB instance, but the DB instance is unable to assume the IAM role in order to access other AWS services on your behalf.

" + } + }, + "documentation":"

Describes an AWS Identity and Access Management (IAM) role that is associated with a DB instance.

" + }, + "DBInstanceRoleAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified RoleArn or FeatureName value is already associated with the DB instance.

", + "error":{ + "code":"DBInstanceRoleAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceRoleNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified RoleArn value doesn't match the specified feature for the DB instance.

", + "error":{ + "code":"DBInstanceRoleNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBInstanceRoleQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You can't associate any more AWS Identity and Access Management (IAM) roles with the DB instance because the quota has been reached.

", + "error":{ + "code":"DBInstanceRoleQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceRoles":{ + "type":"list", + "member":{ + "shape":"DBInstanceRole", + "locationName":"DBInstanceRole" + } + }, "DBInstanceStatusInfo":{ "type":"structure", "members":{ @@ -6710,6 +6831,10 @@ "DBCluster":{"shape":"DBCluster"} } }, + "FeatureNameList":{ + "type":"list", + "member":{"shape":"String"} + }, "Filter":{ "type":"structure", "required":[ @@ -7557,7 +7682,7 @@ }, "CloudwatchLogsExportConfiguration":{ "shape":"CloudwatchLogsExportConfiguration", - "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance.

" + "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance.

A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -8750,6 +8875,28 @@ } } }, + "RemoveRoleFromDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "RoleArn", + "FeatureName" + ], + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB instance to disassociate the IAM role from.

" + }, + "RoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to disassociate from the DB instance, for example arn:aws:iam::123456789012:role/AccessRole.

" + }, + "FeatureName":{ + "shape":"String", + "documentation":"

The name of the feature for the DB instance that the IAM role is to be disassociated from. For the list of supported feature names, see DBEngineVersion.

" + } + } + }, "RemoveSourceIdentifierFromSubscriptionMessage":{ "type":"structure", "required":[ diff --git a/botocore/data/redshift/2012-12-01/paginators-1.json b/botocore/data/redshift/2012-12-01/paginators-1.json index 03027de3..e423444e 100644 --- a/botocore/data/redshift/2012-12-01/paginators-1.json +++ b/botocore/data/redshift/2012-12-01/paginators-1.json @@ -89,6 +89,48 @@ "output_token": "Marker", "limit_key": "MaxRecords", "result_key": "ReservedNodes" + }, + "DescribeClusterDbRevisions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "ClusterDbRevisions" + }, + "DescribeClusterTracks": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "MaintenanceTracks" + }, + "DescribeSnapshotCopyGrants": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "SnapshotCopyGrants" + }, + "DescribeSnapshotSchedules": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "SnapshotSchedules" + }, + "DescribeTableRestoreStatus": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "TableRestoreStatusDetails" + }, + "DescribeTags": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "TaggedResources" + }, + "GetReservedNodeExchangeOfferings": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "ReservedNodeOfferings" } } } diff --git a/botocore/data/redshift/2012-12-01/service-2.json b/botocore/data/redshift/2012-12-01/service-2.json index 7edb26e1..f432b4fa 100644 --- a/botocore/data/redshift/2012-12-01/service-2.json +++ b/botocore/data/redshift/2012-12-01/service-2.json @@ -1556,6 +1556,13 @@ "locationName":"AccountWithRestoreAccess" } }, + "AssociatedClusterList":{ + "type":"list", + "member":{ + 
"shape":"ClusterAssociatedToSchedule", + "locationName":"ClusterAssociatedToSchedule" + } + }, "AttributeList":{ "type":"list", "member":{ @@ -1999,6 +2006,13 @@ }, "exception":true }, + "ClusterAssociatedToSchedule":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "ScheduleAssociationState":{"shape":"ScheduleState"} + } + }, "ClusterCredentials":{ "type":"structure", "members":{ @@ -3530,7 +3544,7 @@ }, "ClusterExists":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to return snapshots only for an existing cluster. Table-level restore can be performed only using a snapshot of an existing cluster, that is, a cluster that has not been deleted.

  • If ClusterExists is set to true, ClusterIdentifier is required.

  • If ClusterExists is set to false and ClusterIdentifier is not specified, all snapshots associated with deleted clusters (orphaned snapshots) are returned.

  • If ClusterExists is set to false and ClusterIdentifier is specified for a deleted cluster, snapshots associated with that cluster are returned.

  • If ClusterExists is set to false and ClusterIdentifier is specified for an existing cluster, no snapshots are returned.

" + "documentation":"

A value that indicates whether to return snapshots only for an existing cluster. Table-level restore can be performed only using a snapshot of an existing cluster, that is, a cluster that has not been deleted. If ClusterExists is set to true, ClusterIdentifier is required.

" }, "SortingEntities":{"shape":"SnapshotSortingEntityList"} }, @@ -6679,7 +6693,9 @@ "shape":"TagList", "documentation":"

An optional set of tags describing the schedule.

" }, - "NextInvocations":{"shape":"ScheduledSnapshotTimeList"} + "NextInvocations":{"shape":"ScheduledSnapshotTimeList"}, + "AssociatedClusterCount":{"shape":"IntegerOptional"}, + "AssociatedClusters":{"shape":"AssociatedClusterList"} }, "documentation":"

Describes a snapshot schedule. You can set a regular interval for creating snapshots of a cluster. You can also schedule snapshots for specific dates.

" }, diff --git a/botocore/data/rekognition/2016-06-27/service-2.json b/botocore/data/rekognition/2016-06-27/service-2.json index 886a9b5d..4826242c 100644 --- a/botocore/data/rekognition/2016-06-27/service-2.json +++ b/botocore/data/rekognition/2016-06-27/service-2.json @@ -48,7 +48,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"ResourceAlreadyExistsException"} ], - "documentation":"

Creates a collection in an AWS Region. You can add faces to the collection using the operation.

For example, you might create collections, one for each of your application users. A user can then index faces using the IndexFaces operation and persist results in a specific collection. Then, a user can search the collection for faces in the user-specific container.

When you create a collection, it is associated with the latest version of the face model version.

Collection names are case-sensitive.

This operation requires permissions to perform the rekognition:CreateCollection action.

" + "documentation":"

Creates a collection in an AWS Region. You can add faces to the collection using the IndexFaces operation.

For example, you might create collections, one for each of your application users. A user can then index faces using the IndexFaces operation and persist results in a specific collection. Then, a user can search the collection for faces in the user-specific container.

When you create a collection, it is associated with the latest version of the face model version.

Collection names are case-sensitive.

This operation requires permissions to perform the rekognition:CreateCollection action.

" }, "CreateStreamProcessor":{ "name":"CreateStreamProcessor", @@ -67,7 +67,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces in a streaming video.

Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. Amazon Rekognition Video sends analysis results to Amazon Kinesis Data Streams.

You provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream. You also specify the face recognition criteria in Settings. For example, the collection containing faces that you want to recognize. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling with the Name field.

After you have finished analyzing a streaming video, use to stop processing. You can delete the stream processor by calling .

" + "documentation":"

Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces in a streaming video.

Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. Amazon Rekognition Video sends analysis results to Amazon Kinesis Data Streams.

You provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream. You also specify the face recognition criteria in Settings. For example, the collection containing faces that you want to recognize. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling StartStreamProcessor with the Name field.

After you have finished analyzing a streaming video, use StopStreamProcessor to stop processing. You can delete the stream processor by calling DeleteStreamProcessor.

" }, "DeleteCollection":{ "name":"DeleteCollection", @@ -122,7 +122,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Deletes the stream processor identified by Name. You assign the value for Name when you create the stream processor with . You might not be able to use the same name for a stream processor for a few seconds after calling DeleteStreamProcessor.

" + "documentation":"

Deletes the stream processor identified by Name. You assign the value for Name when you create the stream processor with CreateStreamProcessor. You might not be able to use the same name for a stream processor for a few seconds after calling DeleteStreamProcessor.

" }, "DescribeCollection":{ "name":"DescribeCollection", @@ -158,7 +158,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Provides information about a stream processor created by . You can get information about the input and output streams, the input parameters for the face recognition being performed, and the current status of the stream processor.

" + "documentation":"

Provides information about a stream processor created by CreateStreamProcessor. You can get information about the input and output streams, the input parameters for the face recognition being performed, and the current status of the stream processor.

" }, "DetectFaces":{ "name":"DetectFaces", @@ -198,7 +198,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature.

For an example, see Analyzing Images Stored in an Amazon S3 Bucket in the Amazon Rekognition Developer Guide.

DetectLabels does not support the detection of activities. However, activity detection is supported for label detection in videos. For more information, see StartLabelDetection in the Amazon Rekognition Developer Guide.

You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For each object, scene, and concept the API returns one or more labels. Each label provides the object name, and the level of confidence that the image contains the object. For example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object.

{Name: lighthouse, Confidence: 98.4629}

{Name: rock,Confidence: 79.2097}

{Name: sea,Confidence: 75.061}

In the preceding example, the operation returns one label for each of the three objects. The operation can also return multiple labels for the same object in the image. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels.

{Name: flower,Confidence: 99.0562}

{Name: plant,Confidence: 99.0562}

{Name: tulip,Confidence: 99.0562}

In this example, the detection algorithm more precisely identifies the flower as a tulip.

In response, the API returns an array of labels. In addition, the response also includes the orientation correction. Optionally, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 50%. You can also add the MaxLabels parameter to limit the number of labels returned.

If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides.

DetectLabels returns bounding boxes for instances of common object labels in an array of objects. An Instance object contains a object, for the location of the label on the image. It also includes the confidence by which the bounding box was detected.

DetectLabels also returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response returns the entire list of ancestors for a label. Each ancestor is a unique label in the response. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectLabels action.

" + "documentation":"

Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature.

For an example, see Analyzing Images Stored in an Amazon S3 Bucket in the Amazon Rekognition Developer Guide.

DetectLabels does not support the detection of activities. However, activity detection is supported for label detection in videos. For more information, see StartLabelDetection in the Amazon Rekognition Developer Guide.

You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For each object, scene, and concept the API returns one or more labels. Each label provides the object name, and the level of confidence that the image contains the object. For example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object.

{Name: lighthouse, Confidence: 98.4629}

{Name: rock,Confidence: 79.2097}

{Name: sea,Confidence: 75.061}

In the preceding example, the operation returns one label for each of the three objects. The operation can also return multiple labels for the same object in the image. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels.

{Name: flower,Confidence: 99.0562}

{Name: plant,Confidence: 99.0562}

{Name: tulip,Confidence: 99.0562}

In this example, the detection algorithm more precisely identifies the flower as a tulip.

In response, the API returns an array of labels. In addition, the response also includes the orientation correction. Optionally, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned.

If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides.

DetectLabels returns bounding boxes for instances of common object labels in an array of Instance objects. An Instance object contains a BoundingBox object, for the location of the label on the image. It also includes the confidence by which the bounding box was detected.

DetectLabels also returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response returns the entire list of ancestors for a label. Each ancestor is a unique label in the response. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectLabels action.

" }, "DetectModerationLabels":{ "name":"DetectModerationLabels", @@ -238,7 +238,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects text in the input image and converts it into machine-readable text.

Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.

The DetectText operation returns text in an array of elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image.

A word is one or more ISO basic latin script characters that are not separated by spaces. DetectText can detect up to 50 words in an image.

A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.

To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.

To be detected, text must be within +/- 90 degrees orientation of the horizontal axis.

For more information, see DetectText in the Amazon Rekognition Developer Guide.

" + "documentation":"

Detects text in the input image and converts it into machine-readable text.

Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.

The DetectText operation returns text in an array of TextDetection elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image.

A word is one or more ISO basic latin script characters that are not separated by spaces. DetectText can detect up to 50 words in an image.

A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.

To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.

To be detected, text must be within +/- 90 degrees orientation of the horizontal axis.

For more information, see DetectText in the Amazon Rekognition Developer Guide.

" }, "GetCelebrityInfo":{ "name":"GetCelebrityInfo", @@ -275,7 +275,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the celebrity recognition results for a Amazon Rekognition Video analysis started by .

Celebrity recognition in a video is an asynchronous operation. Analysis is started by a call to which returns a job identifier (JobId). When the celebrity recognition operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartCelebrityRecognition. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityDetection and pass the job identifier (JobId) from the initial call to StartCelebrityDetection.

For more information, see Working With Stored Videos in the Amazon Rekognition Developer Guide.

GetCelebrityRecognition returns detected celebrities and the time(s) they are detected in an array (Celebrities) of objects. Each CelebrityRecognition contains information about the celebrity in a object and the time, Timestamp, the celebrity was detected.

GetCelebrityRecognition only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned. For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

By default, the Celebrities array is sorted by time (milliseconds from the start of the video). You can also sort the array by celebrity by specifying the value ID in the SortBy input parameter.

The CelebrityDetail object includes the celebrity identifer and additional information urls. If you don't store the additional information urls, you can get them later by calling with the celebrity identifer.

No information is returned for faces not recognized as celebrities.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetCelebrityDetection and populate the NextToken request parameter with the token value returned from the previous call to GetCelebrityRecognition.

" + "documentation":"

Gets the celebrity recognition results for a Amazon Rekognition Video analysis started by StartCelebrityRecognition.

Celebrity recognition in a video is an asynchronous operation. Analysis is started by a call to StartCelebrityRecognition which returns a job identifier (JobId). When the celebrity recognition operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartCelebrityRecognition. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityDetection and pass the job identifier (JobId) from the initial call to StartCelebrityDetection.

For more information, see Working With Stored Videos in the Amazon Rekognition Developer Guide.

GetCelebrityRecognition returns detected celebrities and the time(s) they are detected in an array (Celebrities) of CelebrityRecognition objects. Each CelebrityRecognition contains information about the celebrity in a CelebrityDetail object and the time, Timestamp, the celebrity was detected.

GetCelebrityRecognition only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned. For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

By default, the Celebrities array is sorted by time (milliseconds from the start of the video). You can also sort the array by celebrity by specifying the value ID in the SortBy input parameter.

The CelebrityDetail object includes the celebrity identifier and additional information urls. If you don't store the additional information urls, you can get them later by calling GetCelebrityInfo with the celebrity identifier.

No information is returned for faces not recognized as celebrities.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetCelebrityDetection and populate the NextToken request parameter with the token value returned from the previous call to GetCelebrityRecognition.

" }, "GetContentModeration":{ "name":"GetContentModeration", @@ -294,7 +294,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the content moderation analysis results for a Amazon Rekognition Video analysis started by .

Content moderation analysis of a video is an asynchronous operation. You start analysis by calling . which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityDetection and pass the job identifier (JobId) from the initial call to StartCelebrityDetection.

For more information, see Working with Stored Videos in the Amazon Rekognition Devlopers Guide.

GetContentModeration returns detected content moderation labels, and the time they are detected, in an array, ModerationLabels, of objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

" + "documentation":"

Gets the content moderation analysis results for a Amazon Rekognition Video analysis started by StartContentModeration.

Content moderation analysis of a video is an asynchronous operation. You start analysis by calling StartContentModeration. which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityDetection and pass the job identifier (JobId) from the initial call to StartCelebrityDetection.

For more information, see Working with Stored Videos in the Amazon Rekognition Developers Guide.

GetContentModeration returns detected content moderation labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

" }, "GetFaceDetection":{ "name":"GetFaceDetection", @@ -313,7 +313,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets face detection results for a Amazon Rekognition Video analysis started by .

Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceDetection.

GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.

" + "documentation":"

Gets face detection results for an Amazon Rekognition Video analysis started by StartFaceDetection.

Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection.

GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.

" }, "GetFaceSearch":{ "name":"GetFaceSearch", @@ -332,7 +332,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the face search results for Amazon Rekognition Video face search started by . The search returns faces in a collection that match the faces of persons detected in a video. It also includes the time(s) that faces are matched in the video.

Face search in a video is an asynchronous operation. You start face search by calling to which returns a job identifier (JobId). When the search operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceSearch. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier (JobId) from the initial call to StartFaceSearch.

For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.

The search results are retured in an array, Persons, of objects. EachPersonMatch element contains details about the matching faces in the input collection, person information (facial attributes, bounding boxes, and person identifer) for the matched person, and the time the person was matched in the video.

GetFaceSearch only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned. For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

By default, the Persons array is sorted by the time, in milliseconds from the start of the video, persons are matched. You can also sort by persons by specifying INDEX for the SORTBY input parameter.

" + "documentation":"

Gets the face search results for Amazon Rekognition Video face search started by StartFaceSearch. The search returns faces in a collection that match the faces of persons detected in a video. It also includes the time(s) that faces are matched in the video.

Face search in a video is an asynchronous operation. You start face search by calling StartFaceSearch which returns a job identifier (JobId). When the search operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceSearch. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier (JobId) from the initial call to StartFaceSearch.

For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.

The search results are returned in an array, Persons, of PersonMatch objects. Each PersonMatch element contains details about the matching faces in the input collection, person information (facial attributes, bounding boxes, and person identifier) for the matched person, and the time the person was matched in the video.

GetFaceSearch only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned. For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

By default, the Persons array is sorted by the time, in milliseconds from the start of the video, persons are matched. You can also sort by persons by specifying INDEX for the SORTBY input parameter.

" }, "GetLabelDetection":{ "name":"GetLabelDetection", @@ -351,7 +351,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the label detection results of a Amazon Rekognition Video analysis started by .

The label detection operation is started by a call to which returns a job identifier (JobId). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartlabelDetection. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartLabelDetection.

GetLabelDetection returns an array of detected labels (Labels) sorted by the time the labels were detected. You can also sort by the label name by specifying NAME for the SortBy input parameter.

The labels returned include the label name, the percentage confidence in the accuracy of the detected label, and the time the label was detected in the video.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetlabelDetection and populate the NextToken request parameter with the token value returned from the previous call to GetLabelDetection.

GetLabelDetection doesn't return a hierarchical taxonomy, or bounding box information, for detected labels. GetLabelDetection returns null for the Parents and Instances attributes of the object which is returned in the Labels array.

" + "documentation":"

Gets the label detection results of an Amazon Rekognition Video analysis started by StartLabelDetection.

The label detection operation is started by a call to StartLabelDetection which returns a job identifier (JobId). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartLabelDetection. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLabelDetection and pass the job identifier (JobId) from the initial call to StartLabelDetection.

GetLabelDetection returns an array of detected labels (Labels) sorted by the time the labels were detected. You can also sort by the label name by specifying NAME for the SortBy input parameter.

The labels returned include the label name, the percentage confidence in the accuracy of the detected label, and the time the label was detected in the video.

The returned labels also include bounding box information for common objects, a hierarchical taxonomy of detected labels, and the version of the label model used for detection.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetLabelDetection and populate the NextToken request parameter with the token value returned from the previous call to GetLabelDetection.

" }, "GetPersonTracking":{ "name":"GetPersonTracking", @@ -370,7 +370,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the path tracking results of a Amazon Rekognition Video analysis started by .

The person path tracking operation is started by a call to StartPersonTracking which returns a job identifier (JobId). When the operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartPersonTracking.

To get the results of the person path tracking operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartPersonTracking.

GetPersonTracking returns an array, Persons, of tracked persons and the time(s) their paths were tracked in the video.

GetPersonTracking only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned.

For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

By default, the array is sorted by the time(s) a person's path is tracked in the video. You can sort by tracked persons by specifying INDEX for the SortBy input parameter.

Use the MaxResults parameter to limit the number of items returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetPersonTracking and populate the NextToken request parameter with the token value returned from the previous call to GetPersonTracking.

" + "documentation":"

Gets the path tracking results of an Amazon Rekognition Video analysis started by StartPersonTracking.

The person path tracking operation is started by a call to StartPersonTracking which returns a job identifier (JobId). When the operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartPersonTracking.

To get the results of the person path tracking operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

GetPersonTracking returns an array, Persons, of tracked persons and the time(s) their paths were tracked in the video.

GetPersonTracking only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned.

For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

By default, the array is sorted by the time(s) a person's path is tracked in the video. You can sort by tracked persons by specifying INDEX for the SortBy input parameter.

Use the MaxResults parameter to limit the number of items returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetPersonTracking and populate the NextToken request parameter with the token value returned from the previous call to GetPersonTracking.

" }, "IndexFaces":{ "name":"IndexFaces", @@ -391,7 +391,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects faces in the input image and adds them to the specified collection.

Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the and operations.

For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.

To get the number of faces in a collection, call .

If you're using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image.

If you're using version 4 or later of the face model, image orientation information is not returned in the OrientationCorrection field.

To determine which version of the model you're using, call and supply the collection ID. You can also get the model version from the value of FaceModelVersion in the response from IndexFaces

For more information, see Model Versioning in the Amazon Rekognition Developer Guide.

If you provide the optional ExternalImageID for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.

You can specify the maximum number of faces to index with the MaxFaces input parameter. This is useful when you want to index the largest faces in an image and don't want to index smaller faces, such as those belonging to people standing in the background.

The QualityFilter input parameter allows you to filter out detected faces that don’t meet the required quality bar chosen by Amazon Rekognition. The quality bar is based on a variety of common use cases. By default, IndexFaces filters detected faces. You can also explicitly filter detected faces by specifying AUTO for the value of QualityFilter. If you do not want to filter detected faces, specify NONE.

To use quality filtering, you need a collection associated with version 3 of the face model. To get the version of the face model associated with a collection, call .

Information about faces detected in an image, but not indexed, is returned in an array of objects, UnindexedFaces. Faces aren't indexed for reasons such as:

  • The number of faces detected exceeds the value of the MaxFaces request parameter.

  • The face is too small compared to the image dimensions.

  • The face is too blurry.

  • The image is too dark.

  • The face has an extreme pose.

In response, the IndexFaces operation returns an array of metadata for all detected faces, FaceRecords. This includes:

  • The bounding box, BoundingBox, of the detected face.

  • A confidence value, Confidence, which indicates the confidence that the bounding box contains a face.

  • A face ID, faceId, assigned by the service for each face that's detected and stored.

  • An image ID, ImageId, assigned by the service for the input image.

If you request all facial attributes (by using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes like gender. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

This operation requires permissions to perform the rekognition:IndexFaces action.

" + "documentation":"

Detects faces in the input image and adds them to the specified collection.

Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the SearchFaces and SearchFacesByImage operations.

For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.

To get the number of faces in a collection, call DescribeCollection.

If you're using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image.

If you're using version 4 or later of the face model, image orientation information is not returned in the OrientationCorrection field.

To determine which version of the model you're using, call DescribeCollection and supply the collection ID. You can also get the model version from the value of FaceModelVersion in the response from IndexFaces.

For more information, see Model Versioning in the Amazon Rekognition Developer Guide.

If you provide the optional ExternalImageID for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.

You can specify the maximum number of faces to index with the MaxFaces input parameter. This is useful when you want to index the largest faces in an image and don't want to index smaller faces, such as those belonging to people standing in the background.

The QualityFilter input parameter allows you to filter out detected faces that don’t meet the required quality bar chosen by Amazon Rekognition. The quality bar is based on a variety of common use cases. By default, IndexFaces filters detected faces. You can also explicitly filter detected faces by specifying AUTO for the value of QualityFilter. If you do not want to filter detected faces, specify NONE.

To use quality filtering, you need a collection associated with version 3 of the face model. To get the version of the face model associated with a collection, call DescribeCollection.

Information about faces detected in an image, but not indexed, is returned in an array of UnindexedFace objects, UnindexedFaces. Faces aren't indexed for reasons such as:

  • The number of faces detected exceeds the value of the MaxFaces request parameter.

  • The face is too small compared to the image dimensions.

  • The face is too blurry.

  • The image is too dark.

  • The face has an extreme pose.

In response, the IndexFaces operation returns an array of metadata for all detected faces, FaceRecords. This includes:

  • The bounding box, BoundingBox, of the detected face.

  • A confidence value, Confidence, which indicates the confidence that the bounding box contains a face.

  • A face ID, FaceId, assigned by the service for each face that's detected and stored.

  • An image ID, ImageId, assigned by the service for the input image.

If you request all facial attributes (by using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes like gender. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

This operation requires permissions to perform the rekognition:IndexFaces action.

" }, "ListCollections":{ "name":"ListCollections", @@ -447,7 +447,7 @@ {"shape":"InvalidPaginationTokenException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Gets a list of stream processors that you have created with .

" + "documentation":"

Gets a list of stream processors that you have created with CreateStreamProcessor.

" }, "RecognizeCelebrities":{ "name":"RecognizeCelebrities", @@ -468,7 +468,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Returns an array of celebrities recognized in the input image. For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.

RecognizeCelebrities returns the 100 largest faces in the image. It lists recognized celebrities in the CelebrityFaces array and unrecognized faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities whose faces aren't among the largest 100 faces in the image.

For each celebrity recognized, RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image.

Amazon Rekognition doesn't retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities, you will need the ID to identify the celebrity in a call to the operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.

" + "documentation":"

Returns an array of celebrities recognized in the input image. For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.

RecognizeCelebrities returns the 100 largest faces in the image. It lists recognized celebrities in the CelebrityFaces array and unrecognized faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities whose faces aren't among the largest 100 faces in the image.

For each celebrity recognized, RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image.

Amazon Rekognition doesn't retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities, you will need the ID to identify the celebrity in a call to the GetCelebrityInfo operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.

" }, "SearchFaces":{ "name":"SearchFaces", @@ -507,7 +507,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection.

To search for all faces in an input image, you might first call the operation, and then use the face IDs returned in subsequent calls to the operation.

You can also call the DetectFaces operation and use the bounding boxes in the response to make face crops, which then you can pass in to the SearchFacesByImage operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity indicating how similar the face is to the input face. In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image.

For an example, Searching for a Face Using an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:SearchFacesByImage action.

" + "documentation":"

For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection.

To search for all faces in an input image, you might first call the IndexFaces operation, and then use the face IDs returned in subsequent calls to the SearchFaces operation.

You can also call the DetectFaces operation and use the bounding boxes in the response to make face crops, which then you can pass in to the SearchFacesByImage operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity indicating how similar the face is to the input face. In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image.

For an example, Searching for a Face Using an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:SearchFacesByImage action.

" }, "StartCelebrityRecognition":{ "name":"StartCelebrityRecognition", @@ -528,7 +528,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts asynchronous recognition of celebrities in a stored video.

Amazon Rekognition Video can detect celebrities in a video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition.

For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.

", + "documentation":"

Starts asynchronous recognition of celebrities in a stored video.

Amazon Rekognition Video can detect celebrities in a video; the video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityRecognition and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition.

For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.

", "idempotent":true }, "StartContentModeration":{ @@ -550,7 +550,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts asynchronous detection of explicit or suggestive adult content in a stored video.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content moderation analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

", + "documentation":"

Starts asynchronous detection of explicit or suggestive adult content in a stored video.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content moderation analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

", "idempotent":true }, "StartFaceDetection":{ @@ -572,7 +572,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts asynchronous detection of faces in a stored video.

Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceDetection returns a job identifier (JobId) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceDetection.

For more information, see Detecting Faces in a Stored Video in the Amazon Rekognition Developer Guide.

", + "documentation":"

Starts asynchronous detection of faces in a stored video.

Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceDetection returns a job identifier (JobId) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection.

For more information, see Detecting Faces in a Stored Video in the Amazon Rekognition Developer Guide.

", "idempotent":true }, "StartFaceSearch":{ @@ -595,7 +595,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video.

The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId) which you use to get the search results once the search has completed. When searching is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see procedure-person-search-videos.

", + "documentation":"

Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video.

The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId) which you use to get the search results once the search has completed. When searching is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see procedure-person-search-videos.

", "idempotent":true }, "StartLabelDetection":{ @@ -617,7 +617,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts asynchronous detection of labels in a stored video.

Amazon Rekognition Video can detect labels in a video. Labels are instances of real-world entities. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; concepts like landscape, evening, and nature; and activities like a person getting out of a car or a person skiing.

The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartLabelDetection returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartLabelDetection.

", + "documentation":"

Starts asynchronous detection of labels in a stored video.

Amazon Rekognition Video can detect labels in a video. Labels are instances of real-world entities. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; concepts like landscape, evening, and nature; and activities like a person getting out of a car or a person skiing.

The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartLabelDetection returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLabelDetection and pass the job identifier (JobId) from the initial call to StartLabelDetection.

", "idempotent":true }, "StartPersonTracking":{ @@ -639,7 +639,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts the asynchronous tracking of a person's path in a stored video.

Amazon Rekognition Video can track the path of people in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartPersonTracking returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the person detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartPersonTracking.

", + "documentation":"

Starts the asynchronous tracking of a person's path in a stored video.

Amazon Rekognition Video can track the path of people in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartPersonTracking returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the person detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

", "idempotent":true }, "StartStreamProcessor":{ @@ -659,7 +659,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Starts processing a stream processor. You create a stream processor by calling . To tell StartStreamProcessor which stream processor to start, use the value of the Name field specified in the call to CreateStreamProcessor.

" + "documentation":"

Starts processing a stream processor. You create a stream processor by calling CreateStreamProcessor. To tell StartStreamProcessor which stream processor to start, use the value of the Name field specified in the call to CreateStreamProcessor.

" }, "StopStreamProcessor":{ "name":"StopStreamProcessor", @@ -678,7 +678,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Stops a running stream processor that was created by .

" + "documentation":"

Stops a running stream processor that was created by CreateStreamProcessor.

" } }, "shapes":{ @@ -775,7 +775,7 @@ "documentation":"

The confidence, in percentage, that Amazon Rekognition has that the recognized face is the celebrity.

" } }, - "documentation":"

Provides information about a celebrity recognized by the operation.

" + "documentation":"

Provides information about a celebrity recognized by the RecognizeCelebrities operation.

" }, "CelebrityDetail":{ "type":"structure", @@ -879,11 +879,11 @@ "members":{ "SourceImage":{ "shape":"Image", - "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

" + "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the Bytes field. For more information, see Images in the Amazon Rekognition developer guide.

" }, "TargetImage":{ "shape":"Image", - "documentation":"

The target image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

" + "documentation":"

The target image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the Bytes field. For more information, see Images in the Amazon Rekognition developer guide.

" }, "SimilarityThreshold":{ "shape":"Percent", @@ -1036,7 +1036,7 @@ }, "Name":{ "shape":"StreamProcessorName", - "documentation":"

An identifier you assign to the stream processor. You can use Name to manage the stream processor. For example, you can get the current status of the stream processor by calling . Name is idempotent.

" + "documentation":"

An identifier you assign to the stream processor. You can use Name to manage the stream processor. For example, you can get the current status of the stream processor by calling DescribeStreamProcessor. Name is idempotent.

" }, "Settings":{ "shape":"StreamProcessorSettings", @@ -1138,7 +1138,7 @@ "members":{ "FaceCount":{ "shape":"ULong", - "documentation":"

The number of faces that are indexed into the collection. To index faces into a collection, use .

" + "documentation":"

The number of faces that are indexed into the collection. To index faces into a collection, use IndexFaces.

" }, "FaceModelVersion":{ "shape":"String", @@ -1215,7 +1215,7 @@ "members":{ "Image":{ "shape":"Image", - "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

" + "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the Bytes field. For more information, see Images in the Amazon Rekognition developer guide.

" }, "Attributes":{ "shape":"Attributes", @@ -1242,7 +1242,7 @@ "members":{ "Image":{ "shape":"Image", - "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

" + "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. Images stored in an S3 Bucket do not need to be base64-encoded.

If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the Bytes field. For more information, see Images in the Amazon Rekognition developer guide.

" }, "MaxLabels":{ "shape":"UInteger", @@ -1250,7 +1250,7 @@ }, "MinConfidence":{ "shape":"Percent", - "documentation":"

Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with confidence lower than this specified value.

If MinConfidence is not specified, the operation returns labels with a confidence values greater than or equal to 50 percent.

" + "documentation":"

Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with confidence lower than this specified value.

If MinConfidence is not specified, the operation returns labels with confidence values greater than or equal to 55 percent.

" } } }, @@ -1277,7 +1277,7 @@ "members":{ "Image":{ "shape":"Image", - "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

" + "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the Bytes field. For more information, see Images in the Amazon Rekognition developer guide.

" }, "MinConfidence":{ "shape":"Percent", @@ -1291,6 +1291,10 @@ "ModerationLabels":{ "shape":"ModerationLabels", "documentation":"

Array of detected Moderation labels and the time, in millseconds from the start of the video, they were detected.

" + }, + "ModerationModelVersion":{ + "shape":"String", + "documentation":"

Version number of the moderation detection model that was used to detect unsafe content.

" } } }, @@ -1300,7 +1304,7 @@ "members":{ "Image":{ "shape":"Image", - "documentation":"

The input image as base64-encoded bytes or an Amazon S3 object. If you use the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.

" + "documentation":"

The input image as base64-encoded bytes or an Amazon S3 object. If you use the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.

If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the Bytes field. For more information, see Images in the Amazon Rekognition developer guide.

" } } }, @@ -1475,7 +1479,7 @@ "documentation":"

Confidence level that the bounding box contains a face (and not a different object such as a tree). Default attribute.

" } }, - "documentation":"

Structure containing attributes of the face that the algorithm detected.

A FaceDetail object contains either the default facial attributes or all facial attributes. The default attributes are BoundingBox, Confidence, Landmarks, Pose, and Quality.

is the only Amazon Rekognition Video stored video operation that can return a FaceDetail object with all attributes. To specify which attributes to return, use the FaceAttributes input parameter for . The following Amazon Rekognition Video operations return only the default attributes. The corresponding Start operations don't have a FaceAttributes input parameter.

  • GetCelebrityRecognition

  • GetPersonTracking

  • GetFaceSearch

The Amazon Rekognition Image and operations can return all facial attributes. To specify which attributes to return, use the Attributes input parameter for DetectFaces. For IndexFaces, use the DetectAttributes input parameter.

" + "documentation":"

Structure containing attributes of the face that the algorithm detected.

A FaceDetail object contains either the default facial attributes or all facial attributes. The default attributes are BoundingBox, Confidence, Landmarks, Pose, and Quality.

GetFaceDetection is the only Amazon Rekognition Video stored video operation that can return a FaceDetail object with all attributes. To specify which attributes to return, use the FaceAttributes input parameter for StartFaceDetection. The following Amazon Rekognition Video operations return only the default attributes. The corresponding Start operations don't have a FaceAttributes input parameter.

  • GetCelebrityRecognition

  • GetPersonTracking

  • GetFaceSearch

The Amazon Rekognition Image DetectFaces and IndexFaces operations can return all facial attributes. To specify which attributes to return, use the Attributes input parameter for DetectFaces. For IndexFaces, use the DetectAttributes input parameter.

" }, "FaceDetailList":{ "type":"list", @@ -1565,7 +1569,7 @@ "documentation":"

Minimum face match confidence score that must be met to return a result for a recognized face. Default is 70. 0 is the lowest confidence. 100 is the highest confidence.

" } }, - "documentation":"

Input face recognition parameters for an Amazon Rekognition stream processor. FaceRecognitionSettings is a request parameter for .

" + "documentation":"

Input face recognition parameters for an Amazon Rekognition stream processor. FaceRecognitionSettings is a request parameter for CreateStreamProcessor.

" }, "FaceSearchSortBy":{ "type":"string", @@ -1608,7 +1612,7 @@ "documentation":"

Within the bounding box, a fine-grained polygon around the detected text.

" } }, - "documentation":"

Information about where the text detected by is located on an image.

" + "documentation":"

Information about where the text detected by DetectText is located on an image.

" }, "GetCelebrityInfoRequest":{ "type":"structure", @@ -1616,7 +1620,7 @@ "members":{ "Id":{ "shape":"RekognitionUniqueId", - "documentation":"

The ID for the celebrity. You get the celebrity ID from a call to the operation, which recognizes celebrities in an image.

" + "documentation":"

The ID for the celebrity. You get the celebrity ID from a call to the RecognizeCelebrities operation, which recognizes celebrities in an image.

" } } }, @@ -1724,6 +1728,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of moderation labels.

" + }, + "ModerationModelVersion":{ + "shape":"String", + "documentation":"

Version number of the moderation detection model that was used to detect unsafe content.

" } } }, @@ -1813,7 +1821,7 @@ }, "Persons":{ "shape":"PersonMatches", - "documentation":"

An array of persons, , in the video whose face(s) match the face(s) in an Amazon Rekognition collection. It also includes time information for when persons are matched in the video. You specify the input collection in an initial call to StartFaceSearch. Each Persons element includes a time the person was matched, face match details (FaceMatches) for matching faces in the collection, and person information (Person) for the matched person.

" + "documentation":"

An array of persons, PersonMatch, in the video whose face(s) match the face(s) in an Amazon Rekognition collection. It also includes time information for when persons are matched in the video. You specify the input collection in an initial call to StartFaceSearch. Each Persons element includes a time the person was matched, face match details (FaceMatches) for matching faces in the collection, and person information (Person) for the matched person.

" } } }, @@ -1861,6 +1869,10 @@ "Labels":{ "shape":"LabelDetections", "documentation":"

An array of labels detected in the video. Each element contains the detected label and the time, in milliseconds from the start of the video, that the label was detected.

" + }, + "LabelModelVersion":{ + "shape":"String", + "documentation":"

Version number of the label detection model that was used to detect labels.

" } } }, @@ -1975,7 +1987,7 @@ }, "Image":{ "shape":"Image", - "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn't supported.

" + "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn't supported.

If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the Bytes field. For more information, see Images in the Amazon Rekognition developer guide.

" }, "ExternalImageId":{ "shape":"ExternalImageId", @@ -2004,7 +2016,7 @@ }, "OrientationCorrection":{ "shape":"OrientationCorrection", - "documentation":"

If your collection is associated with a face detection model that's later than version 3.0, the value of OrientationCorrection is always null and no orientation information is returned.

If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:

  • If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata. The value of OrientationCorrection is null.

  • If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn’t perform image correction for images. The bounding box coordinates aren't translated and represent the object locations before the image is rotated.

Bounding box information is returned in the FaceRecords array. You can get the version of the face detection model by calling .

" + "documentation":"

If your collection is associated with a face detection model that's later than version 3.0, the value of OrientationCorrection is always null and no orientation information is returned.

If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:

  • If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata. The value of OrientationCorrection is null.

  • If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn’t perform image correction for images. The bounding box coordinates aren't translated and represent the object locations before the image is rotated.

Bounding box information is returned in the FaceRecords array. You can get the version of the face detection model by calling DescribeCollection.

" }, "FaceModelVersion":{ "shape":"String", @@ -2025,10 +2037,10 @@ }, "Confidence":{ "shape":"Percent", - "documentation":"

The confidence that Amazon Rekognition Image has in the accuracy of the bounding box.

" + "documentation":"

The confidence that Amazon Rekognition has in the accuracy of the bounding box.

" } }, - "documentation":"

An instance of a label detected by .

" + "documentation":"

An instance of a label returned by Amazon Rekognition Image (DetectLabels) or by Amazon Rekognition Video (GetLabelDetection).

" }, "Instances":{ "type":"list", @@ -2123,14 +2135,14 @@ }, "Instances":{ "shape":"Instances", - "documentation":"

If Label represents an object, Instances contains the bounding boxes for each instance of the detected object. Bounding boxes are returned for common object labels such as people, cars, furniture, apparel or pets.

Amazon Rekognition Video does not support bounding box information for detected labels. The value of Instances is returned as null by GetLabelDetection.

" + "documentation":"

If Label represents an object, Instances contains the bounding boxes for each instance of the detected object. Bounding boxes are returned for common object labels such as people, cars, furniture, apparel or pets.

" }, "Parents":{ "shape":"Parents", - "documentation":"

The parent labels for a label. The response includes all ancestor labels.

Amazon Rekognition Video does not support a hierarchical taxonomy of detected labels. The value of Parents is returned as null by GetLabelDetection.

" + "documentation":"

The parent labels for a label. The response includes all ancestor labels.

" } }, - "documentation":"

Structure containing details about the detected label, including the name, and level of confidence.

The Amazon Rekognition Image operation operation returns a hierarchical taxonomy (Parents) for detected labels and also bounding box information (Instances) for detected labels. Amazon Rekognition Video doesn't return this information and returns null for the Parents and Instances attributes.

" + "documentation":"

Structure containing details about the detected label, including the name, detected instances, parent labels, and level of confidence.

" }, "LabelDetection":{ "type":"structure", @@ -2464,7 +2476,7 @@ "documentation":"

Details about a person whose path was tracked in a video.

" } }, - "documentation":"

Details and path tracking information for a single time a person's path is tracked in a video. Amazon Rekognition operations that track people's paths return an array of PersonDetection objects with elements for each time a person's path is tracked in a video.

For more information, see API_GetPersonTracking in the Amazon Rekognition Developer Guide.

" + "documentation":"

Details and path tracking information for a single time a person's path is tracked in a video. Amazon Rekognition operations that track people's paths return an array of PersonDetection objects with elements for each time a person's path is tracked in a video.

For more information, see GetPersonTracking in the Amazon Rekognition Developer Guide.

" }, "PersonDetections":{ "type":"list", @@ -2487,7 +2499,7 @@ "documentation":"

Information about the faces in the input collection that match the face of a person in the video.

" } }, - "documentation":"

Information about a person whose face matches a face(s) in an Amazon Rekognition collection. Includes information about the faces in the Amazon Rekognition collection (), information about the person (PersonDetail), and the time stamp for when the person was detected in a video. An array of PersonMatch objects is returned by .

" + "documentation":"

Information about a person whose face matches a face(s) in an Amazon Rekognition collection. Includes information about the faces in the Amazon Rekognition collection (FaceMatch), information about the person (PersonDetail), and the time stamp for when the person was detected in a video. An array of PersonMatch objects is returned by GetFaceSearch.

" }, "PersonMatches":{ "type":"list", @@ -2512,7 +2524,7 @@ "documentation":"

The value of the Y coordinate for a point on a Polygon.

" } }, - "documentation":"

The X and Y coordinates of a point on an image. The X and Y values returned are ratios of the overall image size. For example, if the input image is 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the image.

An array of Point objects, Polygon, is returned by . Polygon represents a fine-grained polygon around detected text. For more information, see Geometry in the Amazon Rekognition Developer Guide.

" + "documentation":"

The X and Y coordinates of a point on an image. The X and Y values returned are ratios of the overall image size. For example, if the input image is 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the image.

An array of Point objects, Polygon, is returned by DetectText. Polygon represents a fine-grained polygon around detected text. For more information, see Geometry in the Amazon Rekognition Developer Guide.

" }, "Polygon":{ "type":"list", @@ -2571,7 +2583,7 @@ "members":{ "Image":{ "shape":"Image", - "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

" + "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the Bytes field. For more information, see Images in the Amazon Rekognition developer guide.

" } } }, @@ -2672,7 +2684,7 @@ }, "Image":{ "shape":"Image", - "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

" + "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the Bytes field. For more information, see Images in the Amazon Rekognition developer guide.

" }, "MaxFaces":{ "shape":"MaxFaces", @@ -2992,7 +3004,7 @@ "members":{ "Name":{ "shape":"StreamProcessorName", - "documentation":"

The name of a stream processor created by .

" + "documentation":"

The name of a stream processor created by CreateStreamProcessor.

" } } }, @@ -3013,7 +3025,7 @@ "documentation":"

Current status of the Amazon Rekognition stream processor.

" } }, - "documentation":"

An object that recognizes faces in a streaming video. An Amazon Rekognition stream processor is created by a call to . The request parameters for CreateStreamProcessor describe the Kinesis video stream source for the streaming video, face recognition parameters, and where to stream the analysis resullts.

" + "documentation":"

An object that recognizes faces in a streaming video. An Amazon Rekognition stream processor is created by a call to CreateStreamProcessor. The request parameters for CreateStreamProcessor describe the Kinesis video stream source for the streaming video, face recognition parameters, and where to stream the analysis results.

" }, "StreamProcessorArn":{ "type":"string", @@ -3112,7 +3124,7 @@ "documentation":"

The location of the detected text on the image. Includes an axis aligned coarse bounding box surrounding the text and a finer grain polygon for more accurate spatial information.

" } }, - "documentation":"

Information about a word or line of text detected by .

The DetectedText field contains the text that Amazon Rekognition detected in the image.

Every word and line has an identifier (Id). Each word belongs to a line and has a parent identifier (ParentId) that identifies the line of text in which the word appears. The word Id is also an index for the word within a line of words.

For more information, see Detecting Text in the Amazon Rekognition Developer Guide.

" + "documentation":"

Information about a word or line of text detected by DetectText.

The DetectedText field contains the text that Amazon Rekognition detected in the image.

Every word and line has an identifier (Id). Each word belongs to a line and has a parent identifier (ParentId) that identifies the line of text in which the word appears. The word Id is also an index for the word within a line of words.

For more information, see Detecting Text in the Amazon Rekognition Developer Guide.

" }, "TextDetectionList":{ "type":"list", @@ -3154,7 +3166,7 @@ "documentation":"

The structure that contains attributes of a face that IndexFacesdetected, but didn't index.

" } }, - "documentation":"

A face that detected, but didn't index. Use the Reasons response attribute to determine why a face wasn't indexed.

" + "documentation":"

A face that IndexFaces detected, but didn't index. Use the Reasons response attribute to determine why a face wasn't indexed.

" }, "UnindexedFaces":{ "type":"list", @@ -3173,7 +3185,7 @@ "documentation":"

The Amazon S3 bucket name and file name for the video.

" } }, - "documentation":"

Video file stored in an Amazon S3 bucket. Amazon Rekognition video start operations such as use Video to specify a video for analysis. The supported file formats are .mp4, .mov and .avi.

" + "documentation":"

Video file stored in an Amazon S3 bucket. Amazon Rekognition video start operations such as StartLabelDetection use Video to specify a video for analysis. The supported file formats are .mp4, .mov and .avi.

" }, "VideoJobStatus":{ "type":"string", diff --git a/botocore/data/resource-groups/2017-11-27/service-2.json b/botocore/data/resource-groups/2017-11-27/service-2.json index 9bcc9540..4b5d000f 100644 --- a/botocore/data/resource-groups/2017-11-27/service-2.json +++ b/botocore/data/resource-groups/2017-11-27/service-2.json @@ -171,7 +171,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Adds specified tags to a resource with the specified ARN. Existing tags on a resource are not changed if they are not specified in the request parameters.

" + "documentation":"

Adds tags to a resource group with the specified ARN. Existing tags on a resource group are not changed if they are not specified in the request parameters.

" }, "Untag":{ "name":"Untag", diff --git a/botocore/data/robomaker/2018-06-29/paginators-1.json b/botocore/data/robomaker/2018-06-29/paginators-1.json index ea142457..314046d4 100644 --- a/botocore/data/robomaker/2018-06-29/paginators-1.json +++ b/botocore/data/robomaker/2018-06-29/paginators-1.json @@ -1,3 +1,40 @@ { - "pagination": {} + "pagination": { + "ListDeploymentJobs": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "deploymentJobs" + }, + "ListFleets": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "fleetDetails" + }, + "ListRobotApplications": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "robotApplicationSummaries" + }, + "ListRobots": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "robots" + }, + "ListSimulationApplications": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "simulationApplicationSummaries" + }, + "ListSimulationJobs": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "simulationJobSummaries" + } + } } diff --git a/botocore/data/robomaker/2018-06-29/service-2.json b/botocore/data/robomaker/2018-06-29/service-2.json index fe8f9c19..078468e8 100644 --- a/botocore/data/robomaker/2018-06-29/service-2.json +++ b/botocore/data/robomaker/2018-06-29/service-2.json @@ -62,7 +62,7 @@ {"shape":"ConcurrentDeploymentException"}, {"shape":"IdempotentParameterMismatchException"} ], - "documentation":"

Creates a deployment job.

" + "documentation":"

Deploys a specific version of a robot application to robots in a fleet.

The robot application must have a numbered applicationVersion for consistency reasons. To create a new version, use CreateRobotApplicationVersion or see Creating a Robot Application Version.

" }, "CreateFleet":{ "name":"CreateFleet", @@ -450,6 +450,22 @@ ], "documentation":"

Returns a list of simulation jobs. You can optionally provide filters to retrieve specific simulation jobs.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Lists all tags on an AWS RoboMaker resource.

" + }, "RegisterRobot":{ "name":"RegisterRobot", "http":{ @@ -503,6 +519,38 @@ ], "documentation":"

Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Adds or edits tags for an AWS RoboMaker resource.

Each tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty strings.

For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Removes the specified tags from the specified AWS RoboMaker resource.

To remove a tag, specify the tag key. To change the tag value of an existing tag key, use TagResource.

" + }, "UpdateRobotApplication":{ "name":"UpdateRobotApplication", "http":{ @@ -609,7 +657,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

", + "documentation":"

The failure percentage threshold was met.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -637,6 +685,10 @@ "deploymentApplicationConfigs":{ "shape":"DeploymentApplicationConfigs", "documentation":"

The deployment application configuration.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the deployment job.

" } } }, @@ -665,7 +717,7 @@ }, "failureCode":{ "shape":"DeploymentJobErrorCode", - "documentation":"

The failure code of the deployment job if it failed.

" + "documentation":"

The failure code of the simulation job if it failed:

BadPermissionError

AWS Greengrass requires a service-level role permission to access other services. The role must include the AWSGreengrassResourceAccessRolePolicy managed policy.

ExtractingBundleFailure

The robot application could not be extracted from the bundle.

FailureThresholdBreached

The percentage of robots that could not be updated exceeded the percentage set for the deployment.

GreengrassDeploymentFailed

The robot application could not be deployed to the robot.

GreengrassGroupVersionDoesNotExist

The AWS Greengrass group or version associated with a robot is missing.

InternalServerError

An internal error has occurred. Retry your request, but if the problem persists, contact us with details.

MissingRobotApplicationArchitecture

The robot application does not have a source that matches the architecture of the robot.

MissingRobotDeploymentResource

One or more of the resources specified for the robot application are missing. For example, does the robot application have the correct launch package and launch file?

PostLaunchFileFailure

The post-launch script failed.

PreLaunchFileFailure

The pre-launch script failed.

ResourceNotFound

One or more deployment resources are missing. For example, do robot application source bundles still exist?

RobotDeploymentNoResponse

There is no response from the robot. It might not be powered on or connected to the internet.

" }, "createdAt":{ "shape":"CreatedAt", @@ -674,6 +726,10 @@ "deploymentConfig":{ "shape":"DeploymentConfig", "documentation":"

The deployment configuration.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the deployment job.

" } } }, @@ -684,6 +740,10 @@ "name":{ "shape":"Name", "documentation":"

The name of the fleet.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the fleet.

" } } }, @@ -701,6 +761,10 @@ "createdAt":{ "shape":"CreatedAt", "documentation":"

The time, in milliseconds since the epoch, when the fleet was created.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the fleet.

" } } }, @@ -723,6 +787,10 @@ "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", "documentation":"

The robot software suite used by the robot application.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the robot application.

" } } }, @@ -756,6 +824,10 @@ "revisionId":{ "shape":"RevisionId", "documentation":"

The revision id of the robot application.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the robot application.

" } } }, @@ -825,6 +897,10 @@ "greengrassGroupId":{ "shape":"Id", "documentation":"

The Greengrass group id.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the robot.

" } } }, @@ -850,6 +926,10 @@ "architecture":{ "shape":"Architecture", "documentation":"

The target architecture of the robot.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the robot.

" } } }, @@ -882,6 +962,10 @@ "renderingEngine":{ "shape":"RenderingEngine", "documentation":"

The rendering engine for the simulation application.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the simulation application.

" } } }, @@ -923,6 +1007,10 @@ "revisionId":{ "shape":"RevisionId", "documentation":"

The revision id of the simulation application.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the simulation application.

" } } }, @@ -1003,7 +1091,7 @@ }, "iamRole":{ "shape":"IamRole", - "documentation":"

The IAM role that allows the simulation instance to call the AWS APIs that are specified in its associated policies on your behalf. This is how credentials are passed in to your simulation job. See how to specify AWS security credentials for your application.

" + "documentation":"

The IAM role name that allows the simulation instance to call the AWS APIs that are specified in its associated policies on your behalf. This is how credentials are passed in to your simulation job.

" }, "failureBehavior":{ "shape":"FailureBehavior", @@ -1017,6 +1105,10 @@ "shape":"SimulationApplicationConfigs", "documentation":"

The simulation application to use in the simulation job.

" }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the simulation job.

" + }, "vpcConfig":{ "shape":"VPCConfig", "documentation":"

If your simulation job accesses resources in a VPC, you provide this parameter identifying the list of security group IDs and subnet IDs. These must belong to the same VPC. You must provide at least one security group and one subnet ID.

" @@ -1044,7 +1136,7 @@ }, "failureCode":{ "shape":"SimulationJobErrorCode", - "documentation":"

The failure code of the simulation job if it failed.

" + "documentation":"

The failure code of the simulation job if it failed:

InternalServiceError

Internal service error.

RobotApplicationCrash

Robot application exited abnormally.

SimulationApplicationCrash

Simulation application exited abnormally.

BadPermissionsRobotApplication

Robot application bundle could not be downloaded.

BadPermissionsSimulationApplication

Simulation application bundle could not be downloaded.

BadPermissionsS3Output

Unable to publish outputs to customer-provided S3 bucket.

BadPermissionsCloudwatchLogs

Unable to publish logs to customer-provided CloudWatch Logs resource.

SubnetIpLimitExceeded

Subnet IP limit exceeded.

ENILimitExceeded

ENI limit exceeded.

BadPermissionsUserCredentials

Unable to use the Role provided.

InvalidBundleRobotApplication

Robot bundle cannot be extracted (invalid format, bundling error, or other issue).

InvalidBundleSimulationApplication

Simulation bundle cannot be extracted (invalid format, bundling error, or other issue).

RobotApplicationVersionMismatchedEtag

Etag for RobotApplication does not match value during version creation.

SimulationApplicationVersionMismatchedEtag

Etag for SimulationApplication does not match value during version creation.

" }, "clientRequestToken":{ "shape":"ClientRequestToken", @@ -1074,6 +1166,10 @@ "shape":"SimulationApplicationConfigs", "documentation":"

The simulation application used by the simulation job.

" }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the simulation job.

" + }, "vpcConfig":{ "shape":"VPCConfigResponse", "documentation":"

Information about the vpc configuration.

" @@ -1159,15 +1255,15 @@ "members":{ "application":{ "shape":"Arn", - "documentation":"

The application.

" + "documentation":"

The Amazon Resource Name (ARN) of the robot application.

" }, "applicationVersion":{ - "shape":"Version", + "shape":"DeploymentVersion", "documentation":"

The version of the application.

" }, "launchConfig":{ "shape":"DeploymentLaunchConfig", - "documentation":"

The launch configuration, usually roslaunch.

" + "documentation":"

The launch configuration.

" } }, "documentation":"

Information about a deployment application configuration.

" @@ -1267,19 +1363,19 @@ }, "preLaunchFile":{ "shape":"GenericString", - "documentation":"

The deployment pre-launch file. This file will be executed prior to the deployment launch file.

" + "documentation":"

The deployment pre-launch file. This file will be executed prior to the launch file.

" }, "launchFile":{ "shape":"GenericString", - "documentation":"

The deployment launch file.

" + "documentation":"

The launch file name.

" }, "postLaunchFile":{ "shape":"GenericString", - "documentation":"

The deployment post-launch file. This file will be executed after the deployment launch file.

" + "documentation":"

The deployment post-launch file. This file will be executed after the launch file.

" }, "environmentVariables":{ "shape":"EnvironmentVariableMap", - "documentation":"

An array of key/value pairs specifying environment variables for the deployment application.

" + "documentation":"

An array of key/value pairs specifying environment variables for the robot application

" } }, "documentation":"

Configuration information for a deployment launch.

" @@ -1294,6 +1390,12 @@ "Succeeded" ] }, + "DeploymentVersion":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[0-9]*" + }, "DeregisterRobotRequest":{ "type":"structure", "required":[ @@ -1372,6 +1474,10 @@ "robotDeploymentSummary":{ "shape":"RobotDeploymentSummary", "documentation":"

A list of robot deployment summaries.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the specified deployment job.

" } } }, @@ -1415,6 +1521,10 @@ "lastDeploymentTime":{ "shape":"CreatedAt", "documentation":"

The time of the last deployment.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the specified fleet.

" } } }, @@ -1462,6 +1572,10 @@ "lastUpdatedAt":{ "shape":"LastUpdatedAt", "documentation":"

The time, in milliseconds since the epoch, when the robot application was last updated.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the specified robot application.

" } } }, @@ -1513,6 +1627,10 @@ "lastDeploymentTime":{ "shape":"CreatedAt", "documentation":"

The time of the last deployment job.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the specified robot.

" } } }, @@ -1568,6 +1686,10 @@ "lastUpdatedAt":{ "shape":"LastUpdatedAt", "documentation":"

The time, in milliseconds since the epoch, when the simulation application was last updated.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the specified simulation application.

" } } }, @@ -1606,7 +1728,11 @@ }, "failureCode":{ "shape":"SimulationJobErrorCode", - "documentation":"

The failure code of the simulation job if it failed:

InternalServiceError

Internal service error

RobotApplicationCrash

Robot application exited abnormally (segfault, etc.)

SimulationApplicationCrash

Simulation application exited abnormally (segfault, etc.)

BadPermissionsRobotApplication

Robot application bundle could not be downloaded

BadPermissionsSimulationApplication

Simulation application bundle could not be downloaded

BadPermissionsS3Output

Unable to publish outputs to customer-provided S3 bucket

BadPermissionsCloudwatchLogs

Unable to publish logs to customer-provided CloudWatch Logs resource

SubnetIpLimitExceeded

Subnet IP limit exceeded

ENILimitExceeded

ENI limit exceeded

BadPermissionsUserCredentials

Unable to use the Role provided

InvalidBundleRobotApplication

Robot bundle cannot be extracted (invalid format, bundling error, etc.)

InvalidBundleSimulationApplication

Simulation bundle cannot be extracted (invalid format, bundling error, etc.)

RobotApplicationVersionMismatchedEtag

Etag for RobotApplication does not match value during version creation

SimulationApplicationVersionMismatchedEtag

Etag for SimulationApplication does not match value during version creation

" + "documentation":"

The failure code of the simulation job if it failed:

InternalServiceError

Internal service error.

RobotApplicationCrash

Robot application exited abnormally.

SimulationApplicationCrash

Simulation application exited abnormally.

BadPermissionsRobotApplication

Robot application bundle could not be downloaded.

BadPermissionsSimulationApplication

Simulation application bundle could not be downloaded.

BadPermissionsS3Output

Unable to publish outputs to customer-provided S3 bucket.

BadPermissionsCloudwatchLogs

Unable to publish logs to customer-provided CloudWatch Logs resource.

SubnetIpLimitExceeded

Subnet IP limit exceeded.

ENILimitExceeded

ENI limit exceeded.

BadPermissionsUserCredentials

Unable to use the Role provided.

InvalidBundleRobotApplication

Robot bundle cannot be extracted (invalid format, bundling error, or other issue).

InvalidBundleSimulationApplication

Simulation bundle cannot be extracted (invalid format, bundling error, or other issue).

RobotApplicationVersionMismatchedEtag

Etag for RobotApplication does not match value during version creation.

SimulationApplicationVersionMismatchedEtag

Etag for SimulationApplication does not match value during version creation.

" + }, + "failureReason":{ + "shape":"GenericString", + "documentation":"

Details about why the simulation job failed. For more information about troubleshooting, see Troubleshooting.

" }, "clientRequestToken":{ "shape":"ClientRequestToken", @@ -1636,6 +1762,10 @@ "shape":"SimulationApplicationConfigs", "documentation":"

A list of simulation applications.

" }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the specified simulation job.

" + }, "vpcConfig":{ "shape":"VPCConfigResponse", "documentation":"

The VPC configuration.

" @@ -1783,7 +1913,7 @@ }, "launchFile":{ "shape":"GenericString", - "documentation":"

The launch file.

" + "documentation":"

The launch file name.

" }, "environmentVariables":{ "shape":"EnvironmentVariableMap", @@ -1990,6 +2120,27 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The AWS RoboMaker Amazon Resource Name (ARN) with tags to be listed.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

The list of all tags added to the specified resource.

" + } + } + }, "MaxResults":{"type":"integer"}, "Name":{ "type":"string", @@ -2093,7 +2244,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

The specified resource already exists

", + "documentation":"

The specified resource already exists.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -2417,6 +2568,10 @@ "shape":"SimulationJobErrorCode", "documentation":"

The failure code of the simulation job if it failed.

" }, + "failureReason":{ + "shape":"GenericString", + "documentation":"

The reason why the simulation job failed.

" + }, "clientRequestToken":{ "shape":"ClientRequestToken", "documentation":"

A unique identifier for this SimulationJob request.

" @@ -2445,6 +2600,10 @@ "shape":"SimulationApplicationConfigs", "documentation":"

A list of simulation applications.

" }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the simulation job.

" + }, "vpcConfig":{ "shape":"VPCConfigResponse", "documentation":"

VPC configuration information.

" @@ -2468,7 +2627,10 @@ "InvalidBundleRobotApplication", "InvalidBundleSimulationApplication", "RobotApplicationVersionMismatchedEtag", - "SimulationApplicationVersionMismatchedEtag" + "SimulationApplicationVersionMismatchedEtag", + "WrongRegionS3Output", + "WrongRegionRobotApplication", + "WrongRegionSimulationApplication" ] }, "SimulationJobStatus":{ @@ -2650,7 +2812,7 @@ }, "failureCode":{ "shape":"DeploymentJobErrorCode", - "documentation":"

The failure code if the job fails.

" + "documentation":"

The failure code if the job fails:

InternalServiceError

Internal service error.

RobotApplicationCrash

Robot application exited abnormally.

SimulationApplicationCrash

Simulation application exited abnormally.

BadPermissionsRobotApplication

Robot application bundle could not be downloaded.

BadPermissionsSimulationApplication

Simulation application bundle could not be downloaded.

BadPermissionsS3Output

Unable to publish outputs to customer-provided S3 bucket.

BadPermissionsCloudwatchLogs

Unable to publish logs to customer-provided CloudWatch Logs resource.

SubnetIpLimitExceeded

Subnet IP limit exceeded.

ENILimitExceeded

ENI limit exceeded.

BadPermissionsUserCredentials

Unable to use the Role provided.

InvalidBundleRobotApplication

Robot bundle cannot be extracted (invalid format, bundling error, or other issue).

InvalidBundleSimulationApplication

Simulation bundle cannot be extracted (invalid format, bundling error, or other issue).

RobotApplicationVersionMismatchedEtag

Etag for RobotApplication does not match value during version creation.

SimulationApplicationVersionMismatchedEtag

Etag for SimulationApplication does not match value during version creation.

" }, "createdAt":{ "shape":"CreatedAt", @@ -2658,6 +2820,53 @@ } } }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9 _.\\-\\/+=:]*" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS RoboMaker resource you are tagging.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[a-zA-Z0-9 _.\\-\\/+=:]*" + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -2667,6 +2876,32 @@ "error":{"httpStatusCode":400}, "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS RoboMaker resource you are removing tags from.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

A list of tag keys that will be removed from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateRobotApplicationRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/route53/2013-04-01/paginators-1.json b/botocore/data/route53/2013-04-01/paginators-1.json index e0d2b2c0..8d8e433d 100644 --- a/botocore/data/route53/2013-04-01/paginators-1.json +++ b/botocore/data/route53/2013-04-01/paginators-1.json @@ -38,6 +38,12 @@ "result_key": [ "VPCs" ] + }, + "ListQueryLoggingConfigs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "QueryLoggingConfigs" } } } diff --git a/botocore/data/route53domains/2014-05-15/paginators-1.json b/botocore/data/route53domains/2014-05-15/paginators-1.json index 8d1a73ab..7ccbf9da 100644 --- a/botocore/data/route53domains/2014-05-15/paginators-1.json +++ b/botocore/data/route53domains/2014-05-15/paginators-1.json @@ -12,6 +12,12 @@ "input_token": "Marker", "output_token": "NextPageMarker", "result_key": "Operations" + }, + "ViewBilling": { + "input_token": "Marker", + "limit_key": "MaxItems", + "output_token": "NextPageMarker", + "result_key": "BillingRecords" } } } diff --git a/botocore/data/route53resolver/2018-04-01/paginators-1.json b/botocore/data/route53resolver/2018-04-01/paginators-1.json index ea142457..5a36213a 100644 --- a/botocore/data/route53resolver/2018-04-01/paginators-1.json +++ b/botocore/data/route53resolver/2018-04-01/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListTagsForResource": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Tags" + } + } } diff --git a/botocore/data/sagemaker/2017-07-24/paginators-1.json b/botocore/data/sagemaker/2017-07-24/paginators-1.json index f6d33286..dc7a411f 100644 --- a/botocore/data/sagemaker/2017-07-24/paginators-1.json +++ 
b/botocore/data/sagemaker/2017-07-24/paginators-1.json @@ -35,6 +35,84 @@ "output_token": "NextToken", "input_token": "NextToken", "limit_key": "MaxResults" + }, + "ListAlgorithms": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AlgorithmSummaryList" + }, + "ListCodeRepositories": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "CodeRepositorySummaryList" + }, + "ListCompilationJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "CompilationJobSummaries" + }, + "ListHyperParameterTuningJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "HyperParameterTuningJobSummaries" + }, + "ListLabelingJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LabelingJobSummaryList" + }, + "ListLabelingJobsForWorkteam": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LabelingJobSummaryList" + }, + "ListModelPackages": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ModelPackageSummaryList" + }, + "ListNotebookInstanceLifecycleConfigs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "NotebookInstanceLifecycleConfigs" + }, + "ListSubscribedWorkteams": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SubscribedWorkteams" + }, + "ListTrainingJobsForHyperParameterTuningJob": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TrainingJobSummaries" + }, + "ListTransformJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": 
"TransformJobSummaries" + }, + "ListWorkteams": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Workteams" + }, + "Search": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Results" } } } diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index c533cede..7c106a60 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -42,7 +42,7 @@ }, "input":{"shape":"CreateCodeRepositoryInput"}, "output":{"shape":"CreateCodeRepositoryOutput"}, - "documentation":"

Create a git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use git source control for the notebooks you create. The git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other git repository.

" + "documentation":"

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

" }, "CreateCompilationJob":{ "name":"CreateCompilationJob", @@ -229,7 +229,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteCodeRepositoryInput"}, - "documentation":"

Deletes the specified git repository from your account.

" + "documentation":"

Deletes the specified Git repository from your account.

" }, "DeleteEndpoint":{ "name":"DeleteEndpoint", @@ -326,7 +326,7 @@ }, "input":{"shape":"DescribeCodeRepositoryInput"}, "output":{"shape":"DescribeCodeRepositoryOutput"}, - "documentation":"

Gets details about the specified git repository.

" + "documentation":"

Gets details about the specified Git repository.

" }, "DescribeCompilationJob":{ "name":"DescribeCompilationJob", @@ -501,7 +501,7 @@ }, "input":{"shape":"ListCodeRepositoriesInput"}, "output":{"shape":"ListCodeRepositoriesOutput"}, - "documentation":"

Gets a list of the git repositories in your account.

" + "documentation":"

Gets a list of the Git repositories in your account.

" }, "ListCompilationJobs":{ "name":"ListCompilationJobs", @@ -778,7 +778,7 @@ }, "input":{"shape":"UpdateCodeRepositoryInput"}, "output":{"shape":"UpdateCodeRepositoryOutput"}, - "documentation":"

Updates the specified git repository with the specified values.

" + "documentation":"

Updates the specified Git repository with the specified values.

" }, "UpdateEndpoint":{ "name":"UpdateEndpoint", @@ -791,7 +791,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Deploys the new EndpointConfig specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You cannot update an endpoint with the current EndpointConfig. To update an endpoint, you must create a new EndpointConfig.

" + "documentation":"

Deploys the new EndpointConfig specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You cannot update an endpoint with the current EndpointConfig. To update an endpoint, you must create a new EndpointConfig.

" }, "UpdateEndpointWeightsAndCapacities":{ "name":"UpdateEndpointWeightsAndCapacities", @@ -1286,26 +1286,26 @@ "members":{ "CodeRepositoryName":{ "shape":"EntityName", - "documentation":"

The name of the git repository.

" + "documentation":"

The name of the Git repository.

" }, "CodeRepositoryArn":{ "shape":"CodeRepositoryArn", - "documentation":"

The Amazon Resource Name (ARN) of the git repository.

" + "documentation":"

The Amazon Resource Name (ARN) of the Git repository.

" }, "CreationTime":{ "shape":"CreationTime", - "documentation":"

The date and time that the git repository was created.

" + "documentation":"

The date and time that the Git repository was created.

" }, "LastModifiedTime":{ "shape":"LastModifiedTime", - "documentation":"

The date and time that the git repository was last modified.

" + "documentation":"

The date and time that the Git repository was last modified.

" }, "GitConfig":{ "shape":"GitConfig", - "documentation":"

Configuration details for the git repository, including the URL where it is located and the ARN of the AWS Secrets Manager secret that contains the credentials used to access the repository.

" + "documentation":"

Configuration details for the Git repository, including the URL where it is located and the ARN of the AWS Secrets Manager secret that contains the credentials used to access the repository.

" } }, - "documentation":"

Specifies summary information about a git repository.

" + "documentation":"

Specifies summary information about a Git repository.

" }, "CodeRepositorySummaryList":{ "type":"list", @@ -1560,7 +1560,7 @@ }, "CertifyForMarketplace":{ "shape":"CertifyForMarketplace", - "documentation":"

Whether to certify the algorithm so that it can be listed in AWS Marektplace.

" + "documentation":"

Whether to certify the algorithm so that it can be listed in AWS Marketplace.

" } } }, @@ -1583,7 +1583,7 @@ "members":{ "CodeRepositoryName":{ "shape":"EntityName", - "documentation":"

The name of the git repository. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).

" + "documentation":"

The name of the Git repository. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).

" }, "GitConfig":{ "shape":"GitConfig", @@ -1731,7 +1731,7 @@ }, "WarmStartConfig":{ "shape":"HyperParameterTuningJobWarmStartConfig", - "documentation":"

Specifies configuration for starting the hyperparameter tuning job using one or more previous tuning jobs as a starting point. The results of previous tuning jobs are used to inform which combinations of hyperparameters to search over in the new tuning job.

All training jobs launched by the new hyperparameter tuning job are evaluated by using the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM as the WarmStartType for the warm start configuration, the training job that performs the best in the new tuning job is compared to the best training jobs from the parent tuning jobs. From these, the training job that performs the best as measured by the objective metric is returned as the overall best training job.

All training jobs launched by parent hyperparameter tuning jobs and the new hyperparameter tuning jobs count against the limit of training jobs for the tuning job.

" + "documentation":"

Specifies the configuration for starting the hyperparameter tuning job using one or more previous tuning jobs as a starting point. The results of previous tuning jobs are used to inform which combinations of hyperparameters to search over in the new tuning job.

All training jobs launched by the new hyperparameter tuning job are evaluated by using the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM as the WarmStartType value for the warm start configuration, the training job that performs the best in the new tuning job is compared to the best training jobs from the parent tuning jobs. From these, the training job that performs the best as measured by the objective metric is returned as the overall best training job.

All training jobs launched by parent hyperparameter tuning jobs and the new hyperparameter tuning jobs count against the limit of training jobs for the tuning job.

" }, "Tags":{ "shape":"TagList", @@ -1949,15 +1949,15 @@ }, "AcceleratorTypes":{ "shape":"NotebookInstanceAcceleratorTypes", - "documentation":"

A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook intance. For more information, see Using Elastic Inference in Amazon SageMaker.

" + "documentation":"

A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

" }, "DefaultCodeRepository":{ "shape":"CodeRepositoryNameOrUrl", - "documentation":"

A git repository to associate with the notebook instance as its default code repository. This can be either the name of a git repository stored as a resource in your account, or the URL of a git repository in AWS CodeCommit or in any other git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "AdditionalCodeRepositories":{ "shape":"AdditionalCodeRepositoryNamesOrUrls", - "documentation":"

An array of up to 3 git repositories to associate with the notebook instance. These can be either the names of git repositories stored as resources in your account, or the URL of git repositories in AWS CodeCommit or in any other git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" } } }, @@ -2073,7 +2073,11 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specifed VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

" + "documentation":"

Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If you enable network isolation for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

" + }, + "EnableInterContainerTrafficEncryption":{ + "shape":"Boolean", + "documentation":"

To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training can take longer because of additional communications between ML compute instances.

" } } }, @@ -2107,7 +2111,7 @@ }, "MaxConcurrentTransforms":{ "shape":"MaxConcurrentTransforms", - "documentation":"

The maximum number of parallel requests that can be sent to each instance in a transform job. This is good for algorithms that implement multiple workers on larger instances . The default value is 1. To allow Amazon SageMaker to determine the appropriate number for MaxConcurrentTransforms, set the value to 0.

" + "documentation":"

The maximum number of parallel requests that can be sent to an algorithm container on an instance. This is good for algorithms that implement multiple workers on larger instances. The default value is 1. To allow Amazon SageMaker to determine the appropriate number for MaxConcurrentTransforms, do not set the value in the API.

" }, "MaxPayloadInMB":{ "shape":"MaxPayloadInMB", @@ -2115,7 +2119,7 @@ }, "BatchStrategy":{ "shape":"BatchStrategy", - "documentation":"

Determines the number of records included in a single mini-batch. SingleRecord means only one record is used per mini-batch. MultiRecord means a mini-batch is set to contain as many records that can fit within the MaxPayloadInMB limit.

Batch transform will automatically split your input data into whatever payload size is specified if you set SplitType to Line and BatchStrategy to MultiRecord. There's no need to split the dataset into smaller files or to use larger payload sizes unless the records in your dataset are very large.

" + "documentation":"

Determines the number of records to include in a mini-batch. If you want to include only one record in a mini-batch, specify SingleRecord. If you want mini-batches to contain a maximum of the number of records specified in the MaxPayloadInMB parameter, specify MultiRecord.

If you set SplitType to Line and BatchStrategy to MultiRecord, a batch transform automatically splits your input data into the specified payload size. There's no need to split the dataset into smaller files or to use larger payload sizes unless the records in your dataset are very large.

" }, "Environment":{ "shape":"TransformEnvironmentMap", @@ -2135,7 +2139,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of key-value pairs. Adding tags is optional. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" } } }, @@ -2217,7 +2221,7 @@ "members":{ "CodeRepositoryName":{ "shape":"EntityName", - "documentation":"

The name of the git repository to delete.

" + "documentation":"

The name of the Git repository to delete.

" } } }, @@ -2408,7 +2412,7 @@ }, "CertifyForMarketplace":{ "shape":"CertifyForMarketplace", - "documentation":"

Whether the algorithm is certified to be listed in AWS Marektplace.

" + "documentation":"

Whether the algorithm is certified to be listed in AWS Marketplace.

" } } }, @@ -2418,7 +2422,7 @@ "members":{ "CodeRepositoryName":{ "shape":"EntityName", - "documentation":"

The name of the git repository to describe.

" + "documentation":"

The name of the Git repository to describe.

" } } }, @@ -2433,11 +2437,11 @@ "members":{ "CodeRepositoryName":{ "shape":"EntityName", - "documentation":"

The name of the git repository.

" + "documentation":"

The name of the Git repository.

" }, "CodeRepositoryArn":{ "shape":"CodeRepositoryArn", - "documentation":"

The Amazon Resource Name (ARN) of the git repository.

" + "documentation":"

The Amazon Resource Name (ARN) of the Git repository.

" }, "CreationTime":{ "shape":"CreationTime", @@ -2983,7 +2987,7 @@ }, "NotebookInstanceName":{ "shape":"NotebookInstanceName", - "documentation":"

Name of the Amazon SageMaker notebook instance.

" + "documentation":"

The name of the Amazon SageMaker notebook instance.

" }, "NotebookInstanceStatus":{ "shape":"NotebookInstanceStatus", @@ -2991,7 +2995,7 @@ }, "FailureReason":{ "shape":"FailureReason", - "documentation":"

If status is failed, the reason it failed.

" + "documentation":"

If status is Failed, the reason it failed.

" }, "Url":{ "shape":"NotebookInstanceUrl", @@ -3011,15 +3015,15 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

Amazon Resource Name (ARN) of the IAM role associated with the instance.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role associated with the instance.

" }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

" + "documentation":"

The AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

" }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

Network interface IDs that Amazon SageMaker created at the time of creating the instance.

" + "documentation":"

The network interface IDs that Amazon SageMaker created at the time of creating the instance.

" }, "LastModifiedTime":{ "shape":"LastModifiedTime", @@ -3047,11 +3051,11 @@ }, "DefaultCodeRepository":{ "shape":"CodeRepositoryNameOrUrl", - "documentation":"

The git repository associated with the notebook instance as its default code repository. This can be either the name of a git repository stored as a resource in your account, or the URL of a git repository in AWS CodeCommit or in any other git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "AdditionalCodeRepositories":{ "shape":"AdditionalCodeRepositoryNamesOrUrls", - "documentation":"

An array of up to 3 git repositories associated with the notebook instance. These can be either the names of git repositories stored as resources in your account, or the URL of git repositories in AWS CodeCommit or in any other git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" } } }, @@ -3189,7 +3193,11 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

If True, inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specifed VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

" + "documentation":"

If you want to allow inbound or outbound network calls, except for calls between peers within a training cluster for distributed training, choose True. If you enable network isolation for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

" + }, + "EnableInterContainerTrafficEncryption":{ + "shape":"Boolean", + "documentation":"

To encrypt all communications between ML compute instances in distributed training, specify True. Encryption provides greater security for distributed training, but training takes longer because of the additional communications between ML compute instances.

" } } }, @@ -3241,11 +3249,11 @@ }, "MaxPayloadInMB":{ "shape":"MaxPayloadInMB", - "documentation":"

The maximum payload size , in MB used in the transform job.

" + "documentation":"

The maximum payload size, in MB, used in the transform job.

" }, "BatchStrategy":{ "shape":"BatchStrategy", - "documentation":"

SingleRecord means only one record was used per a batch. MultiRecord means batches contained as many records that could possibly fit within the MaxPayloadInMB limit.

" + "documentation":"

If you want to include only one record in a batch, specify SingleRecord. If you want batches to contain a maximum of the number of records specified in the MaxPayloadInMB parameter, specify MultiRecord.

" }, "Environment":{ "shape":"TransformEnvironmentMap", @@ -3273,7 +3281,7 @@ }, "TransformEndTime":{ "shape":"Timestamp", - "documentation":"

Indicates when the transform job is Completed, Stopped, or Failed. You are billed for the time interval between this time and the value of TransformStartTime.

" + "documentation":"

Indicates when the transform job has been completed, or has stopped or failed. You are billed for the time interval between this time and the value of TransformStartTime.

" }, "LabelingJobArn":{ "shape":"LabelingJobArn", @@ -3490,7 +3498,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" }, "EnvironmentKey":{ "type":"string", @@ -3609,18 +3617,18 @@ "members":{ "RepositoryUrl":{ "shape":"GitConfigUrl", - "documentation":"

The URL where the git repository is located.

" + "documentation":"

The URL where the Git repository is located.

" }, "Branch":{ "shape":"Branch", - "documentation":"

The default beach for the git repository.

" + "documentation":"

The default branch for the Git repository.

" }, "SecretArn":{ "shape":"SecretArn", "documentation":"

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format:

{\"username\": UserName, \"password\": Password}

" } }, - "documentation":"

Specifies configuration details for a git repository in your AWS account.

" + "documentation":"

Specifies configuration details for a Git repository in your AWS account.

" }, "GitConfigForUpdate":{ "type":"structure", @@ -3630,7 +3638,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format:

{\"username\": UserName, \"password\": Password}

" } }, - "documentation":"

Specifies configuration details for a git repository when the repository is updated.

" + "documentation":"

Specifies configuration details for a Git repository when the repository is updated.

" }, "GitConfigUrl":{ "type":"string", @@ -3812,6 +3820,10 @@ "EnableNetworkIsolation":{ "shape":"Boolean", "documentation":"

Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

" + }, + "EnableInterContainerTrafficEncryption":{ + "shape":"Boolean", + "documentation":"

To encrypt all communications between ML compute instances in distributed training, specify True. Encryption provides greater security for distributed training, but training takes longer because of the additional communications between ML compute instances.

" } }, "documentation":"

Defines the training jobs launched by a hyperparameter tuning job.

" @@ -3852,7 +3864,7 @@ }, "TrainingEndTime":{ "shape":"Timestamp", - "documentation":"

The date and time that the training job ended.

" + "documentation":"

Specifies the time when the training job ends on training instances. You are billed for the time interval between the value of TrainingStartTime and this time. For successful jobs and stopped jobs, this is the time after model artifacts are uploaded. For failed jobs, this is the time when Amazon SageMaker detects a job failure.

" }, "TrainingJobStatus":{ "shape":"TrainingJobStatus", @@ -4297,7 +4309,10 @@ }, "documentation":"

Provides counts for human-labeled tasks in the labeling job.

" }, - "LabelingJobAlgorithmSpecificationArn":{"type":"string"}, + "LabelingJobAlgorithmSpecificationArn":{ + "type":"string", + "max":2048 + }, "LabelingJobAlgorithmsConfig":{ "type":"structure", "required":["LabelingJobAlgorithmSpecificationArn"], @@ -4600,31 +4615,31 @@ "members":{ "CreationTimeAfter":{ "shape":"CreationTime", - "documentation":"

A filter that returns only git repositories that were created after the specified time.

" + "documentation":"

A filter that returns only Git repositories that were created after the specified time.

" }, "CreationTimeBefore":{ "shape":"CreationTime", - "documentation":"

A filter that returns only git repositories that were created before the specified time.

" + "documentation":"

A filter that returns only Git repositories that were created before the specified time.

" }, "LastModifiedTimeAfter":{ "shape":"Timestamp", - "documentation":"

A filter that returns only git repositories that were last modified after the specified time.

" + "documentation":"

A filter that returns only Git repositories that were last modified after the specified time.

" }, "LastModifiedTimeBefore":{ "shape":"Timestamp", - "documentation":"

A filter that returns only git repositories that were last modified before the specified time.

" + "documentation":"

A filter that returns only Git repositories that were last modified before the specified time.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of git repositories to return in the response.

" + "documentation":"

The maximum number of Git repositories to return in the response.

" }, "NameContains":{ "shape":"CodeRepositoryNameContains", - "documentation":"

A string in the git repositories name. This filter returns only repositories whose name contains the specified string.

" + "documentation":"

A string in the Git repositories name. This filter returns only repositories whose name contains the specified string.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

If the result of a ListCodeRepositoriesOutput request was truncated, the response includes a NextToken. To get the next set of git repositories, use the token in the next request.

" + "documentation":"

If the result of a ListCodeRepositoriesOutput request was truncated, the response includes a NextToken. To get the next set of Git repositories, use the token in the next request.

" }, "SortBy":{ "shape":"CodeRepositorySortBy", @@ -4642,11 +4657,11 @@ "members":{ "CodeRepositorySummaryList":{ "shape":"CodeRepositorySummaryList", - "documentation":"

Gets a list of summaries of the git repositories. Each summary specifies the following values for the repository:

  • Name

  • Amazon Resource Name (ARN)

  • Creation time

  • Last modified time

  • Configuration information, including the URL location of the repository and the ARN of the AWS Secrets Manager secret that contains the credentials used to access the repository.

" + "documentation":"

Gets a list of summaries of the Git repositories. Each summary specifies the following values for the repository:

  • Name

  • Amazon Resource Name (ARN)

  • Creation time

  • Last modified time

  • Configuration information, including the URL location of the repository and the ARN of the AWS Secrets Manager secret that contains the credentials used to access the repository.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

If the result of a ListCodeRepositoriesOutput request was truncated, the response includes a NextToken. To get the next set of git repositories, use the token in the next request.

" + "documentation":"

If the result of a ListCodeRepositoriesOutput request was truncated, the response includes a NextToken. To get the next set of Git repositories, use the token in the next request.

" } } }, @@ -5151,7 +5166,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

If the previous call to the ListNotebookInstances is truncated, the response includes a NextToken. You can use this token in your subsequent ListNotebookInstances request to fetch the next set of notebook instances.

You might specify a filter or a sort order in your request. When response is truncated, you must use the same values for the filer and sort order in the next request.

" + "documentation":"

If the previous call to the ListNotebookInstances is truncated, the response includes a NextToken. You can use this token in your subsequent ListNotebookInstances request to fetch the next set of notebook instances.

You might specify a filter or a sort order in your request. When the response is truncated, you must use the same values for the filter and sort order in the next request.

" }, "MaxResults":{ "shape":"MaxResults", @@ -5195,11 +5210,11 @@ }, "DefaultCodeRepositoryContains":{ "shape":"CodeRepositoryContains", - "documentation":"

A string in the name or URL of a git repository associated with this notebook instance. This filter returns only notebook instances associated with a git repository with a name that contains the specified string.

" + "documentation":"

A string in the name or URL of a Git repository associated with this notebook instance. This filter returns only notebook instances associated with a Git repository with a name that contains the specified string.

" }, "AdditionalCodeRepositoryEquals":{ "shape":"CodeRepositoryNameOrUrl", - "documentation":"

A filter that returns only notebook instances with associated with the specified git respository.

" + "documentation":"

A filter that returns only notebook instances associated with the specified Git repository.

" } } }, @@ -5858,7 +5873,7 @@ "documentation":"

A list of filters. Each filter acts on a property. Filters must contain at least one Filters value. For example, a NestedFilters call might include a filter on the PropertyName parameter of the InputDataConfig property: InputDataConfig.DataSource.S3DataSource.S3Uri.

" } }, - "documentation":"

Defines a list of NestedFilter objects. To satisfy the conditions specified in the NestedFilters call, a resource must satisfy the conditions of all of the filters.

For example, a NestedFilters could be defined using the training job's InputDataConfig property, this would be defined as a list of Channel objects.

A NestedFilters object contains multiple filters. For example, to find all training jobs whose name contains train and that have cat/data in their S3Uri (specified in InputDataConfig), you need to create a NestedFilters object that specifies the InputDataConfig property with the following Filter objects:

  • '{Name:\"InputDataConfig.ChannelName\", \"Operator\":\"EQUALS\", \"Value\":\"train\"}',

  • '{Name:\"InputDataConfig.DataSource.S3DataSource.S3Uri\", \"Operator\":\"CONTAINS\", \"Value\":\"cat/data\"}'

" + "documentation":"

Defines a list of NestedFilters objects. To satisfy the conditions specified in the NestedFilters call, a resource must satisfy the conditions of all of the filters.

For example, you could define a NestedFilters using the training job's InputDataConfig property to filter on Channel objects.

A NestedFilters object contains multiple filters. For example, to find all training jobs whose name contains train and that have cat/data in their S3Uri (specified in InputDataConfig), you need to create a NestedFilters object that specifies the InputDataConfig property with the following Filter objects:

  • '{Name:\"InputDataConfig.ChannelName\", \"Operator\":\"EQUALS\", \"Value\":\"train\"}',

  • '{Name:\"InputDataConfig.DataSource.S3DataSource.S3Uri\", \"Operator\":\"CONTAINS\", \"Value\":\"cat/data\"}'

" }, "NestedFiltersList":{ "type":"list", @@ -6044,11 +6059,11 @@ }, "DefaultCodeRepository":{ "shape":"CodeRepositoryNameOrUrl", - "documentation":"

The git repository associated with the notebook instance as its default code repository. This can be either the name of a git repository stored as a resource in your account, or the URL of a git repository in AWS CodeCommit or in any other git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "AdditionalCodeRepositories":{ "shape":"AdditionalCodeRepositoryNamesOrUrls", - "documentation":"

An array of up to 3 git repositories associated with the notebook instance. These can be either the names of git repositories stored as resources in your account, or the URL of git repositories in AWS CodeCommit or in any other git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" } }, "documentation":"

Provides summary information for an Amazon SageMaker notebook instance.

" @@ -6421,7 +6436,7 @@ "members":{ "UiTemplate":{ "shape":"UiTemplate", - "documentation":"

A Templateobject containing the worker UI template to render.

" + "documentation":"

A Template object containing the worker UI template to render.

" }, "Task":{ "shape":"RenderableTask", @@ -6929,7 +6944,7 @@ "members":{ "MaxRuntimeInSeconds":{ "shape":"MaxRuntimeInSeconds", - "documentation":"

The maximum length of time, in seconds, that the training job can run. If model training does not complete during this time, Amazon SageMaker ends the job. If value is not specified, default value is 1 day. Maximum value is 5 days.

" + "documentation":"

The maximum length of time, in seconds, that the training job can run. If model training does not complete during this time, Amazon SageMaker ends the job. If value is not specified, default value is 1 day. Maximum value is 28 days.

" } }, "documentation":"

Specifies how long model training can run. When model training reaches the limit, Amazon SageMaker ends the training job. Use this API to cap model training cost.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts, so the results of training are not lost.

Training algorithms provided by Amazon SageMaker automatically save the intermediate results of a model training job (this is a best-effort case, as the model might not be ready to save at some stages, for example when training has just started). This intermediate data is a valid model artifact. You can use it to create a model (CreateModel).

" @@ -7014,7 +7029,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^((?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TagKeyList":{ "type":"list", @@ -7404,7 +7419,7 @@ "members":{ "TrainingImage":{ "shape":"Image", - "documentation":"

The Amazon Amazon ECR registry path of the Docker image that contains the training algorithm.

" + "documentation":"

The Amazon ECR registry path of the Docker image that contains the training algorithm.

" }, "TrainingImageDigest":{ "shape":"ImageDigest", @@ -7469,7 +7484,7 @@ "members":{ "DataSource":{ "shape":"TransformDataSource", - "documentation":"

Describes the location of the channel data, meaning the S3 location of the input data that the model can consume.

" + "documentation":"

Describes the location of the channel data, that is, the S3 location of the input data that the model can consume.

" }, "ContentType":{ "shape":"ContentType", @@ -7477,11 +7492,11 @@ }, "CompressionType":{ "shape":"CompressionType", - "documentation":"

Compressing data helps save on storage space. If your transform data is compressed, specify the compression type. Amazon SageMaker automatically decompresses the data for the transform job accordingly. The default value is None.

" + "documentation":"

If your transform data is compressed, specify the compression type. Amazon SageMaker automatically decompresses the data for the transform job accordingly. The default value is None.

" }, "SplitType":{ "shape":"SplitType", - "documentation":"

The method to use to split the transform job's data files into smaller batches. Splitting is necessary when the total size of each object is too large to fit in a single request. You can also use data splitting to improve performance by processing multiple concurrent mini-batches. The default value for SplitType is None, which indicates that input data files are not split, and request payloads contain the entire contents of an input object. Set the value of this parameter to Line to split records on a newline character boundary. SplitType also supports a number of record-oriented binary data formats.

When splitting is enabled, the size of a mini-batch depends on the values of the BatchStrategy and MaxPayloadInMB parameters. When the value of BatchStrategy is MultiRecord, Amazon SageMaker sends the maximum number of records in each request, up to the MaxPayloadInMB limit. If the value of BatchStrategy is SingleRecord, Amazon SageMaker sends individual records in each request.

Some data formats represent a record as a binary payload wrapped with extra padding bytes. When splitting is applied to a binary data format, padding is removed if the value of BatchStrategy is set to SingleRecord. Padding is not removed if the value of BatchStrategy is set to MultiRecord.

For more information about the RecordIO data format, see Data Format in the MXNet documentation. For more information about the TFRecord fofmat, see Consuming TFRecord data in the TensorFlow documentation.

" + "documentation":"

The method to use to split the transform job's data into smaller batches. If you don't want to split the data, specify None. If you want to split records on a newline character boundary, specify Line. To split records according to the RecordIO format, specify RecordIO. The default value is None.

Amazon SageMaker sends the maximum number of records per batch in each request up to the MaxPayloadInMB limit. For more information, see RecordIO data format.

For information about the RecordIO format, see Data Format.

" } }, "documentation":"

Describes the input source of a transform job and the way the transform job consumes it.

" @@ -7628,7 +7643,7 @@ "documentation":"

If the transform job failed, the reason it failed.

" } }, - "documentation":"

Provides a summary of a transform job. Multiple TransformJobSummary objects are returned as a list after calling ListTransformJobs.

" + "documentation":"

Provides a summary of a transform job. Multiple TransformJobSummary objects are returned as a list in response to a ListTransformJobs call.

" }, "TransformOutput":{ "type":"structure", @@ -7636,7 +7651,7 @@ "members":{ "S3OutputPath":{ "shape":"S3Uri", - "documentation":"

The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix.

For every S3 object used as input for the transform job, the transformed data is stored in a corresponding subfolder in the location under the output prefix. For example, the input data s3://bucket-name/input-name-prefix/dataset01/data.csv will have the transformed data stored at s3://bucket-name/key-name-prefix/dataset01/, based on the original name, as a series of .part files (.part0001, part0002, etc).

" + "documentation":"

The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix.

For every S3 object used as input for the transform job, the transformed data is stored in a corresponding subfolder in the location under the output prefix. For example, for the input data s3://bucket-name/input-name-prefix/dataset01/data.csv the transformed data is stored at s3://bucket-name/key-name-prefix/dataset01/. This is based on the original name, as a series of .part files (.part0001, part0002, etc.).

" }, "Accept":{ "shape":"Accept", @@ -7644,7 +7659,7 @@ }, "AssembleWith":{ "shape":"AssemblyType", - "documentation":"

Defines how to assemble the results of the transform job as a single S3 object. You should select a format that is most convenient to you. To concatenate the results in binary format, specify None. To add a newline character at the end of every transformed record, specify Line.

" + "documentation":"

Defines how to assemble the results of the transform job as a single S3 object. Choose a format that is most convenient to you. To concatenate the results in binary format, specify None. To add a newline character at the end of every transformed record, specify Line.

" }, "KmsKeyId":{ "shape":"KmsKeyId", @@ -7739,7 +7754,7 @@ "members":{ "CodeRepositoryName":{ "shape":"EntityName", - "documentation":"

The name of the git repository to update.

" + "documentation":"

The name of the Git repository to update.

" }, "GitConfig":{ "shape":"GitConfigForUpdate", @@ -7753,7 +7768,7 @@ "members":{ "CodeRepositoryArn":{ "shape":"CodeRepositoryArn", - "documentation":"

The ARN of the git repository.

" + "documentation":"

The ARN of the Git repository.

" } } }, @@ -7841,11 +7856,11 @@ }, "DefaultCodeRepository":{ "shape":"CodeRepositoryNameOrUrl", - "documentation":"

The git repository to associate with the notebook instance as its default code repository. This can be either the name of a git repository stored as a resource in your account, or the URL of a git repository in AWS CodeCommit or in any other git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "AdditionalCodeRepositories":{ "shape":"AdditionalCodeRepositoryNamesOrUrls", - "documentation":"

An array of up to 3 git repositories to associate with the notebook instance. These can be either the names of git repositories stored as resources in your account, or the URL of git repositories in AWS CodeCommit or in any other git repository.. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "AcceleratorTypes":{ "shape":"NotebookInstanceAcceleratorTypes", @@ -7857,11 +7872,11 @@ }, "DisassociateDefaultCodeRepository":{ "shape":"DisassociateDefaultCodeRepository", - "documentation":"

The name or URL of the default git repository to remove from this notebook instance.

" + "documentation":"

The name or URL of the default Git repository to remove from this notebook instance.

" }, "DisassociateAdditionalCodeRepositories":{ "shape":"DisassociateAdditionalCodeRepositories", - "documentation":"

A list of names or URLs of the default git repositories to remove from this notebook instance.

" + "documentation":"

A list of names or URLs of the default Git repositories to remove from this notebook instance.

" } } }, diff --git a/botocore/data/secretsmanager/2017-10-17/paginators-1.json b/botocore/data/secretsmanager/2017-10-17/paginators-1.json index ea142457..0f62e8e1 100644 --- a/botocore/data/secretsmanager/2017-10-17/paginators-1.json +++ b/botocore/data/secretsmanager/2017-10-17/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListSecrets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SecretList" + } + } } diff --git a/botocore/data/secretsmanager/2017-10-17/service-2.json b/botocore/data/secretsmanager/2017-10-17/service-2.json index f32f277a..4f6ea420 100644 --- a/botocore/data/secretsmanager/2017-10-17/service-2.json +++ b/botocore/data/secretsmanager/2017-10-17/service-2.json @@ -187,7 +187,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's Resources element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see Using Resource-Based Policies for AWS Secrets Manager. For the complete description of the AWS policy syntax and grammar, see IAM JSON Policy Reference in the IAM User Guide.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutResourcePolicy

Related operations

  • To retrieve the resource policy that's attached to a secret, use GetResourcePolicy.

  • To delete the resource-based policy that's attached to a secret, use DeleteResourcePolicy.

  • To list all of the currently available secrets, use ListSecrets.

" + "documentation":"

Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's Resources element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see Using Resource-Based Policies for AWS Secrets Manager. For the complete description of the AWS policy syntax and grammar, see IAM JSON Policy Reference in the IAM User Guide.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutResourcePolicy

Related operations

  • To retrieve the resource policy that's attached to a secret, use GetResourcePolicy.

  • To delete the resource-based policy that's attached to a secret, use DeleteResourcePolicy.

  • To list all of the currently available secrets, use ListSecrets.

" }, "PutSecretValue":{ "name":"PutSecretValue", @@ -238,7 +238,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Configures and starts the asynchronous process of rotating this secret. If you include the configuration parameters, the operation sets those values for the secret and then immediately starts a rotation. If you do not include the configuration parameters, the operation starts a rotation with the values already stored in the secret. After the rotation completes, the protected service and its clients all use the new version of the secret.

This required configuration information includes the ARN of an AWS Lambda function and the time between scheduled rotations. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the protected service to match. After testing the new credentials, the function marks the new secret with the staging label AWSCURRENT so that your clients all immediately begin to use the new version. For more information about rotating secrets and how to configure a Lambda function to rotate the secrets for your protected service, see Rotating Secrets in AWS Secrets Manager in the AWS Secrets Manager User Guide.

Secrets Manager schedules the next rotation when the previous one is complete. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.

The rotation function must end with the versions of the secret in one of two states:

  • The AWSPENDING and AWSCURRENT staging labels are attached to the same version of the secret, or

  • The AWSPENDING staging label is not attached to any version of the secret.

If instead the AWSPENDING staging label is present but is not attached to the same version as AWSCURRENT then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:RotateSecret

  • lambda:InvokeFunction (on the function specified in the secret's metadata)

Related operations

" + "documentation":"

Configures and starts the asynchronous process of rotating this secret. If you include the configuration parameters, the operation sets those values for the secret and then immediately starts a rotation. If you do not include the configuration parameters, the operation starts a rotation with the values already stored in the secret. After the rotation completes, the protected service and its clients all use the new version of the secret.

This required configuration information includes the ARN of an AWS Lambda function and the time between scheduled rotations. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the protected service to match. After testing the new credentials, the function marks the new secret with the staging label AWSCURRENT so that your clients all immediately begin to use the new version. For more information about rotating secrets and how to configure a Lambda function to rotate the secrets for your protected service, see Rotating Secrets in AWS Secrets Manager in the AWS Secrets Manager User Guide.

Secrets Manager schedules the next rotation when the previous one is complete. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.

The rotation function must end with the versions of the secret in one of two states:

  • The AWSPENDING and AWSCURRENT staging labels are attached to the same version of the secret, or

  • The AWSPENDING staging label is not attached to any version of the secret.

If instead the AWSPENDING staging label is present but is not attached to the same version as AWSCURRENT then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:RotateSecret

  • lambda:InvokeFunction (on the function specified in the secret's metadata)

Related operations

" }, "TagResource":{ "name":"TagResource", @@ -306,7 +306,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Modifies the staging labels attached to a version of a secret. Staging labels are used to track a version as it progresses through the secret rotation process. You can attach a staging label to only one version of a secret at a time. If a staging label to be added is already attached to another version, then it is moved--removed from the other version first and then attached to this one. For more information about staging labels, see Staging Labels in the AWS Secrets Manager User Guide.

The staging labels that you specify in the VersionStage parameter are added to the existing list of staging labels--they don't replace it.

You can move the AWSCURRENT staging label to this version by including it in this call.

Whenever you move AWSCURRENT, Secrets Manager automatically moves the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecretVersionStage

Related operations

  • To get the list of staging labels that are currently associated with a version of a secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" + "documentation":"

Modifies the staging labels attached to a version of a secret. Staging labels are used to track a version as it progresses through the secret rotation process. You can attach a staging label to only one version of a secret at a time. If a staging label to be added is already attached to another version, then it is moved--removed from the other version first and then attached to this one. For more information about staging labels, see Staging Labels in the AWS Secrets Manager User Guide.

The staging labels that you specify in the VersionStage parameter are added to the existing list of staging labels--they don't replace it.

You can move the AWSCURRENT staging label to this version by including it in this call.

Whenever you move AWSCURRENT, Secrets Manager automatically moves the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecretVersionStage

Related operations

  • To get the list of staging labels that are currently associated with a version of a secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" } }, "shapes":{ @@ -375,11 +375,11 @@ }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret.

Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" + "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret.

Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" }, "Tags":{ "shape":"TagListType", - "documentation":"

(Optional) Specifies a list of user-defined tags that are attached to the secret. Each tag is a \"Key\" and \"Value\" pair of strings. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

  • Secrets Manager tag key names are case sensitive. A tag with the key \"ABC\" is a different tag from one with key \"abc\".

  • If you check tags in IAM policy Condition elements as part of your security strategy, then adding or removing a tag can change permissions. If the successful completion of this operation would result in you losing your permissions for this secret, then this operation is blocked and returns an Access Denied error.

This parameter requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"Key\":\"CostCenter\",\"Value\":\"12345\"},{\"Key\":\"environment\",\"Value\":\"production\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

The following basic restrictions apply to tags:

  • Maximum number of tags per secret—50

  • Maximum key length—127 Unicode characters in UTF-8

  • Maximum value length—255 Unicode characters in UTF-8

  • Tag keys and values are case sensitive.

  • Do not use the aws: prefix in your tag names or values because it is reserved for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.

  • If your tagging schema will be used across multiple services and resources, remember that other services might have restrictions on allowed characters. Generally allowed characters are: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.

" + "documentation":"

(Optional) Specifies a list of user-defined tags that are attached to the secret. Each tag is a \"Key\" and \"Value\" pair of strings. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

  • Secrets Manager tag key names are case sensitive. A tag with the key \"ABC\" is a different tag from one with key \"abc\".

  • If you check tags in IAM policy Condition elements as part of your security strategy, then adding or removing a tag can change permissions. If the successful completion of this operation would result in you losing your permissions for this secret, then this operation is blocked and returns an Access Denied error.

This parameter requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"Key\":\"CostCenter\",\"Value\":\"12345\"},{\"Key\":\"environment\",\"Value\":\"production\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

The following basic restrictions apply to tags:

  • Maximum number of tags per secret—50

  • Maximum key length—127 Unicode characters in UTF-8

  • Maximum value length—255 Unicode characters in UTF-8

  • Tag keys and values are case sensitive.

  • Do not use the aws: prefix in your tag names or values because it is reserved for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.

  • If your tagging schema will be used across multiple services and resources, remember that other services might have restrictions on allowed characters. Generally allowed characters are: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.

" } } }, @@ -910,7 +910,7 @@ }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret. Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" + "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret. Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" }, "VersionStages":{ "shape":"SecretVersionStagesType", @@ -1050,7 +1050,7 @@ }, "SecretBinaryType":{ "type":"blob", - "max":4096, + "max":7168, "min":0, "sensitive":true }, @@ -1064,7 +1064,7 @@ "members":{ "ARN":{ "shape":"SecretARNType", - "documentation":"

The Amazon Resource Name (ARN) of the secret.

For more information about ARNs in Secrets Manager, see Policy Resources in the AWS Secrets Manager User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the secret.

For more information about ARNs in Secrets Manager, see Policy Resources in the AWS Secrets Manager User Guide.

" }, "Name":{ "shape":"SecretNameType", @@ -1132,7 +1132,7 @@ }, "SecretStringType":{ "type":"string", - "max":4096, + "max":7168, "min":0, "sensitive":true }, @@ -1225,7 +1225,7 @@ }, "Tags":{ "shape":"TagListType", - "documentation":"

The tags to attach to the secret. Each element in the list consists of a Key and a Value.

This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For the AWS CLI, you can also use the syntax: --Tags Key=\"Key1\",Value=\"Value1\",Key=\"Key2\",Value=\"Value2\"[,…]

" + "documentation":"

The tags to attach to the secret. Each element in the list consists of a Key and a Value.

This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For the AWS CLI, you can also use the syntax: --Tags Key=\"Key1\",Value=\"Value1\",Key=\"Key2\",Value=\"Value2\"[,…]

" } } }, @@ -1247,7 +1247,7 @@ }, "TagKeys":{ "shape":"TagKeyListType", - "documentation":"

A list of tag key names to remove from the secret. You don't specify the value. Both the key and its associated value are removed.

This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

" + "documentation":"

A list of tag key names to remove from the secret. You don't specify the value. Both the key and its associated value are removed.

This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

" } } }, @@ -1278,7 +1278,7 @@ }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies updated text data that you want to encrypt and store in this new version of the secret. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text. You can also 'escape' the double quote character in the embedded JSON text by prefacing each with a backslash. For example, the following string is surrounded by double-quotes. All of the embedded double quotes are escaped:

\"[{\\\"username\\\":\\\"bob\\\"},{\\\"password\\\":\\\"abc123xyz456\\\"}]\"

" + "documentation":"

(Optional) Specifies updated text data that you want to encrypt and store in this new version of the secret. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text. You can also 'escape' the double quote character in the embedded JSON text by prefacing each with a backslash. For example, the following string is surrounded by double-quotes. All of the embedded double quotes are escaped:

\"[{\\\"username\\\":\\\"bob\\\"},{\\\"password\\\":\\\"abc123xyz456\\\"}]\"

" } } }, @@ -1340,5 +1340,5 @@ } } }, - "documentation":"AWS Secrets Manager API Reference

AWS Secrets Manager is a web service that enables you to store, manage, and retrieve, secrets.

This guide provides descriptions of the Secrets Manager API. For more information about using this service, see the AWS Secrets Manager User Guide.

API Version

This version of the Secrets Manager API Reference documents the Secrets Manager API version 2017-10-17.

As an alternative to using the API directly, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (such as Java, Ruby, .NET, iOS, and Android). The SDKs provide a convenient way to create programmatic access to AWS Secrets Manager. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the AWS SDKs to make programmatic API calls to Secrets Manager. However, you also can use the Secrets Manager HTTP Query API to make direct calls to the Secrets Manager web service. To learn more about the Secrets Manager HTTP Query API, see Making Query Requests in the AWS Secrets Manager User Guide.

Secrets Manager supports GET and POST requests for all actions. That is, the API doesn't require you to use GET for some actions and POST for others. However, GET requests are subject to the limitation size of a URL. Therefore, for operations that require larger sizes, use a POST request.

Support and Feedback for AWS Secrets Manager

We welcome your feedback. Send your comments to awssecretsmanager-feedback@amazon.com, or post your feedback and questions in the AWS Secrets Manager Discussion Forum. For more information about the AWS Discussion Forums, see Forums Help.

How examples are presented

The JSON that AWS Secrets Manager expects as your request parameters and that the service returns as a response to HTTP query requests are single, long strings without line breaks or white space formatting. The JSON shown in the examples is formatted with both line breaks and white space to improve readability. When example input parameters would also result in long strings that extend beyond the screen, we insert line breaks to enhance readability. You should always submit the input as a single JSON text string.

Logging API Requests

AWS Secrets Manager supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information that's collected by AWS CloudTrail, you can determine which requests were successfully made to Secrets Manager, who made the request, when it was made, and so on. For more about AWS Secrets Manager and its support for AWS CloudTrail, see Logging AWS Secrets Manager Events with AWS CloudTrail in the AWS Secrets Manager User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

" + "documentation":"AWS Secrets Manager API Reference

AWS Secrets Manager is a web service that enables you to store, manage, and retrieve secrets.

This guide provides descriptions of the Secrets Manager API. For more information about using this service, see the AWS Secrets Manager User Guide.

API Version

This version of the Secrets Manager API Reference documents the Secrets Manager API version 2017-10-17.

As an alternative to using the API directly, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (such as Java, Ruby, .NET, iOS, and Android). The SDKs provide a convenient way to create programmatic access to AWS Secrets Manager. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the AWS SDKs to make programmatic API calls to Secrets Manager. However, you also can use the Secrets Manager HTTP Query API to make direct calls to the Secrets Manager web service. To learn more about the Secrets Manager HTTP Query API, see Making Query Requests in the AWS Secrets Manager User Guide.

Secrets Manager supports GET and POST requests for all actions. That is, the API doesn't require you to use GET for some actions and POST for others. However, GET requests are subject to the size limitation of a URL. Therefore, for operations that require larger sizes, use a POST request.

Support and Feedback for AWS Secrets Manager

We welcome your feedback. Send your comments to awssecretsmanager-feedback@amazon.com, or post your feedback and questions in the AWS Secrets Manager Discussion Forum. For more information about the AWS Discussion Forums, see Forums Help.

How examples are presented

The JSON that AWS Secrets Manager expects as your request parameters and that the service returns as a response to HTTP query requests are single, long strings without line breaks or white space formatting. The JSON shown in the examples is formatted with both line breaks and white space to improve readability. When example input parameters would also result in long strings that extend beyond the screen, we insert line breaks to enhance readability. You should always submit the input as a single JSON text string.

Logging API Requests

AWS Secrets Manager supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information that's collected by AWS CloudTrail, you can determine which requests were successfully made to Secrets Manager, who made the request, when it was made, and so on. For more about AWS Secrets Manager and its support for AWS CloudTrail, see Logging AWS Secrets Manager Events with AWS CloudTrail in the AWS Secrets Manager User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

" } diff --git a/botocore/data/securityhub/2018-10-26/paginators-1.json b/botocore/data/securityhub/2018-10-26/paginators-1.json index ea142457..1a612b70 100644 --- a/botocore/data/securityhub/2018-10-26/paginators-1.json +++ b/botocore/data/securityhub/2018-10-26/paginators-1.json @@ -1,3 +1,40 @@ { - "pagination": {} + "pagination": { + "GetEnabledStandards": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "StandardsSubscriptions" + }, + "GetFindings": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Findings" + }, + "GetInsights": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Insights" + }, + "ListEnabledProductsForImport": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ProductSubscriptions" + }, + "ListInvitations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Invitations" + }, + "ListMembers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Members" + } + } } diff --git a/botocore/data/serverlessrepo/2017-09-08/paginators-1.json b/botocore/data/serverlessrepo/2017-09-08/paginators-1.json index ea142457..a39e5477 100644 --- a/botocore/data/serverlessrepo/2017-09-08/paginators-1.json +++ b/botocore/data/serverlessrepo/2017-09-08/paginators-1.json @@ -1,3 +1,22 @@ { - "pagination": {} + "pagination": { + "ListApplicationDependencies": { + "input_token": "NextToken", + "limit_key": "MaxItems", + "output_token": "NextToken", + "result_key": "Dependencies" + }, + "ListApplicationVersions": { + "input_token": "NextToken", + "limit_key": "MaxItems", + "output_token": "NextToken", + "result_key": "Versions" + }, + "ListApplications": { + "input_token": "NextToken", + "limit_key": "MaxItems", + 
"output_token": "NextToken", + "result_key": "Applications" + } + } } diff --git a/botocore/data/servicecatalog/2015-12-10/paginators-1.json b/botocore/data/servicecatalog/2015-12-10/paginators-1.json index bb399615..5770fefa 100644 --- a/botocore/data/servicecatalog/2015-12-10/paginators-1.json +++ b/botocore/data/servicecatalog/2015-12-10/paginators-1.json @@ -53,6 +53,48 @@ "output_token": "PageToken", "input_token": "PageToken", "limit_key": "PageSize" + }, + "ListOrganizationPortfolioAccess": { + "input_token": "PageToken", + "limit_key": "PageSize", + "output_token": "NextPageToken", + "result_key": "OrganizationNodes" + }, + "ListProvisionedProductPlans": { + "input_token": "PageToken", + "limit_key": "PageSize", + "output_token": "NextPageToken", + "result_key": "ProvisionedProductPlans" + }, + "ListProvisioningArtifactsForServiceAction": { + "input_token": "PageToken", + "limit_key": "PageSize", + "output_token": "NextPageToken", + "result_key": "ProvisioningArtifactViews" + }, + "ListRecordHistory": { + "input_token": "PageToken", + "limit_key": "PageSize", + "output_token": "NextPageToken", + "result_key": "RecordDetails" + }, + "ListServiceActions": { + "input_token": "PageToken", + "limit_key": "PageSize", + "output_token": "NextPageToken", + "result_key": "ServiceActionSummaries" + }, + "ListServiceActionsForProvisioningArtifact": { + "input_token": "PageToken", + "limit_key": "PageSize", + "output_token": "NextPageToken", + "result_key": "ServiceActionSummaries" + }, + "ScanProvisionedProducts": { + "input_token": "PageToken", + "limit_key": "PageSize", + "output_token": "NextPageToken", + "result_key": "ProvisionedProducts" } } } diff --git a/botocore/data/servicecatalog/2015-12-10/service-2.json b/botocore/data/servicecatalog/2015-12-10/service-2.json index 56efe80d..1a5ef25e 100644 --- a/botocore/data/servicecatalog/2015-12-10/service-2.json +++ b/botocore/data/servicecatalog/2015-12-10/service-2.json @@ -3859,7 +3859,7 @@ }, "Status":{ 
"shape":"ProvisionedProductStatus", - "documentation":"

The current status of the provisioned product.

  • AVAILABLE - Stable state, ready to perform any operation. The most recent operation succeeded and completed.

  • UNDER_CHANGE - Transitive state, operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.

  • TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.

  • ERROR - An unexpected error occurred, the provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.

" + "documentation":"

The current status of the provisioned product.

  • AVAILABLE - Stable state, ready to perform any operation. The most recent operation succeeded and completed.

  • UNDER_CHANGE - Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.

  • TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.

  • ERROR - An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.

  • PLAN_IN_PROGRESS - Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.

" }, "StatusMessage":{ "shape":"ProvisionedProductStatusMessage", @@ -3929,7 +3929,7 @@ }, "Status":{ "shape":"ProvisionedProductStatus", - "documentation":"

The current status of the provisioned product.

  • AVAILABLE - Stable state, ready to perform any operation. The most recent operation succeeded and completed.

  • UNDER_CHANGE - Transitive state, operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.

  • TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.

  • ERROR - An unexpected error occurred, the provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.

" + "documentation":"

The current status of the provisioned product.

  • AVAILABLE - Stable state, ready to perform any operation. The most recent operation succeeded and completed.

  • UNDER_CHANGE - Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.

  • TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.

  • ERROR - An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.

  • PLAN_IN_PROGRESS - Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.

" }, "StatusMessage":{ "shape":"ProvisionedProductStatusMessage", diff --git a/botocore/data/ses/2010-12-01/paginators-1.json b/botocore/data/ses/2010-12-01/paginators-1.json index 1d61869e..1eb0054f 100644 --- a/botocore/data/ses/2010-12-01/paginators-1.json +++ b/botocore/data/ses/2010-12-01/paginators-1.json @@ -11,6 +11,23 @@ "output_token": "NextToken", "input_token": "NextToken", "limit_key": "MaxResults" + }, + "ListConfigurationSets": { + "input_token": "NextToken", + "limit_key": "MaxItems", + "output_token": "NextToken", + "result_key": "ConfigurationSets" + }, + "ListReceiptRuleSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "RuleSets" + }, + "ListTemplates": { + "input_token": "NextToken", + "limit_key": "MaxItems", + "output_token": "NextToken", + "result_key": "TemplatesMetadata" } } } diff --git a/botocore/data/shield/2016-06-02/paginators-1.json b/botocore/data/shield/2016-06-02/paginators-1.json index 022e0dca..c5ded642 100644 --- a/botocore/data/shield/2016-06-02/paginators-1.json +++ b/botocore/data/shield/2016-06-02/paginators-1.json @@ -5,6 +5,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Protections" + }, + "ListAttacks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AttackSummaries" } } } diff --git a/botocore/data/shield/2016-06-02/service-2.json b/botocore/data/shield/2016-06-02/service-2.json index 8cee41b7..08f99a62 100644 --- a/botocore/data/shield/2016-06-02/service-2.json +++ b/botocore/data/shield/2016-06-02/service-2.json @@ -68,7 +68,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Enables AWS Shield Advanced for a specific AWS resource. The resource can be an Amazon CloudFront distribution, Elastic Load Balancing load balancer, Elastic IP Address, or an Amazon Route 53 hosted zone.

You can add protection to only a single resource with each CreateProtection request. If you want to add protection to multiple resources at once, use the AWS WAF console. For more information see Getting Started with AWS Shield Advanced and Add AWS Shield Advanced Protection to more AWS Resources.

" + "documentation":"

Enables AWS Shield Advanced for a specific AWS resource. The resource can be an Amazon CloudFront distribution, Elastic Load Balancing load balancer, AWS Global Accelerator accelerator, Elastic IP Address, or an Amazon Route 53 hosted zone.

You can add protection to only a single resource with each CreateProtection request. If you want to add protection to multiple resources at once, use the AWS WAF console. For more information see Getting Started with AWS Shield Advanced and Add AWS Shield Advanced Protection to more AWS Resources.

" }, "CreateSubscription":{ "name":"CreateSubscription", @@ -167,6 +167,7 @@ "output":{"shape":"DescribeProtectionResponse"}, "errors":[ {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"}, {"shape":"ResourceNotFoundException"} ], "documentation":"

Lists the details of a Protection object.

" @@ -302,6 +303,7 @@ "members":{ "message":{"shape":"errorMessage"} }, + "documentation":"

Exception that indicates the specified AttackId does not exist, or the requester does not have the appropriate permissions to access the AttackId.

", "exception":true }, "AccessDeniedForDependencyException":{ @@ -514,7 +516,7 @@ }, "ResourceArn":{ "shape":"ResourceArn", - "documentation":"

The ARN (Amazon Resource Name) of the resource to be protected.

The ARN should be in one of the following formats:

  • For an Application Load Balancer: arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id

  • For an Elastic Load Balancer (Classic Load Balancer): arn:aws:elasticloadbalancing:region:account-id:loadbalancer/load-balancer-name

  • For AWS CloudFront distribution: arn:aws:cloudfront::account-id:distribution/distribution-id

  • For Amazon Route 53: arn:aws:route53:::hostedzone/hosted-zone-id

  • For an Elastic IP address: arn:aws:ec2:region:account-id:eip-allocation/allocation-id

" + "documentation":"

The ARN (Amazon Resource Name) of the resource to be protected.

The ARN should be in one of the following formats:

  • For an Application Load Balancer: arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id

  • For an Elastic Load Balancer (Classic Load Balancer): arn:aws:elasticloadbalancing:region:account-id:loadbalancer/load-balancer-name

  • For an AWS CloudFront distribution: arn:aws:cloudfront::account-id:distribution/distribution-id

  • For an AWS Global Accelerator accelerator: arn:aws:globalaccelerator::account-id:accelerator/accelerator-id

  • For Amazon Route 53: arn:aws:route53:::hostedzone/hosted-zone-id

  • For an Elastic IP address: arn:aws:ec2:region:account-id:eip-allocation/allocation-id

" } } }, @@ -617,11 +619,14 @@ }, "DescribeProtectionRequest":{ "type":"structure", - "required":["ProtectionId"], "members":{ "ProtectionId":{ "shape":"ProtectionId", - "documentation":"

The unique identifier (ID) for the Protection object that is described.

" + "documentation":"

The unique identifier (ID) for the Protection object that is described. When submitting the DescribeProtection request you must provide either the ResourceArn or the ProtectionID, but not both.

" + }, + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN (Amazon Resource Name) of the AWS resource for the Protection object that is described. When submitting the DescribeProtection request you must provide either the ResourceArn or the ProtectionID, but not both.

" } } }, @@ -1159,5 +1164,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"AWS Shield Advanced

This is the AWS Shield Advanced API Reference. This guide is for developers who need detailed information about the AWS Shield Advanced API actions, data types, and errors. For detailed information about AWS WAF and AWS Shield Advanced features and an overview of how to use the AWS WAF and AWS Shield Advanced APIs, see the AWS WAF and AWS Shield Developer Guide.

" + "documentation":"AWS Shield Advanced

This is the AWS Shield Advanced API Reference. This guide is for developers who need detailed information about the AWS Shield Advanced API actions, data types, and errors. For detailed information about AWS WAF and AWS Shield Advanced features and an overview of how to use the AWS WAF and AWS Shield Advanced APIs, see the AWS WAF and AWS Shield Developer Guide.

" } diff --git a/botocore/data/signer/2017-08-25/paginators-1.json b/botocore/data/signer/2017-08-25/paginators-1.json index ea142457..1e049e7d 100644 --- a/botocore/data/signer/2017-08-25/paginators-1.json +++ b/botocore/data/signer/2017-08-25/paginators-1.json @@ -1,3 +1,22 @@ { - "pagination": {} + "pagination": { + "ListSigningJobs": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "jobs" + }, + "ListSigningPlatforms": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "platforms" + }, + "ListSigningProfiles": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "profiles" + } + } } diff --git a/botocore/data/sms-voice/2018-09-05/service-2.json b/botocore/data/sms-voice/2018-09-05/service-2.json index 7938a2ce..e4fc8859 100644 --- a/botocore/data/sms-voice/2018-09-05/service-2.json +++ b/botocore/data/sms-voice/2018-09-05/service-2.json @@ -166,6 +166,32 @@ } ], "documentation" : "Obtain information about an event destination, including the types of events it reports, the Amazon Resource Name (ARN) of the destination, and the name of the event destination." 
}, + "ListConfigurationSets" : { + "name" : "ListConfigurationSets", + "http" : { + "method" : "GET", + "requestUri" : "/v1/sms-voice/configuration-sets", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListConfigurationSetsRequest" + }, + "output" : { + "shape" : "ListConfigurationSetsResponse", + "documentation" : "ListConfigurationSetsResponse" + }, + "errors" : [ { + "shape" : "TooManyRequestsException", + "documentation" : "TooManyRequestsException" + }, { + "shape" : "BadRequestException", + "documentation" : "BadRequestException" + }, { + "shape" : "InternalServiceErrorException", + "documentation" : "InternalServiceErrorException" + } ], + "documentation" : "List all of the configuration sets associated with your Amazon Pinpoint account in the current region." + }, "SendVoiceMessage" : { "name" : "SendVoiceMessage", "http" : { @@ -225,12 +251,12 @@ "shapes" : { "AlreadyExistsException" : { "type" : "structure", - "documentation" : "The resource specified in your request already exists.", "members" : { "Message" : { "shape" : "String" } }, + "documentation" : "The resource specified in your request already exists.", "exception" : true, "error" : { "httpStatusCode" : 409 @@ -238,12 +264,12 @@ }, "BadRequestException" : { "type" : "structure", - "documentation" : "The input you provided is invalid.", "members" : { "Message" : { "shape" : "String" } }, + "documentation" : "The input you provided is invalid.", "exception" : true, "error" : { "httpStatusCode" : 400 @@ -278,6 +304,13 @@ "documentation" : "An object that contains information about an event destination that sends data to Amazon CloudWatch Logs.", "required" : [ ] }, + "ConfigurationSets" : { + "type" : "list", + "documentation" : "An array that contains all of the configuration sets in your Amazon Pinpoint account in the current AWS Region.", + "member" : { + "shape" : "WordCharactersWithDelimiters" + } + }, "CreateConfigurationSetEventDestinationRequest" : { "type" : "structure", "members" : 
{ @@ -477,29 +510,63 @@ }, "LimitExceededException" : { "type" : "structure", - "documentation" : "There are too many instances of the specified resource type.", - "exception" : true, "members" : { "Message" : { "shape" : "String" } }, + "documentation" : "There are too many instances of the specified resource type.", + "exception" : true, "error" : { "httpStatusCode" : 412 } }, + "ListConfigurationSetsRequest" : { + "type" : "structure", + "members" : { + "NextToken" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "NextToken", + "documentation" : "A token returned from a previous call to the API that indicates the position in the list of results." + }, + "PageSize" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "PageSize", + "documentation" : "Used to specify the number of items that should be returned in the response." + } + } + }, + "ListConfigurationSetsResponse" : { + "type" : "structure", + "members" : { + "ConfigurationSets" : { + "shape" : "ConfigurationSets", + "documentation" : "An object that contains a list of configuration sets for your account in the current region." + }, + "NextToken" : { + "shape" : "NextTokenString", + "documentation" : "A token returned from a previous call to ListConfigurationSets to indicate the position in the list of configuration sets." + } + }, + "documentation": "An object that contains information about the configuration sets for your account in the current region." 
+ }, + "NextTokenString" : { + "type" : "string" + }, "NonEmptyString" : { "type" : "string" }, "NotFoundException" : { "type" : "structure", - "documentation" : "The resource you attempted to access doesn't exist.", - "exception" : true, "members" : { "Message" : { "shape" : "String" } }, + "documentation" : "The resource you attempted to access doesn't exist.", + "exception" : true, "error" : { "httpStatusCode" : 404 } @@ -593,13 +660,13 @@ }, "TooManyRequestsException" : { "type" : "structure", - "documentation" : "You've issued too many requests to the resource. Wait a few minutes, and then try again.", - "exception" : true, "members" : { "Message" : { "shape" : "String" } }, + "documentation" : "You've issued too many requests to the resource. Wait a few minutes, and then try again.", + "exception" : true, "error" : { "httpStatusCode" : 429 } diff --git a/botocore/data/sms/2016-10-24/paginators-1.json b/botocore/data/sms/2016-10-24/paginators-1.json index 6e184415..52a8d570 100644 --- a/botocore/data/sms/2016-10-24/paginators-1.json +++ b/botocore/data/sms/2016-10-24/paginators-1.json @@ -23,6 +23,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "serverList" + }, + "ListApps": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "apps" } } } diff --git a/botocore/data/snowball/2016-06-30/paginators-1.json b/botocore/data/snowball/2016-06-30/paginators-1.json index 79596df9..e4e974f3 100644 --- a/botocore/data/snowball/2016-06-30/paginators-1.json +++ b/botocore/data/snowball/2016-06-30/paginators-1.json @@ -11,6 +11,24 @@ "output_token": "NextToken", "input_token": "NextToken", "result_key": "Addresses" + }, + "ListClusterJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "JobListEntries" + }, + "ListClusters": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": 
"ClusterListEntries" + }, + "ListCompatibleImages": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "CompatibleImages" } } } diff --git a/botocore/data/sns/2010-03-31/paginators-1.json b/botocore/data/sns/2010-03-31/paginators-1.json index 455e4708..1c30bcc8 100644 --- a/botocore/data/sns/2010-03-31/paginators-1.json +++ b/botocore/data/sns/2010-03-31/paginators-1.json @@ -24,6 +24,11 @@ "input_token": "NextToken", "output_token": "NextToken", "result_key": "Topics" + }, + "ListPhoneNumbersOptedOut": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "phoneNumbers" } } } diff --git a/botocore/data/ssm/2014-11-06/paginators-1.json b/botocore/data/ssm/2014-11-06/paginators-1.json index 206424a0..e7b95820 100644 --- a/botocore/data/ssm/2014-11-06/paginators-1.json +++ b/botocore/data/ssm/2014-11-06/paginators-1.json @@ -71,6 +71,174 @@ "output_token": "NextToken", "input_token": "NextToken", "limit_key": "MaxResults" + }, + "DescribeAutomationExecutions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AutomationExecutionMetadataList" + }, + "DescribeAutomationStepExecutions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "StepExecutions" + }, + "DescribeAvailablePatches": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Patches" + }, + "DescribeEffectiveInstanceAssociations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Associations" + }, + "DescribeEffectivePatchesForPatchBaseline": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "EffectivePatches" + }, + "DescribeInstanceAssociationsStatus": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": 
"NextToken", + "result_key": "InstanceAssociationStatusInfos" + }, + "DescribeInstancePatchStates": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "InstancePatchStates" + }, + "DescribeInstancePatchStatesForPatchGroup": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "InstancePatchStates" + }, + "DescribeInstancePatches": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Patches" + }, + "DescribeInventoryDeletions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "InventoryDeletions" + }, + "DescribeMaintenanceWindowExecutionTaskInvocations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "WindowExecutionTaskInvocationIdentities" + }, + "DescribeMaintenanceWindowExecutionTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "WindowExecutionTaskIdentities" + }, + "DescribeMaintenanceWindowExecutions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "WindowExecutions" + }, + "DescribeMaintenanceWindowSchedule": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ScheduledWindowExecutions" + }, + "DescribeMaintenanceWindowTargets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Targets" + }, + "DescribeMaintenanceWindowTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Tasks" + }, + "DescribeMaintenanceWindows": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "WindowIdentities" + }, + 
"DescribeMaintenanceWindowsForTarget": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "WindowIdentities" + }, + "DescribePatchBaselines": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "BaselineIdentities" + }, + "DescribePatchGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Mappings" + }, + "DescribeSessions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Sessions" + }, + "GetInventorySchema": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Schemas" + }, + "ListAssociationVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AssociationVersions" + }, + "ListComplianceItems": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ComplianceItems" + }, + "ListComplianceSummaries": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ComplianceSummaryItems" + }, + "ListDocumentVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DocumentVersions" + }, + "ListResourceComplianceSummaries": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ResourceComplianceSummaryItems" + }, + "ListResourceDataSync": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ResourceDataSyncItems" } } } diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index 9d9dcf40..fd58afe6 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ 
b/botocore/data/ssm/2014-11-06/service-2.json @@ -1802,6 +1802,10 @@ "CreatedDate":{ "shape":"CreatedDate", "documentation":"

The date the activation was created.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags assigned to the activation.

" } }, "documentation":"

An activation registers one or more on-premises servers or virtual machines (VMs) with AWS so that you can configure those servers or VMs using Run Command. A server or VM that has been registered with AWS is called a managed instance.

" @@ -1979,6 +1983,10 @@ "shape":"DocumentVersion", "documentation":"

The document version.

" }, + "AutomationTargetParameterName":{ + "shape":"AutomationTargetParameterName", + "documentation":"

Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls.

" + }, "Parameters":{ "shape":"Parameters", "documentation":"

A description of the parameters for a document.

" @@ -2469,9 +2477,12 @@ "AttachmentInformation":{ "type":"structure", "members":{ - "Name":{"shape":"AttachmentName"} + "Name":{ + "shape":"AttachmentName", + "documentation":"

The name of the attachment.

" + } }, - "documentation":"

An attribute of an attachment, such as the attachment name or size.

" + "documentation":"

An attribute of an attachment, such as the attachment name.

" }, "AttachmentInformationList":{ "type":"list", @@ -2804,7 +2815,7 @@ }, "AutomationType":{ "shape":"AutomationType", - "documentation":"

Use this filter with DescribeAutomationExecution. Specify either Local of CrossAccount. CrossAccount is an Automation that executes in multiple AWS Regions and accounts. For more information, see Concurrently Executing Automations in Multiple AWS Regions and Accounts in the AWS Systems Manager User Guide.

" + "documentation":"

Use this filter with DescribeAutomationExecutions. Specify either Local or CrossAccount. CrossAccount is an Automation that executes in multiple AWS Regions and accounts. For more information, see Concurrently Executing Automations in Multiple AWS Regions and Accounts in the AWS Systems Manager User Guide.

" } }, "documentation":"

Details about a specific Automation execution.

" @@ -2865,6 +2876,11 @@ "documentation":"

The specified step name and execution ID don't exist. Verify the information and try again.

", "exception":true }, + "AutomationTargetParameterName":{ + "type":"string", + "max":50, + "min":1 + }, "AutomationType":{ "type":"string", "enum":[ @@ -3583,6 +3599,10 @@ "ExpirationDate":{ "shape":"ExpirationDate", "documentation":"

The date by which this activation request should expire. The default value is 24 hours.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an activation to identify which servers or virtual machines (VMs) in your on-premises environment you intend to activate. In this case, you could specify the following key name/value pairs:

  • Key=OS,Value=Windows

  • Key=Environment,Value=Production

When you install SSM Agent on your on-premises servers and VMs, you specify an activation ID and code. When you specify the activation ID and code, tags assigned to the activation are automatically applied to the on-premises servers or VMs.

You can't add tags to or delete tags from an existing activation. You can tag your on-premises servers and VMs after they connect to Systems Manager for the first time and are assigned a managed instance ID. This means they are listed in the AWS Systems Manager console with an ID that is prefixed with \"mi-\". For information about how to add tags to your managed instances, see AddTagsToResource. For information about how to remove tags from your managed instances, see RemoveTagsFromResource.

" } } }, @@ -3630,6 +3650,10 @@ "shape":"Parameters", "documentation":"

A description of the parameters for a document.

" }, + "AutomationTargetParameterName":{ + "shape":"AutomationTargetParameterName", + "documentation":"

Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls.

" + }, "DocumentVersion":{ "shape":"DocumentVersion", "documentation":"

The document version.

" @@ -3714,6 +3738,10 @@ "shape":"AssociationName", "documentation":"

Specify a descriptive name for the association.

" }, + "AutomationTargetParameterName":{ + "shape":"AutomationTargetParameterName", + "documentation":"

Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls.

" + }, "MaxErrors":{ "shape":"MaxErrors", "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" @@ -3771,6 +3799,10 @@ "TargetType":{ "shape":"TargetType", "documentation":"

Specify a target type to define the kinds of resources the document can run on. For example, to run a document on EC2 instances, specify the following value: /AWS::EC2::Instance. If you specify a value of '/' the document can run on all types of resources. If you don't specify a value, the document can't run on any resources. For a list of valid resource types, see AWS Resource Types Reference in the AWS CloudFormation User Guide.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an SSM document to identify the types of targets or the environment where it will run. In this case, you could specify the following key name/value pairs:

  • Key=OS,Value=Windows

  • Key=Environment,Value=Production

To add tags to an existing SSM document, use the AddTagsToResource action.

" } } }, @@ -3833,6 +3865,10 @@ "shape":"ClientToken", "documentation":"

User-provided idempotency token.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a Maintenance Window to identify the type of tasks it will run, the types of targets, and the environment it will run in. In this case, you could specify the following key name/value pairs:

  • Key=TaskType,Value=AgentUpdate

  • Key=OS,Value=Windows

  • Key=Environment,Value=Production

To add tags to an existing Maintenance Window, use the AddTagsToResource action.

" } } }, @@ -3898,6 +3934,10 @@ "shape":"ClientToken", "documentation":"

User-provided idempotency token.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a patch baseline to identify the severity level of patches it specifies and the operating system family it applies to. In this case, you could specify the following key name/value pairs:

  • Key=PatchSeverity,Value=Critical

  • Key=OS,Value=Windows

To add tags to an existing patch baseline, use the AddTagsToResource action.

" } } }, @@ -4396,7 +4436,7 @@ }, "AssociationVersion":{ "shape":"AssociationVersion", - "documentation":"

Specify the association version to retrieve. To view the latest version, either specify $LATEST for this parameter, or omit this parameter. To view a list of all associations for an instance, use ListInstanceAssociations. To get a list of versions for a specific association, use ListAssociationVersions.

" + "documentation":"

Specify the association version to retrieve. To view the latest version, either specify $LATEST for this parameter, or omit this parameter. To view a list of all associations for an instance, use ListAssociations. To get a list of versions for a specific association, use ListAssociationVersions.

" } } }, @@ -9911,7 +9951,7 @@ "documentation":"

The value for the filter key.

See PatchFilter for lists of valid values for each key based on operating system type.

" } }, - "documentation":"

Defines a patch filter.

A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. The key MSRC_SEVERITY, however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.

Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.

Windows Operating Systems

The supported keys for Windows operating systems are PRODUCT, CLASSIFICATION, and MSRC_SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Windows7

  • Windows8

  • Windows8.1

  • Windows8Embedded

  • Windows10

  • Windows10LTSB

  • WindowsServer2008

  • WindowsServer2008R2

  • WindowsServer2012

  • WindowsServer2012R2

  • WindowsServer2016

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • CriticalUpdates

  • DefinitionUpdates

  • Drivers

  • FeaturePacks

  • SecurityUpdates

  • ServicePacks

  • Tools

  • UpdateRollups

  • Updates

  • Upgrades

Supported key: MSRC_SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

  • Unspecified

Ubuntu Operating Systems

The supported keys for Ubuntu operating systems are PRODUCT, PRIORITY, and SECTION. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Ubuntu14.04

  • Ubuntu16.04

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: PRIORITY

Supported values:

  • Required

  • Important

  • Standard

  • Optional

  • Extra

Supported key: SECTION

Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.

Amazon Linux Operating Systems

The supported keys for Amazon Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • AmazonLinux2012.03

  • AmazonLinux2012.09

  • AmazonLinux2013.03

  • AmazonLinux2013.09

  • AmazonLinux2014.03

  • AmazonLinux2014.09

  • AmazonLinux2015.03

  • AmazonLinux2015.09

  • AmazonLinux2016.03

  • AmazonLinux2016.09

  • AmazonLinux2017.03

  • AmazonLinux2017.09

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

Amazon Linux 2 Operating Systems

The supported keys for Amazon Linux 2 operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • AmazonLinux2

  • AmazonLinux2.0

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

RedHat Enterprise Linux (RHEL) Operating Systems

The supported keys for RedHat Enterprise Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • RedhatEnterpriseLinux6.5

  • RedhatEnterpriseLinux6.6

  • RedhatEnterpriseLinux6.7

  • RedhatEnterpriseLinux6.8

  • RedhatEnterpriseLinux6.9

  • RedhatEnterpriseLinux7.0

  • RedhatEnterpriseLinux7.1

  • RedhatEnterpriseLinux7.2

  • RedhatEnterpriseLinux7.3

  • RedhatEnterpriseLinux7.4

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

SUSE Linux Enterprise Server (SLES) Operating Systems

The supported keys for SLES operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Suse12.0

  • Suse12.1

  • Suse12.2

  • Suse12.3

  • Suse12.4

  • Suse12.5

  • Suse12.6

  • Suse12.7

  • Suse12.8

  • Suse12.9

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Recommended

  • Optional

  • Feature

  • Document

  • Yast

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

CentOS Operating Systems

The supported keys for CentOS operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • CentOS6.5

  • CentOS6.6

  • CentOS6.7

  • CentOS6.8

  • CentOS6.9

  • CentOS7.0

  • CentOS7.1

  • CentOS7.2

  • CentOS7.3

  • CentOS7.4

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

" + "documentation":"

Defines a patch filter.

A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. The key MSRC_SEVERITY, however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.

Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.

Windows Operating Systems

The supported keys for Windows operating systems are PRODUCT, CLASSIFICATION, and MSRC_SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Windows7

  • Windows8

  • Windows8.1

  • Windows8Embedded

  • Windows10

  • Windows10LTSB

  • WindowsServer2008

  • WindowsServer2008R2

  • WindowsServer2012

  • WindowsServer2012R2

  • WindowsServer2016

  • WindowsServer2019

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • CriticalUpdates

  • DefinitionUpdates

  • Drivers

  • FeaturePacks

  • SecurityUpdates

  • ServicePacks

  • Tools

  • UpdateRollups

  • Updates

  • Upgrades

Supported key: MSRC_SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

  • Unspecified

Ubuntu Operating Systems

The supported keys for Ubuntu operating systems are PRODUCT, PRIORITY, and SECTION. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Ubuntu14.04

  • Ubuntu16.04

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: PRIORITY

Supported values:

  • Required

  • Important

  • Standard

  • Optional

  • Extra

Supported key: SECTION

Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.

Amazon Linux Operating Systems

The supported keys for Amazon Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • AmazonLinux2012.03

  • AmazonLinux2012.09

  • AmazonLinux2013.03

  • AmazonLinux2013.09

  • AmazonLinux2014.03

  • AmazonLinux2014.09

  • AmazonLinux2015.03

  • AmazonLinux2015.09

  • AmazonLinux2016.03

  • AmazonLinux2016.09

  • AmazonLinux2017.03

  • AmazonLinux2017.09

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

Amazon Linux 2 Operating Systems

The supported keys for Amazon Linux 2 operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • AmazonLinux2

  • AmazonLinux2.0

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

RedHat Enterprise Linux (RHEL) Operating Systems

The supported keys for RedHat Enterprise Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • RedhatEnterpriseLinux6.5

  • RedhatEnterpriseLinux6.6

  • RedhatEnterpriseLinux6.7

  • RedhatEnterpriseLinux6.8

  • RedhatEnterpriseLinux6.9

  • RedhatEnterpriseLinux7.0

  • RedhatEnterpriseLinux7.1

  • RedhatEnterpriseLinux7.2

  • RedhatEnterpriseLinux7.3

  • RedhatEnterpriseLinux7.4

  • RedhatEnterpriseLinux7.5

  • RedhatEnterpriseLinux7.6

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

SUSE Linux Enterprise Server (SLES) Operating Systems

The supported keys for SLES operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Suse12.0

  • Suse12.1

  • Suse12.2

  • Suse12.3

  • Suse12.4

  • Suse12.5

  • Suse12.6

  • Suse12.7

  • Suse12.8

  • Suse12.9

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Recommended

  • Optional

  • Feature

  • Document

  • Yast

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

CentOS Operating Systems

The supported keys for CentOS operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • CentOS6.5

  • CentOS6.6

  • CentOS6.7

  • CentOS6.8

  • CentOS6.9

  • CentOS7.0

  • CentOS7.1

  • CentOS7.2

  • CentOS7.3

  • CentOS7.4

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

" }, "PatchFilterGroup":{ "type":"structure", @@ -10112,7 +10152,7 @@ }, "Configuration":{ "shape":"PatchSourceConfiguration", - "documentation":"

The value of the yum repo configuration. For example:

cachedir=/var/cache/yum/$basesearch

$releasever

keepcache=0

debuglevel=2

" + "documentation":"

The value of the yum repo configuration. For example:

[main]

cachedir=/var/cache/yum/$basesearch$releasever

keepcache=0

debuglevel=2

" } }, "documentation":"

Information about the patches to use to update the instances, including target operating systems and source repository. Applies to Linux instances only.

" @@ -10314,6 +10354,10 @@ "AllowedPattern":{ "shape":"AllowedPattern", "documentation":"

A regular expression used to validate the parameter value. For example, for String types with values restricted to numbers, you can specify the following: AllowedPattern=^\\d+$

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a Systems Manager parameter to identify the type of resource to which it applies, the environment, or the type of configuration data referenced by the parameter. In this case, you could specify the following key name/value pairs:

  • Key=Resource,Value=S3bucket

  • Key=OS,Value=Windows

  • Key=ParameterType,Value=LicenseKey

To add tags to an existing Systems Manager parameter, use the AddTagsToResource action.

" } } }, @@ -11574,11 +11618,12 @@ "type":"string", "max":128, "min":1, - "pattern":"^(?!^(?i)aws:)(?=^[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$).*$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TagList":{ "type":"list", - "member":{"shape":"Tag"} + "member":{"shape":"Tag"}, + "max":1000 }, "TagValue":{ "type":"string", @@ -11843,6 +11888,10 @@ "shape":"AssociationVersion", "documentation":"

This parameter is provided for concurrency control purposes. You must specify the latest association version in the service. If you want to ensure that this request succeeds, either specify $LATEST, or omit this parameter.

" }, + "AutomationTargetParameterName":{ + "shape":"AutomationTargetParameterName", + "documentation":"

Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls.

" + }, "MaxErrors":{ "shape":"MaxErrors", "documentation":"

The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.

Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.

" diff --git a/botocore/data/stepfunctions/2016-11-23/service-2.json b/botocore/data/stepfunctions/2016-11-23/service-2.json index 1f67f55d..bed06f47 100644 --- a/botocore/data/stepfunctions/2016-11-23/service-2.json +++ b/botocore/data/stepfunctions/2016-11-23/service-2.json @@ -23,9 +23,10 @@ "output":{"shape":"CreateActivityOutput"}, "errors":[ {"shape":"ActivityLimitExceeded"}, - {"shape":"InvalidName"} + {"shape":"InvalidName"}, + {"shape":"TooManyTags"} ], - "documentation":"

Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to AWS Step Functions. Activities must poll Step Functions using the GetActivityTask API action and respond using SendTask* API actions. This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.

", + "documentation":"

Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to AWS Step Functions. Activities must poll Step Functions using the GetActivityTask API action and respond using SendTask* API actions. This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

", "idempotent":true }, "CreateStateMachine":{ @@ -42,9 +43,10 @@ {"shape":"InvalidName"}, {"shape":"StateMachineAlreadyExists"}, {"shape":"StateMachineDeleting"}, - {"shape":"StateMachineLimitExceeded"} + {"shape":"StateMachineLimitExceeded"}, + {"shape":"TooManyTags"} ], - "documentation":"

Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language.

", + "documentation":"

Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

", "idempotent":true }, "DeleteActivity":{ @@ -142,7 +144,7 @@ {"shape":"ActivityWorkerLimitExceeded"}, {"shape":"InvalidArn"} ], - "documentation":"

Used by workers to retrieve a task (with the specified activity ARN) which has been scheduled for execution by a running state machine. This initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available (i.e. an execution of a task of this type is needed.) The maximum time the service holds on to the request before responding is 60 seconds. If no task is available within 60 seconds, the poll returns a taskToken with a null string.

Workers should set their client side socket timeout to at least 65 seconds (5 seconds higher than the maximum time the service may hold the poll request).

Polling with GetActivityTask can cause latency in some implementations. See Avoid Latency When Polling for Activity Tasks in the Step Functions Developer Guide.

" + "documentation":"

Used by workers to retrieve a task (with the specified activity ARN) which has been scheduled for execution by a running state machine. This initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available (i.e. an execution of a task of this type is needed.) The maximum time the service holds on to the request before responding is 60 seconds. If no task is available within 60 seconds, the poll returns a taskToken with a null string.

Workers should set their client side socket timeout to at least 65 seconds (5 seconds higher than the maximum time the service may hold the poll request).

Polling with GetActivityTask can cause latency in some implementations. See Avoid Latency When Polling for Activity Tasks in the Step Functions Developer Guide.

" }, "GetExecutionHistory":{ "name":"GetExecutionHistory", @@ -498,7 +500,11 @@ "members":{ "name":{ "shape":"Name", - "documentation":"

The name of the activity to create. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters \" # % \\ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

" + "documentation":"

The name of the activity to create. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters \" # % \\ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The list of tags to add to a resource.

" } } }, @@ -533,11 +539,15 @@ }, "definition":{ "shape":"Definition", - "documentation":"

The Amazon States Language definition of the state machine. See Amazon States Language.

" + "documentation":"

The Amazon States Language definition of the state machine. See Amazon States Language.

" }, "roleArn":{ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the IAM role to use for this state machine.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Tags to be added when creating a state machine.

" } } }, @@ -710,7 +720,7 @@ }, "definition":{ "shape":"Definition", - "documentation":"

The Amazon States Language definition of the state machine. See Amazon States Language.

" + "documentation":"

The Amazon States Language definition of the state machine. See Amazon States Language.

" }, "roleArn":{ "shape":"Arn", @@ -756,7 +766,7 @@ }, "definition":{ "shape":"Definition", - "documentation":"

The Amazon States Language definition of the state machine. See Amazon States Language.

" + "documentation":"

The Amazon States Language definition of the state machine. See Amazon States Language.

" }, "roleArn":{ "shape":"Arn", @@ -933,7 +943,7 @@ "documentation":"

A token that identifies the scheduled task. This token must be copied and included in subsequent calls to SendTaskHeartbeat, SendTaskSuccess or SendTaskFailure in order to report the progress or completion of the task.

" }, "input":{ - "shape":"SensitiveData", + "shape":"SensitiveDataJobInput", "documentation":"

The string that contains the JSON input data for the task.

" } } @@ -1007,14 +1017,38 @@ "activityStartedEventDetails":{"shape":"ActivityStartedEventDetails"}, "activitySucceededEventDetails":{"shape":"ActivitySucceededEventDetails"}, "activityTimedOutEventDetails":{"shape":"ActivityTimedOutEventDetails"}, - "taskFailedEventDetails":{"shape":"TaskFailedEventDetails"}, - "taskScheduledEventDetails":{"shape":"TaskScheduledEventDetails"}, - "taskStartFailedEventDetails":{"shape":"TaskStartFailedEventDetails"}, - "taskStartedEventDetails":{"shape":"TaskStartedEventDetails"}, - "taskSubmitFailedEventDetails":{"shape":"TaskSubmitFailedEventDetails"}, - "taskSubmittedEventDetails":{"shape":"TaskSubmittedEventDetails"}, - "taskSucceededEventDetails":{"shape":"TaskSucceededEventDetails"}, - "taskTimedOutEventDetails":{"shape":"TaskTimedOutEventDetails"}, + "taskFailedEventDetails":{ + "shape":"TaskFailedEventDetails", + "documentation":"

Contains details about the failure of a task.

" + }, + "taskScheduledEventDetails":{ + "shape":"TaskScheduledEventDetails", + "documentation":"

Contains details about a task that was scheduled.

" + }, + "taskStartFailedEventDetails":{ + "shape":"TaskStartFailedEventDetails", + "documentation":"

Contains details about a task that failed to start.

" + }, + "taskStartedEventDetails":{ + "shape":"TaskStartedEventDetails", + "documentation":"

Contains details about a task that was started.

" + }, + "taskSubmitFailedEventDetails":{ + "shape":"TaskSubmitFailedEventDetails", + "documentation":"

Contains details about a task whose submission failed.

" + }, + "taskSubmittedEventDetails":{ + "shape":"TaskSubmittedEventDetails", + "documentation":"

Contains details about a submitted task.

" + }, + "taskSucceededEventDetails":{ + "shape":"TaskSucceededEventDetails", + "documentation":"

Contains details about a task that succeeded.

" + }, + "taskTimedOutEventDetails":{ + "shape":"TaskTimedOutEventDetails", + "documentation":"

Contains details about a task that timed out.

" + }, "executionFailedEventDetails":{"shape":"ExecutionFailedEventDetails"}, "executionStartedEventDetails":{"shape":"ExecutionStartedEventDetails"}, "executionSucceededEventDetails":{"shape":"ExecutionSucceededEventDetails"}, @@ -1440,6 +1474,11 @@ "max":32768, "sensitive":true }, + "SensitiveDataJobInput":{ + "type":"string", + "max":65536, + "sensitive":true + }, "SensitiveError":{ "type":"string", "max":256, @@ -1456,7 +1495,7 @@ }, "name":{ "shape":"Name", - "documentation":"

The name of the execution. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters \" # % \\ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

" + "documentation":"

The name of the execution. This name must be unique for your AWS account, region, and state machine for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

  • whitespace

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters \" # % \\ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

" }, "input":{ "shape":"SensitiveData", @@ -1885,7 +1924,7 @@ "message":{"shape":"ErrorMessage"}, "resourceName":{"shape":"Arn"} }, - "documentation":"

You've exceeded the number of tags allowed for a resource. See the Limits Topic in the AWS Step Functions Developer Guide.

", + "documentation":"

You've exceeded the number of tags allowed for a resource. See the Limits Topic in the AWS Step Functions Developer Guide.

", "exception":true }, "UntagResourceInput":{ @@ -1920,7 +1959,7 @@ }, "definition":{ "shape":"Definition", - "documentation":"

The Amazon States Language definition of the state machine. See Amazon States Language.

" + "documentation":"

The Amazon States Language definition of the state machine. See Amazon States Language.

" }, "roleArn":{ "shape":"Arn", @@ -1939,5 +1978,5 @@ } } }, - "documentation":"AWS Step Functions

AWS Step Functions is a service that lets you coordinate the components of distributed applications and microservices using visual workflows.

You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues.

Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on AWS, your own servers, or any system that has access to AWS. You can access and use Step Functions using the console, the AWS SDKs, or an HTTP API. For more information about Step Functions, see the AWS Step Functions Developer Guide .

" + "documentation":"AWS Step Functions

AWS Step Functions is a service that lets you coordinate the components of distributed applications and microservices using visual workflows.

You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues.

Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on AWS, your own servers, or any system that has access to AWS. You can access and use Step Functions using the console, the AWS SDKs, or an HTTP API. For more information about Step Functions, see the AWS Step Functions Developer Guide .

" } diff --git a/botocore/data/storagegateway/2013-06-30/paginators-1.json b/botocore/data/storagegateway/2013-06-30/paginators-1.json index b5e10c05..c942b8e7 100644 --- a/botocore/data/storagegateway/2013-06-30/paginators-1.json +++ b/botocore/data/storagegateway/2013-06-30/paginators-1.json @@ -35,6 +35,12 @@ "limit_key": "Limit", "output_token": "Marker", "result_key": "VolumeInfos" + }, + "ListTapes": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "TapeInfos" } } } diff --git a/botocore/data/storagegateway/2013-06-30/service-2.json b/botocore/data/storagegateway/2013-06-30/service-2.json index 7c6fbb61..24f19d82 100644 --- a/botocore/data/storagegateway/2013-06-30/service-2.json +++ b/botocore/data/storagegateway/2013-06-30/service-2.json @@ -82,6 +82,20 @@ ], "documentation":"

Configures one or more gateway local disks as working storage for a gateway. This operation is only supported in the stored volume gateway type. This operation is deprecated in cached volume API version 20120630. Use AddUploadBuffer instead.

Working storage is also referred to as upload buffer. You can also use the AddUploadBuffer operation to add upload buffer to a stored volume gateway.

In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add working storage, and one or more disk IDs that you want to configure as working storage.

" }, + "AttachVolume":{ + "name":"AttachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVolumeInput"}, + "output":{"shape":"AttachVolumeOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Connects a volume to an iSCSI connection and then attaches the volume to the specified gateway. Detaching and attaching a volume enables you to recover your data from one gateway to a different gateway without creating a snapshot. It also makes it easier to move your volumes from an on-premises gateway to a gateway hosted on an Amazon EC2 instance.

" + }, "CancelArchival":{ "name":"CancelArchival", "http":{ @@ -574,6 +588,20 @@ ], "documentation":"

Returns information about the working storage of a gateway. This operation is only supported in the stored volumes gateway type. This operation is deprecated in cached volumes API version (20120630). Use DescribeUploadBuffer instead.

Working storage is also referred to as upload buffer. You can also use the DescribeUploadBuffer operation to add upload buffer to a stored volume gateway.

The response includes disk IDs that are configured as working storage, and it includes the amount of working storage allocated and used.

" }, + "DetachVolume":{ + "name":"DetachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVolumeInput"}, + "output":{"shape":"DetachVolumeOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Disconnects a volume from an iSCSI connection and then detaches the volume from the specified gateway. Detaching and attaching a volume enables you to recover your data from one gateway to a different gateway without creating a snapshot. It also makes it easier to move your volumes from an on-premises gateway to a gateway hosted on an Amazon EC2 instance.

" + }, "DisableGateway":{ "name":"DisableGateway", "http":{ @@ -1118,6 +1146,51 @@ }, "documentation":"

A JSON object containing the of the gateway for which working storage was configured.

" }, + "AttachVolumeInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "VolumeARN", + "NetworkInterfaceId" + ], + "members":{ + "GatewayARN":{ + "shape":"GatewayARN", + "documentation":"

The Amazon Resource Name (ARN) of the gateway that you want to attach the volume to.

" + }, + "TargetName":{ + "shape":"TargetName", + "documentation":"

The name of the iSCSI target used by an initiator to connect to a volume and used as a suffix for the target ARN. For example, specifying TargetName as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume. The target name must be unique across all volumes on a gateway.

If you don't specify a value, Storage Gateway uses the value that was previously used for this volume as the new target name.

" + }, + "VolumeARN":{ + "shape":"VolumeARN", + "documentation":"

The Amazon Resource Name (ARN) of the volume to attach to the specified gateway.

" + }, + "NetworkInterfaceId":{ + "shape":"NetworkInterfaceId", + "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" + }, + "DiskId":{ + "shape":"DiskId", + "documentation":"

The unique device ID or other distinguishing data that identifies the local disk used to create the volume. This value is only required when you are attaching a stored volume.

" + } + }, + "documentation":"

AttachVolumeInput

" + }, + "AttachVolumeOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{ + "shape":"VolumeARN", + "documentation":"

The Amazon Resource Name (ARN) of the volume that was attached to the gateway.

" + }, + "TargetARN":{ + "shape":"TargetARN", + "documentation":"

The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI name for the initiator that was used to connect to the target.

" + } + }, + "documentation":"

AttachVolumeOutput

" + }, "Authentication":{ "type":"string", "documentation":"

The authentication method of the file share.

Valid values are ActiveDirectory or GuestAccess. The default is ActiveDirectory.

", @@ -1157,6 +1230,10 @@ "shape":"VolumeStatus", "documentation":"

One of the VolumeStatus values that indicates the state of the storage volume.

" }, + "VolumeAttachmentStatus":{ + "shape":"VolumeAttachmentStatus", + "documentation":"

A value that indicates whether a storage volume is attached to or detached from a gateway.

" + }, "VolumeSizeInBytes":{ "shape":"long", "documentation":"

The size, in bytes, of the volume capacity.

" @@ -1181,7 +1258,11 @@ "shape":"VolumeUsedInBytes", "documentation":"

The size of the data stored on the volume in bytes.

This value is not available for volumes created prior to May 13, 2015, until you store data on the volume.

" }, - "KMSKey":{"shape":"KMSKey"} + "KMSKey":{"shape":"KMSKey"}, + "TargetName":{ + "shape":"TargetName", + "documentation":"

The name of the iSCSI target that is used by an initiator to connect to a volume and used as a suffix for the target ARN. For example, specifying TargetName as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume.

" + } }, "documentation":"

Describes an iSCSI cached volume.

" }, @@ -1329,7 +1410,7 @@ }, "TargetARN":{ "shape":"TargetARN", - "documentation":"

he Amazon Resource Name (ARN) of the volume target that includes the iSCSI name that initiators can use to connect to the target.

" + "documentation":"

The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI name that initiators can use to connect to the target.

" } } }, @@ -1595,7 +1676,7 @@ }, "TargetARN":{ "shape":"TargetARN", - "documentation":"

he Amazon Resource Name (ARN) of the volume target that includes the iSCSI name that initiators can use to connect to the target.

" + "documentation":"

The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI name that initiators can use to connect to the target.

" } }, "documentation":"

A JSON object containing the following fields:

" @@ -2311,6 +2392,31 @@ "max":255, "min":1 }, + "DetachVolumeInput":{ + "type":"structure", + "required":["VolumeARN"], + "members":{ + "VolumeARN":{ + "shape":"VolumeARN", + "documentation":"

The Amazon Resource Name (ARN) of the volume to detach from the gateway.

" + }, + "ForceDetach":{ + "shape":"Boolean", + "documentation":"

Set to true to forcibly remove the iSCSI connection of the target volume and detach the volume. The default is false. If this value is set to false, you must manually disconnect the iSCSI connection from the target volume.

" + } + }, + "documentation":"

DetachVolumeInput

" + }, + "DetachVolumeOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{ + "shape":"VolumeARN", + "documentation":"

The Amazon Resource Name (ARN) of the volume that was detached.

" + } + }, + "documentation":"

DetachVolumeOutput

" + }, "DeviceType":{ "type":"string", "max":50, @@ -2382,7 +2488,7 @@ "DiskAllocationType":{"shape":"DiskAllocationType"}, "DiskAllocationResource":{ "shape":"string", - "documentation":"

The iSCSI Qualified Name (IQN) that is defined for a disk. This field is not included in the response if the local disk is not defined as an iSCSI target. The format of this field is targetIqn::LUNNumber::region-volumeId.

" + "documentation":"

The iSCSI qualified name (IQN) that is defined for a disk. This field is not included in the response if the local disk is not defined as an iSCSI target. The format of this field is targetIqn::LUNNumber::region-volumeId.

" }, "DiskAttributeList":{"shape":"DiskAttributeList"} }, @@ -2390,7 +2496,7 @@ }, "DiskAllocationType":{ "type":"string", - "documentation":"

One of the DiskAllocationType enumeration values that identifies how a local disk is used. Valid values: \"UPLOAD_BUFFER\", \"CACHE_STORAGE\".

", + "documentation":"

One of the DiskAllocationType enumeration values that identifies how a local disk is used. Valid values: UPLOAD_BUFFER, CACHE_STORAGE

", "max":100, "min":3 }, @@ -2421,14 +2527,20 @@ }, "DomainName":{ "type":"string", + "max":1024, + "min":1, "pattern":"^([a-z0-9]+(-[a-z0-9]+)*\\.)+[a-z]{2,}$" }, "DomainUserName":{ "type":"string", + "max":1024, + "min":1, "pattern":"^\\w[\\w\\.\\- ]*$" }, "DomainUserPassword":{ "type":"string", + "max":1024, + "min":1, "pattern":"^[ -~]+$", "sensitive":true }, @@ -2572,7 +2684,6 @@ "FolderList":{ "type":"list", "member":{"shape":"Folder"}, - "documentation":"

A comma-separated list of the paths of folders to refresh in the cache. The default is [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to \"true\", the entire S3 bucket that the file share has access to is refreshed.

", "max":50, "min":1 }, @@ -2648,6 +2759,14 @@ "type":"list", "member":{"shape":"GatewayInfo"} }, + "Host":{ + "type":"string", + "pattern":"^(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])(:(\\d+))?$" + }, + "Hosts":{ + "type":"list", + "member":{"shape":"Host"} + }, "HourOfDay":{ "type":"integer", "max":23, @@ -2713,12 +2832,20 @@ "members":{ "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The unique Amazon Resource Name (ARN) of the file gateway you want to add to the Active Directory domain.

" + "documentation":"

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.

" }, "DomainName":{ "shape":"DomainName", "documentation":"

The name of the domain that you want the gateway to join.

" }, + "OrganizationalUnit":{ + "shape":"OrganizationalUnit", + "documentation":"

The organizational unit (OU) is a container within an Active Directory that can hold users, groups, computers, and other OUs, and this parameter specifies the OU that the gateway will join within the AD domain.

" + }, + "DomainControllers":{ + "shape":"Hosts", + "documentation":"

List of IPv4 addresses, NetBIOS names, or host names of your domain server. If you need to specify the port number include it after the colon (“:”). For example, mydc.mydomain.com:389.

" + }, "UserName":{ "shape":"DomainUserName", "documentation":"

Sets the user name of the user who has permission to add the gateway to the Active Directory domain.

" @@ -3101,6 +3228,11 @@ "aws-exec-read" ] }, + "OrganizationalUnit":{ + "type":"string", + "max":1024, + "min":1 + }, "Path":{ "type":"string", "documentation":"

The file share path used by the NFS client to identify the mount point.

" @@ -3129,13 +3261,20 @@ "type":"structure", "required":["FileShareARN"], "members":{ - "FileShareARN":{"shape":"FileShareARN"}, - "FolderList":{"shape":"FolderList"}, + "FileShareARN":{ + "shape":"FileShareARN", + "documentation":"

The Amazon Resource Name (ARN) of the file share you want to refresh.

" + }, + "FolderList":{ + "shape":"FolderList", + "documentation":"

A comma-separated list of the paths of folders to refresh in the cache. The default is [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to \"true\", the entire S3 bucket that the file share has access to is refreshed.

" + }, "Recursive":{ "shape":"Boolean", "documentation":"

A value that specifies whether to recursively refresh folders in the cache. The refresh includes folders that were in the cache the last time the gateway listed the folder's contents. If this value set to \"true\", each folder that is listed in FolderList is recursively updated. Otherwise, subfolders listed in FolderList are not refreshed. Only objects that are in folders listed directly under FolderList are found and used for the update. The default is \"true\".

" } - } + }, + "documentation":"

RefreshCacheInput

" }, "RefreshCacheOutput":{ "type":"structure", @@ -3457,6 +3596,10 @@ "shape":"VolumeStatus", "documentation":"

One of the VolumeStatus values that indicates the state of the storage volume.

" }, + "VolumeAttachmentStatus":{ + "shape":"VolumeAttachmentStatus", + "documentation":"

A value that indicates whether a storage volume is attached to, detached from, or is in the process of detaching from a gateway.

" + }, "VolumeSizeInBytes":{ "shape":"long", "documentation":"

The size of the volume in bytes.

" @@ -3489,7 +3632,11 @@ "shape":"VolumeUsedInBytes", "documentation":"

The size of the data stored on the volume in bytes.

This value is not available for volumes created prior to May 13, 2015, until you store data on the volume.

" }, - "KMSKey":{"shape":"KMSKey"} + "KMSKey":{"shape":"KMSKey"}, + "TargetName":{ + "shape":"TargetName", + "documentation":"

The name of the iSCSI target that is used by an initiator to connect to a volume and used as a suffix for the target ARN. For example, specifying TargetName as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume.

" + } }, "documentation":"

Describes an iSCSI stored volume.

" }, @@ -4063,6 +4210,11 @@ "type":"list", "member":{"shape":"VolumeARN"} }, + "VolumeAttachmentStatus":{ + "type":"string", + "max":50, + "min":3 + }, "VolumeId":{ "type":"string", "max":30, @@ -4088,7 +4240,8 @@ "VolumeSizeInBytes":{ "shape":"long", "documentation":"

The size of the volume in bytes.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" - } + }, + "VolumeAttachmentStatus":{"shape":"VolumeAttachmentStatus"} }, "documentation":"

Describes a storage volume object.

" }, diff --git a/botocore/data/transfer/2018-11-05/paginators-1.json b/botocore/data/transfer/2018-11-05/paginators-1.json index ea142457..cef79508 100644 --- a/botocore/data/transfer/2018-11-05/paginators-1.json +++ b/botocore/data/transfer/2018-11-05/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListServers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Servers" + } + } } diff --git a/botocore/data/transfer/2018-11-05/service-2.json b/botocore/data/transfer/2018-11-05/service-2.json index 2a6fcb2e..1d343834 100644 --- a/botocore/data/transfer/2018-11-05/service-2.json +++ b/botocore/data/transfer/2018-11-05/service-2.json @@ -307,7 +307,7 @@ "members":{ "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", - "documentation":"

An array containing all of the information required to call a customer-supplied authentication API. This parameter is not required when the IdentityProviderType value of server that is created uses the SERVICE_MANAGED authentication method.

" + "documentation":"

An array containing all of the information required to call a customer-supplied authentication API. This parameter is not required when the IdentityProviderType value of server that is created uses the SERVICE_MANAGED authentication method.

" }, "IdentityProviderType":{ "shape":"IdentityProviderType", @@ -701,8 +701,14 @@ "shape":"Arn", "documentation":"

Requests the tags associated with a particular Amazon Resource Name (ARN). An ARN is an identifier for a specific AWS resource, such as a server, user, or role.

" }, - "MaxResults":{"shape":"MaxResults"}, - "NextToken":{"shape":"NextToken"} + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Specifies the number of tags to return as a response to the ListTagsForResource request.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

When you request additional results from the ListTagsForResource call, a NextToken parameter is returned in the input. You can then pass in a subsequent command the NextToken parameter to continue listing additional tags.

" + } } }, "ListTagsForResourceResponse":{ @@ -712,7 +718,10 @@ "shape":"Arn", "documentation":"

This value is the ARN you specified to list the tags of.

" }, - "NextToken":{"shape":"NextToken"}, + "NextToken":{ + "shape":"NextToken", + "documentation":"

When you can get additional results from the ListTagsForResource call, a NextToken parameter is returned in the output. You can then pass in a subsequent command the NextToken parameter to continue listing additional tags.

" + }, "Tags":{ "shape":"Tags", "documentation":"

Key-value pairs that are assigned to a resource, usually for the purpose of grouping and searching for items. Tags are metadata that you define that you can use for any purpose.

" @@ -729,7 +738,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When you can get additional results from the ListUsersListUsers call, a NextToken parameter is returned in the output. You can then pass in a subsequent command the NextToken parameter to continue listing additional users.

" + "documentation":"

When you can get additional results from the ListUsers call, a NextToken parameter is returned in the output. You can then pass in a subsequent command the NextToken parameter to continue listing additional users.

" }, "ServerId":{ "shape":"ServerId", @@ -832,7 +841,7 @@ "Message":{"type":"string"}, "NextToken":{ "type":"string", - "max":256, + "max":6144, "min":1 }, "NullableRole":{ @@ -1152,5 +1161,6 @@ "pattern":"^[a-z0-9]{3,32}$" }, "UserPassword":{"type":"string"} - } + }, + "documentation":"

AWS Transfer for SFTP is a fully managed service that enables the transfer of files directly into and out of Amazon S3 using the Secure File Transfer Protocol (SFTP)—also known as Secure Shell (SSH) File Transfer Protocol. AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer for SFTP—by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53—so nothing changes for your customers and partners, or their applications. With your data in S3, you can use it with AWS services for processing, analytics, machine learning, and archiving. Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no infrastructure to buy and setup.

" } diff --git a/botocore/data/translate/2017-07-01/paginators-1.json b/botocore/data/translate/2017-07-01/paginators-1.json index ea142457..6898cd44 100644 --- a/botocore/data/translate/2017-07-01/paginators-1.json +++ b/botocore/data/translate/2017-07-01/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListTerminologies": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TerminologyPropertiesList" + } + } } diff --git a/botocore/data/waf/2015-08-24/paginators-1.json b/botocore/data/waf/2015-08-24/paginators-1.json index ae6668d9..9f2eba80 100644 --- a/botocore/data/waf/2015-08-24/paginators-1.json +++ b/botocore/data/waf/2015-08-24/paginators-1.json @@ -41,6 +41,59 @@ "output_token": "NextMarker", "limit_key": "Limit", "result_key": "XssMatchSets" + }, + "GetRateBasedRuleManagedKeys": { + "input_token": "NextMarker", + "output_token": "NextMarker", + "result_key": "ManagedKeys" + }, + "ListActivatedRulesInRuleGroup": { + "input_token": "NextMarker", + "limit_key": "Limit", + "output_token": "NextMarker", + "result_key": "ActivatedRules" + }, + "ListGeoMatchSets": { + "input_token": "NextMarker", + "limit_key": "Limit", + "output_token": "NextMarker", + "result_key": "GeoMatchSets" + }, + "ListLoggingConfigurations": { + "input_token": "NextMarker", + "limit_key": "Limit", + "output_token": "NextMarker", + "result_key": "LoggingConfigurations" + }, + "ListRateBasedRules": { + "input_token": "NextMarker", + "limit_key": "Limit", + "output_token": "NextMarker", + "result_key": "Rules" + }, + "ListRegexMatchSets": { + "input_token": "NextMarker", + "limit_key": "Limit", + "output_token": "NextMarker", + "result_key": "RegexMatchSets" + }, + "ListRegexPatternSets": { + "input_token": "NextMarker", + "limit_key": "Limit", + "output_token": "NextMarker", + "result_key": "RegexPatternSets" + }, + "ListRuleGroups": { + "input_token": "NextMarker", + "limit_key": "Limit", + 
"output_token": "NextMarker", + "result_key": "RuleGroups" + }, + "ListSubscribedRuleGroups": { + "input_token": "NextMarker", + "limit_key": "Limit", + "output_token": "NextMarker", + "result_key": "RuleGroups" } } } diff --git a/botocore/data/workdocs/2016-05-01/paginators-1.json b/botocore/data/workdocs/2016-05-01/paginators-1.json index 3dc6d7e8..a7cade37 100644 --- a/botocore/data/workdocs/2016-05-01/paginators-1.json +++ b/botocore/data/workdocs/2016-05-01/paginators-1.json @@ -20,6 +20,42 @@ "limit_key": "Limit", "output_token": "Marker", "result_key": "Users" + }, + "DescribeActivities": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "UserActivities" + }, + "DescribeComments": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "Comments" + }, + "DescribeGroups": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "Groups" + }, + "DescribeNotificationSubscriptions": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "Subscriptions" + }, + "DescribeResourcePermissions": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "Principals" + }, + "DescribeRootFolders": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "Folders" } } } diff --git a/botocore/data/workdocs/2016-05-01/service-2.json b/botocore/data/workdocs/2016-05-01/service-2.json index 92617a27..84fdf10c 100644 --- a/botocore/data/workdocs/2016-05-01/service-2.json +++ b/botocore/data/workdocs/2016-05-01/service-2.json @@ -160,7 +160,7 @@ {"shape":"TooManySubscriptionsException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Configure Amazon WorkDocs to use Amazon SNS notifications. The endpoint receives a confirmation message, and must confirm the subscription.

For more information, see Subscribe to Notifications in the Amazon WorkDocs Developer Guide.

" + "documentation":"

Configure Amazon WorkDocs to use Amazon SNS notifications. The endpoint receives a confirmation message, and must confirm the subscription.

For more information, see Subscribe to Notifications in the Amazon WorkDocs Developer Guide.

" }, "CreateUser":{ "name":"CreateUser", @@ -487,7 +487,7 @@ {"shape":"FailedDependencyException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Describes the current user's special folders; the RootFolder and the RecycleBin. RootFolder is the root of user's files and folders and RecycleBin is the root of recycled items. This is not a valid action for SigV4 (administrative API) clients.

This action requires an authentication token. To get an authentication token, register an application with Amazon WorkDocs. For more information, see Authentication and Access Control for User Applications in the Amazon WorkDocs Developer Guide.

" + "documentation":"

Describes the current user's special folders; the RootFolder and the RecycleBin. RootFolder is the root of user's files and folders and RecycleBin is the root of recycled items. This is not a valid action for SigV4 (administrative API) clients.

This action requires an authentication token. To get an authentication token, register an application with Amazon WorkDocs. For more information, see Authentication and Access Control for User Applications in the Amazon WorkDocs Developer Guide.

" }, "DescribeUsers":{ "name":"DescribeUsers", @@ -1263,7 +1263,7 @@ }, "Endpoint":{ "shape":"SubscriptionEndPointType", - "documentation":"

The endpoint to receive the notifications. If the protocol is HTTPS, the endpoint is a URL that begins with \"https://\".

" + "documentation":"

The endpoint to receive the notifications. If the protocol is HTTPS, the endpoint is a URL that begins with https.

" }, "Protocol":{ "shape":"SubscriptionProtocolType", diff --git a/botocore/data/worklink/2018-09-25/paginators-1.json b/botocore/data/worklink/2018-09-25/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/worklink/2018-09-25/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/worklink/2018-09-25/service-2.json b/botocore/data/worklink/2018-09-25/service-2.json new file mode 100644 index 00000000..8f1f447e --- /dev/null +++ b/botocore/data/worklink/2018-09-25/service-2.json @@ -0,0 +1,1170 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-09-25", + "endpointPrefix":"worklink", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"WorkLink", + "serviceFullName":"Amazon WorkLink", + "serviceId":"WorkLink", + "signatureVersion":"v4", + "signingName":"worklink", + "uid":"worklink-2018-09-25" + }, + "operations":{ + "AssociateWebsiteCertificateAuthority":{ + "name":"AssociateWebsiteCertificateAuthority", + "http":{ + "method":"POST", + "requestUri":"/associateWebsiteCertificateAuthority" + }, + "input":{"shape":"AssociateWebsiteCertificateAuthorityRequest"}, + "output":{"shape":"AssociateWebsiteCertificateAuthorityResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Imports the root certificate of a certificate authority (CA) used to obtain TLS certificates used by associated websites within the company network.

" + }, + "CreateFleet":{ + "name":"CreateFleet", + "http":{ + "method":"POST", + "requestUri":"/createFleet" + }, + "input":{"shape":"CreateFleetRequest"}, + "output":{"shape":"CreateFleetResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Creates a fleet. A fleet consists of resources and the configuration that delivers associated websites to authorized users who download and set up the Amazon WorkLink app.

" + }, + "DeleteFleet":{ + "name":"DeleteFleet", + "http":{ + "method":"POST", + "requestUri":"/deleteFleet" + }, + "input":{"shape":"DeleteFleetRequest"}, + "output":{"shape":"DeleteFleetResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Deletes a fleet. Prevents users from accessing previously associated websites.

" + }, + "DescribeAuditStreamConfiguration":{ + "name":"DescribeAuditStreamConfiguration", + "http":{ + "method":"POST", + "requestUri":"/describeAuditStreamConfiguration" + }, + "input":{"shape":"DescribeAuditStreamConfigurationRequest"}, + "output":{"shape":"DescribeAuditStreamConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Describes the configuration for delivering audit streams to the customer account.

" + }, + "DescribeCompanyNetworkConfiguration":{ + "name":"DescribeCompanyNetworkConfiguration", + "http":{ + "method":"POST", + "requestUri":"/describeCompanyNetworkConfiguration" + }, + "input":{"shape":"DescribeCompanyNetworkConfigurationRequest"}, + "output":{"shape":"DescribeCompanyNetworkConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Describes the networking configuration to access the internal websites associated with the specified fleet.

" + }, + "DescribeDevice":{ + "name":"DescribeDevice", + "http":{ + "method":"POST", + "requestUri":"/describeDevice" + }, + "input":{"shape":"DescribeDeviceRequest"}, + "output":{"shape":"DescribeDeviceResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Provides information about a user's device.

" + }, + "DescribeDevicePolicyConfiguration":{ + "name":"DescribeDevicePolicyConfiguration", + "http":{ + "method":"POST", + "requestUri":"/describeDevicePolicyConfiguration" + }, + "input":{"shape":"DescribeDevicePolicyConfigurationRequest"}, + "output":{"shape":"DescribeDevicePolicyConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Describes the device policy configuration for the specified fleet.

" + }, + "DescribeFleetMetadata":{ + "name":"DescribeFleetMetadata", + "http":{ + "method":"POST", + "requestUri":"/describeFleetMetadata" + }, + "input":{"shape":"DescribeFleetMetadataRequest"}, + "output":{"shape":"DescribeFleetMetadataResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Provides basic information for the specified fleet, excluding identity provider, networking, and device configuration details.

" + }, + "DescribeIdentityProviderConfiguration":{ + "name":"DescribeIdentityProviderConfiguration", + "http":{ + "method":"POST", + "requestUri":"/describeIdentityProviderConfiguration" + }, + "input":{"shape":"DescribeIdentityProviderConfigurationRequest"}, + "output":{"shape":"DescribeIdentityProviderConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Describes the identity provider configuration of the specified fleet.

" + }, + "DescribeWebsiteCertificateAuthority":{ + "name":"DescribeWebsiteCertificateAuthority", + "http":{ + "method":"POST", + "requestUri":"/describeWebsiteCertificateAuthority" + }, + "input":{"shape":"DescribeWebsiteCertificateAuthorityRequest"}, + "output":{"shape":"DescribeWebsiteCertificateAuthorityResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Provides information about the certificate authority.

" + }, + "DisassociateWebsiteCertificateAuthority":{ + "name":"DisassociateWebsiteCertificateAuthority", + "http":{ + "method":"POST", + "requestUri":"/disassociateWebsiteCertificateAuthority" + }, + "input":{"shape":"DisassociateWebsiteCertificateAuthorityRequest"}, + "output":{"shape":"DisassociateWebsiteCertificateAuthorityResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Removes a certificate authority (CA).

" + }, + "ListDevices":{ + "name":"ListDevices", + "http":{ + "method":"POST", + "requestUri":"/listDevices" + }, + "input":{"shape":"ListDevicesRequest"}, + "output":{"shape":"ListDevicesResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Retrieves a list of devices registered with the specified fleet.

" + }, + "ListFleets":{ + "name":"ListFleets", + "http":{ + "method":"POST", + "requestUri":"/listFleets" + }, + "input":{"shape":"ListFleetsRequest"}, + "output":{"shape":"ListFleetsResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Retrieves a list of fleets for the current account and Region.

" + }, + "ListWebsiteCertificateAuthorities":{ + "name":"ListWebsiteCertificateAuthorities", + "http":{ + "method":"POST", + "requestUri":"/listWebsiteCertificateAuthorities" + }, + "input":{"shape":"ListWebsiteCertificateAuthoritiesRequest"}, + "output":{"shape":"ListWebsiteCertificateAuthoritiesResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Retrieves a list of certificate authorities added for the current account and Region.

" + }, + "SignOutUser":{ + "name":"SignOutUser", + "http":{ + "method":"POST", + "requestUri":"/signOutUser" + }, + "input":{"shape":"SignOutUserRequest"}, + "output":{"shape":"SignOutUserResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Signs the user out from all of their devices. The user can sign in again if they have valid credentials.

" + }, + "UpdateAuditStreamConfiguration":{ + "name":"UpdateAuditStreamConfiguration", + "http":{ + "method":"POST", + "requestUri":"/updateAuditStreamConfiguration" + }, + "input":{"shape":"UpdateAuditStreamConfigurationRequest"}, + "output":{"shape":"UpdateAuditStreamConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Updates the audit stream configuration for the fleet.

" + }, + "UpdateCompanyNetworkConfiguration":{ + "name":"UpdateCompanyNetworkConfiguration", + "http":{ + "method":"POST", + "requestUri":"/updateCompanyNetworkConfiguration" + }, + "input":{"shape":"UpdateCompanyNetworkConfigurationRequest"}, + "output":{"shape":"UpdateCompanyNetworkConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Updates the company network configuration for the fleet.

" + }, + "UpdateDevicePolicyConfiguration":{ + "name":"UpdateDevicePolicyConfiguration", + "http":{ + "method":"POST", + "requestUri":"/updateDevicePolicyConfiguration" + }, + "input":{"shape":"UpdateDevicePolicyConfigurationRequest"}, + "output":{"shape":"UpdateDevicePolicyConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Updates the device policy configuration for the fleet.

" + }, + "UpdateFleetMetadata":{ + "name":"UpdateFleetMetadata", + "http":{ + "method":"POST", + "requestUri":"/UpdateFleetMetadata" + }, + "input":{"shape":"UpdateFleetMetadataRequest"}, + "output":{"shape":"UpdateFleetMetadataResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Updates fleet metadata, such as DisplayName.

" + }, + "UpdateIdentityProviderConfiguration":{ + "name":"UpdateIdentityProviderConfiguration", + "http":{ + "method":"POST", + "requestUri":"/updateIdentityProviderConfiguration" + }, + "input":{"shape":"UpdateIdentityProviderConfigurationRequest"}, + "output":{"shape":"UpdateIdentityProviderConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Updates the identity provider configuration for the fleet.

" + } + }, + "shapes":{ + "AssociateWebsiteCertificateAuthorityRequest":{ + "type":"structure", + "required":[ + "FleetArn", + "Certificate" + ], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "Certificate":{ + "shape":"Certificate", + "documentation":"

The root certificate of the CA.

" + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

The certificate name to display.

" + } + } + }, + "AssociateWebsiteCertificateAuthorityResponse":{ + "type":"structure", + "members":{ + "WebsiteCaId":{ + "shape":"Id", + "documentation":"

A unique identifier for the CA.

" + } + } + }, + "AuditStreamArn":{"type":"string"}, + "Boolean":{"type":"boolean"}, + "Certificate":{ + "type":"string", + "max":8192, + "min":1, + "pattern":"-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?" + }, + "CertificateChain":{ + "type":"string", + "max":32768, + "min":1, + "pattern":"(-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}\\u000D?\\u000A)*-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?" + }, + "CompanyCode":{ + "type":"string", + "max":32, + "min":1 + }, + "CreateFleetRequest":{ + "type":"structure", + "required":["FleetName"], + "members":{ + "FleetName":{ + "shape":"FleetName", + "documentation":"

A unique name for the fleet.

" + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

The fleet name to display.

" + }, + "OptimizeForEndUserLocation":{ + "shape":"Boolean", + "documentation":"

The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region.

" + } + } + }, + "CreateFleetResponse":{ + "type":"structure", + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + } + } + }, + "DateTime":{"type":"timestamp"}, + "DeleteFleetRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + } + } + }, + "DeleteFleetResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeAuditStreamConfigurationRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + } + } + }, + "DescribeAuditStreamConfigurationResponse":{ + "type":"structure", + "members":{ + "AuditStreamArn":{ + "shape":"AuditStreamArn", + "documentation":"

The ARN of the Amazon Kinesis data stream that will receive the audit events.

" + } + } + }, + "DescribeCompanyNetworkConfigurationRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + } + } + }, + "DescribeCompanyNetworkConfigurationResponse":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"VpcId", + "documentation":"

The VPC with connectivity to associated websites.

" + }, + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

The subnets used for X-ENI connections from Amazon WorkLink rendering containers.

" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

The security groups associated with access to the provided subnets.

" + } + } + }, + "DescribeDevicePolicyConfigurationRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + } + } + }, + "DescribeDevicePolicyConfigurationResponse":{ + "type":"structure", + "members":{ + "DeviceCaCertificate":{ + "shape":"Certificate", + "documentation":"

The certificate chain, including intermediate certificates and the root certificate authority certificate used to issue device certificates.

" + } + } + }, + "DescribeDeviceRequest":{ + "type":"structure", + "required":[ + "FleetArn", + "DeviceId" + ], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "DeviceId":{ + "shape":"Id", + "documentation":"

A unique identifier for a registered user's device.

" + } + } + }, + "DescribeDeviceResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"DeviceStatus", + "documentation":"

The current state of the device.

" + }, + "Model":{ + "shape":"DeviceModel", + "documentation":"

The model of the device.

" + }, + "Manufacturer":{ + "shape":"DeviceManufacturer", + "documentation":"

The manufacturer of the device.

" + }, + "OperatingSystem":{ + "shape":"DeviceOperatingSystemName", + "documentation":"

The operating system of the device.

" + }, + "OperatingSystemVersion":{ + "shape":"DeviceOperatingSystemVersion", + "documentation":"

The operating system version of the device.

" + }, + "PatchLevel":{ + "shape":"DevicePatchLevel", + "documentation":"

The operating system patch level of the device.

" + }, + "FirstAccessedTime":{ + "shape":"DateTime", + "documentation":"

The date that the device first signed in to Amazon WorkLink.

" + }, + "LastAccessedTime":{ + "shape":"DateTime", + "documentation":"

The date that the device last accessed Amazon WorkLink.

" + }, + "Username":{ + "shape":"Username", + "documentation":"

The user name associated with the device.

" + } + } + }, + "DescribeFleetMetadataRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + } + } + }, + "DescribeFleetMetadataResponse":{ + "type":"structure", + "members":{ + "CreatedTime":{ + "shape":"DateTime", + "documentation":"

The time that the fleet was created.

" + }, + "LastUpdatedTime":{ + "shape":"DateTime", + "documentation":"

The time that the fleet was last updated.

" + }, + "FleetName":{ + "shape":"FleetName", + "documentation":"

The name of the fleet.

" + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

The name to display.

" + }, + "OptimizeForEndUserLocation":{ + "shape":"Boolean", + "documentation":"

The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region.

" + }, + "CompanyCode":{ + "shape":"CompanyCode", + "documentation":"

The identifier used by users to sign in to the Amazon WorkLink app.

" + }, + "FleetStatus":{ + "shape":"FleetStatus", + "documentation":"

The current state of the fleet.

" + } + } + }, + "DescribeIdentityProviderConfigurationRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + } + } + }, + "DescribeIdentityProviderConfigurationResponse":{ + "type":"structure", + "members":{ + "IdentityProviderType":{ + "shape":"IdentityProviderType", + "documentation":"

The type of identity provider.

" + }, + "ServiceProviderSamlMetadata":{ + "shape":"SamlMetadata", + "documentation":"

The SAML metadata document uploaded to the user’s identity provider.

" + }, + "IdentityProviderSamlMetadata":{ + "shape":"SamlMetadata", + "documentation":"

The SAML metadata document provided by the user’s identity provider.

" + } + } + }, + "DescribeWebsiteCertificateAuthorityRequest":{ + "type":"structure", + "required":[ + "FleetArn", + "WebsiteCaId" + ], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "WebsiteCaId":{ + "shape":"Id", + "documentation":"

A unique identifier for the certificate authority.

" + } + } + }, + "DescribeWebsiteCertificateAuthorityResponse":{ + "type":"structure", + "members":{ + "Certificate":{ + "shape":"Certificate", + "documentation":"

The root certificate of the certificate authority.

" + }, + "CreatedTime":{ + "shape":"DateTime", + "documentation":"

The time that the certificate authority was added.

" + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

The certificate name to display.

" + } + } + }, + "DeviceManufacturer":{ + "type":"string", + "max":256, + "min":1 + }, + "DeviceModel":{ + "type":"string", + "max":256, + "min":1 + }, + "DeviceOperatingSystemName":{ + "type":"string", + "max":256, + "min":1 + }, + "DeviceOperatingSystemVersion":{ + "type":"string", + "max":256, + "min":1 + }, + "DevicePatchLevel":{ + "type":"string", + "max":256, + "min":1 + }, + "DeviceStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "SIGNED_OUT" + ] + }, + "DeviceSummary":{ + "type":"structure", + "members":{ + "DeviceId":{ + "shape":"Id", + "documentation":"

The ID of the device.

" + }, + "DeviceStatus":{ + "shape":"DeviceStatus", + "documentation":"

The status of the device.

" + } + }, + "documentation":"

The summary of devices.

" + }, + "DeviceSummaryList":{ + "type":"list", + "member":{"shape":"DeviceSummary"} + }, + "DisassociateWebsiteCertificateAuthorityRequest":{ + "type":"structure", + "required":[ + "FleetArn", + "WebsiteCaId" + ], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "WebsiteCaId":{ + "shape":"Id", + "documentation":"

A unique identifier for the CA.

" + } + } + }, + "DisassociateWebsiteCertificateAuthorityResponse":{ + "type":"structure", + "members":{ + } + }, + "DisplayName":{ + "type":"string", + "max":100 + }, + "ExceptionMessage":{"type":"string"}, + "FleetArn":{ + "type":"string", + "max":2048, + "min":20 + }, + "FleetName":{ + "type":"string", + "max":48, + "min":1, + "pattern":"^[a-z0-9](?:[a-z0-9\\-]{0,46}[a-z0-9])?$" + }, + "FleetStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "DELETED", + "FAILED_TO_CREATE", + "FAILED_TO_DELETE" + ] + }, + "FleetSummary":{ + "type":"structure", + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "CreatedTime":{ + "shape":"DateTime", + "documentation":"

The time when the fleet was created.

" + }, + "LastUpdatedTime":{ + "shape":"DateTime", + "documentation":"

The time when the fleet was last updated.

" + }, + "FleetName":{ + "shape":"FleetName", + "documentation":"

The name of the fleet.

" + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

The name to display.

" + }, + "CompanyCode":{ + "shape":"CompanyCode", + "documentation":"

The identifier used by users to sign into the Amazon WorkLink app.

" + }, + "FleetStatus":{ + "shape":"FleetStatus", + "documentation":"

The status of the fleet.

" + } + }, + "documentation":"

The summary of the fleet.

" + }, + "FleetSummaryList":{ + "type":"list", + "member":{"shape":"FleetSummary"} + }, + "Id":{ + "type":"string", + "max":256, + "min":1 + }, + "IdentityProviderType":{ + "type":"string", + "enum":["SAML"] + }, + "InternalServerErrorException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The service is temporarily unavailable.

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The request is not valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListDevicesRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be included in the next page.

" + } + } + }, + "ListDevicesResponse":{ + "type":"structure", + "members":{ + "Devices":{ + "shape":"DeviceSummaryList", + "documentation":"

Information about the devices.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null.

" + } + } + }, + "ListFleetsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be included in the next page.

" + } + } + }, + "ListFleetsResponse":{ + "type":"structure", + "members":{ + "FleetSummaryList":{ + "shape":"FleetSummaryList", + "documentation":"

The summary list of the fleets.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null.

" + } + } + }, + "ListWebsiteCertificateAuthoritiesRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be included in the next page.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

" + } + } + }, + "ListWebsiteCertificateAuthoritiesResponse":{ + "type":"structure", + "members":{ + "WebsiteCertificateAuthorities":{ + "shape":"WebsiteCaSummaryList", + "documentation":"

Information about the certificates.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "min":1 + }, + "NextToken":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"[\\w\\-]+" + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The resource already exists.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The requested resource was not found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SamlMetadata":{ + "type":"string", + "max":204800, + "min":1 + }, + "SecurityGroupId":{ + "type":"string", + "pattern":"^sg-([0-9a-f]{8}|[0-9a-f]{17})$" + }, + "SecurityGroupIds":{ + "type":"list", + "member":{"shape":"SecurityGroupId"} + }, + "SignOutUserRequest":{ + "type":"structure", + "required":[ + "FleetArn", + "Username" + ], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "Username":{ + "shape":"Username", + "documentation":"

The name of the user.

" + } + } + }, + "SignOutUserResponse":{ + "type":"structure", + "members":{ + } + }, + "SubnetId":{ + "type":"string", + "pattern":"^subnet-([0-9a-f]{8}|[0-9a-f]{17})$" + }, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"} + }, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The number of requests exceeds the limit.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "UnauthorizedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

You are not authorized to perform this action.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "UpdateAuditStreamConfigurationRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "AuditStreamArn":{ + "shape":"AuditStreamArn", + "documentation":"

The ARN of the Amazon Kinesis data stream that receives the audit events.

" + } + } + }, + "UpdateAuditStreamConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateCompanyNetworkConfigurationRequest":{ + "type":"structure", + "required":[ + "FleetArn", + "VpcId", + "SubnetIds", + "SecurityGroupIds" + ], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

The VPC with connectivity to associated websites.

" + }, + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

The subnets used for X-ENI connections from Amazon WorkLink rendering containers.

" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

The security groups associated with access to the provided subnets.

" + } + } + }, + "UpdateCompanyNetworkConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDevicePolicyConfigurationRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "DeviceCaCertificate":{ + "shape":"CertificateChain", + "documentation":"

The certificate chain, including intermediate certificates and the root certificate authority certificate used to issue device certificates.

" + } + } + }, + "UpdateDevicePolicyConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateFleetMetadataRequest":{ + "type":"structure", + "required":["FleetArn"], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

The fleet name to display. The existing DisplayName is unset if null is passed.

" + }, + "OptimizeForEndUserLocation":{ + "shape":"Boolean", + "documentation":"

The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region.

" + } + } + }, + "UpdateFleetMetadataResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateIdentityProviderConfigurationRequest":{ + "type":"structure", + "required":[ + "FleetArn", + "IdentityProviderType" + ], + "members":{ + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The ARN of the fleet.

" + }, + "IdentityProviderType":{ + "shape":"IdentityProviderType", + "documentation":"

The type of identity provider.

" + }, + "IdentityProviderSamlMetadata":{ + "shape":"SamlMetadata", + "documentation":"

The SAML metadata document provided by the customer’s identity provider. The existing IdentityProviderSamlMetadata is unset if null is passed.

" + } + } + }, + "UpdateIdentityProviderConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "Username":{ + "type":"string", + "max":256, + "min":1 + }, + "VpcId":{ + "type":"string", + "pattern":"^vpc-([0-9a-f]{8}|[0-9a-f]{17})$" + }, + "WebsiteCaSummary":{ + "type":"structure", + "members":{ + "WebsiteCaId":{ + "shape":"Id", + "documentation":"

A unique identifier for the CA.

" + }, + "CreatedTime":{ + "shape":"DateTime", + "documentation":"

The time when the CA was added.

" + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

The name to display.

" + } + }, + "documentation":"

The summary of the certificate authority (CA).

" + }, + "WebsiteCaSummaryList":{ + "type":"list", + "member":{"shape":"WebsiteCaSummary"} + } + }, + "documentation":"

Amazon WorkLink is a cloud-based service that provides secure access to internal websites and web apps from iOS phones. In a single step, your users, such as employees, can access internal websites as efficiently as they access any other public website. They enter a URL in their web browser, or choose a link to an internal website in an email. Amazon WorkLink authenticates the user's access and securely renders authorized internal web content in a secure rendering service in the AWS cloud. Amazon WorkLink doesn't download or store any internal web content on mobile devices.

" +} diff --git a/botocore/data/workmail/2017-10-01/paginators-1.json b/botocore/data/workmail/2017-10-01/paginators-1.json index d5de8253..511c0ef6 100644 --- a/botocore/data/workmail/2017-10-01/paginators-1.json +++ b/botocore/data/workmail/2017-10-01/paginators-1.json @@ -35,6 +35,18 @@ "output_token": "NextToken", "input_token": "NextToken", "limit_key": "MaxResults" + }, + "ListMailboxPermissions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Permissions" + }, + "ListResourceDelegates": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Delegates" } } } diff --git a/botocore/data/workspaces/2015-04-08/paginators-1.json b/botocore/data/workspaces/2015-04-08/paginators-1.json index efa8cbad..57e10e02 100644 --- a/botocore/data/workspaces/2015-04-08/paginators-1.json +++ b/botocore/data/workspaces/2015-04-08/paginators-1.json @@ -15,6 +15,34 @@ "input_token": "NextToken", "output_token": "NextToken", "result_key": "Workspaces" + }, + "DescribeAccountModifications": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "AccountModifications" + }, + "DescribeIpGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Result" + }, + "DescribeWorkspaceImages": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Images" + }, + "DescribeWorkspacesConnectionStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "WorkspacesConnectionStatus" + }, + "ListAvailableManagementCidrRanges": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ManagementCidrRanges" } } } diff --git a/botocore/data/workspaces/2015-04-08/service-2.json b/botocore/data/workspaces/2015-04-08/service-2.json index 07a1aa6b..f4084405 100644 --- 
a/botocore/data/workspaces/2015-04-08/service-2.json +++ b/botocore/data/workspaces/2015-04-08/service-2.json @@ -1372,7 +1372,10 @@ }, "ModifyClientPropertiesRequest":{ "type":"structure", - "required":["ResourceId"], + "required":[ + "ResourceId", + "ClientProperties" + ], "members":{ "ResourceId":{ "shape":"NonEmptyString", diff --git a/botocore/data/xray/2016-04-12/paginators-1.json b/botocore/data/xray/2016-04-12/paginators-1.json index 00d9811c..c026029d 100644 --- a/botocore/data/xray/2016-04-12/paginators-1.json +++ b/botocore/data/xray/2016-04-12/paginators-1.json @@ -19,6 +19,21 @@ "input_token": "NextToken", "output_token": "NextToken", "result_key": "TraceSummaries" + }, + "GetGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Groups" + }, + "GetSamplingRules": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "SamplingRuleRecords" + }, + "GetSamplingStatisticSummaries": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "SamplingStatisticSummaries" } } } diff --git a/botocore/docs/service.py b/botocore/docs/service.py index c9b5d7b3..92bc94b5 100644 --- a/botocore/docs/service.py +++ b/botocore/docs/service.py @@ -26,6 +26,7 @@ class ServiceDocumenter(object): self._client = self._session.create_client( service_name, region_name='us-east-1', aws_access_key_id='foo', aws_secret_access_key='bar') + self._event_emitter = self._client.meta.events self.sections = [ 'title', @@ -52,6 +53,11 @@ class ServiceDocumenter(object): def title(self, section): section.style.h1(self._client.__class__.__name__) + self._event_emitter.emit( + 'docs.%s.%s' % ('title', + self._service_name), + section=section + ) def table_of_contents(self, section): section.style.table_of_contents(title='Table of Contents', depth=2) diff --git a/botocore/eventstream.py b/botocore/eventstream.py index ea8a076d..ddb822dd 100644 --- a/botocore/eventstream.py +++ b/botocore/eventstream.py @@ -61,6 
+61,17 @@ class ChecksumMismatch(ParserError): super(ChecksumMismatch, self).__init__(message) +class NoInitialResponseError(ParserError): + """An event of type initial-response was not received. + + This exception is raised when the event stream produced no events or + the first event in the stream was not of the initial-response type. + """ + def __init__(self): + message = 'First event was not of the initial-response type' + super(NoInitialResponseError, self).__init__(message) + + class DecodeUtils(object): """Unpacking utility functions used in the decoder. @@ -312,7 +323,8 @@ class EventStreamMessage(object): self.crc = crc def to_response_dict(self, status_code=200): - if self.headers.get(':message-type') == 'error': + message_type = self.headers.get(':message-type') + if message_type == 'error' or message_type == 'exception': status_code = 400 return { 'status_code': status_code, @@ -552,15 +564,20 @@ class EventStream(object): self._output_shape = output_shape self._operation_name = operation_name self._parser = parser - self._buffer = EventStreamBuffer() + self._event_generator = self._create_raw_event_generator() def __iter__(self): - for data in self._raw_stream.stream(): - self._buffer.add_data(data) - for event in self._buffer: - parsed_event = self._parse_event(event) - if parsed_event: - yield parsed_event + for event in self._event_generator: + parsed_event = self._parse_event(event) + if parsed_event: + yield parsed_event + + def _create_raw_event_generator(self): + event_stream_buffer = EventStreamBuffer() + for chunk in self._raw_stream.stream(): + event_stream_buffer.add_data(chunk) + for event in event_stream_buffer: + yield event def _parse_event(self, event): response_dict = event.to_response_dict() @@ -570,6 +587,16 @@ class EventStream(object): else: raise EventStreamError(parsed_response, self._operation_name) + def get_initial_response(self): + try: + initial_event = next(self._event_generator) + event_type = 
initial_event.headers.get(':event-type') + if event_type == 'initial-response': + return initial_event + except StopIteration: + pass + raise NoInitialResponseError() + def close(self): """Closes the underlying streaming body. """ self._raw_stream.close() diff --git a/botocore/handlers.py b/botocore/handlers.py index 2c009262..81af758e 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -338,6 +338,21 @@ def add_expect_header(model, params, **kwargs): params['headers']['Expect'] = '100-continue' +class DeprecatedServiceDocumenter(object): + def __init__(self, replacement_service_name): + self._replacement_service_name = replacement_service_name + + def inject_deprecation_notice(self, section, event_name, **kwargs): + section.style.start_important() + section.write('This service client is deprecated. Please use ') + section.style.ref( + self._replacement_service_name, + self._replacement_service_name, + ) + section.write(' instead.') + section.style.end_important() + + def document_copy_source_form(section, event_name, **kwargs): if 'request-example' in event_name: parent = section.get_section('structure-value') @@ -870,12 +885,6 @@ class ClientMethodAlias(object): return getattr(client, self._actual) -def remove_subscribe_to_shard(class_attributes, **kwargs): - if 'subscribe_to_shard' in class_attributes: - # subscribe_to_shard requires HTTP 2 support - del class_attributes['subscribe_to_shard'] - - class HeaderToHostHoister(object): """Takes a header and moves it to the front of the hoststring. 
""" @@ -930,7 +939,6 @@ BUILTIN_HANDLERS = [ convert_body_to_file_like_object, REGISTER_LAST), ('before-parameter-build.s3.PutObject', convert_body_to_file_like_object, REGISTER_LAST), - ('creating-client-class.kinesis', remove_subscribe_to_shard), ('creating-client-class', add_generate_presigned_url), ('creating-client-class.s3', add_generate_presigned_post), ('creating-client-class.iot-data', check_openssl_supports_tls_version_1_2), @@ -1109,6 +1117,12 @@ BUILTIN_HANDLERS = [ ('before-call.s3-control.*', HeaderToHostHoister('x-amz-account-id').hoist), + ########### + # SMS Voice + ########## + ('docs.title.sms-voice', + DeprecatedServiceDocumenter( + 'pinpoint-sms-voice').inject_deprecation_notice), ('before-call', inject_api_version_header_if_needed), ] diff --git a/botocore/model.py b/botocore/model.py index f7758580..6ff3c689 100644 --- a/botocore/model.py +++ b/botocore/model.py @@ -167,6 +167,10 @@ class Shape(object): return "<%s(%s)>" % (self.__class__.__name__, self.name) + @property + def event_stream_name(self): + return None + class StructureShape(Shape): @CachedProperty @@ -183,6 +187,13 @@ class StructureShape(Shape): shape_members[name] = self._resolve_shape_ref(shape_ref) return shape_members + @CachedProperty + def event_stream_name(self): + for member_name, member in self.members.items(): + if member.serialization.get('eventstream'): + return member_name + return None + class ListShape(Shape): @CachedProperty @@ -495,9 +506,9 @@ class OperationModel(object): """Returns the event stream member's shape if any or None otherwise.""" if shape is None: return None - for member in shape.members.values(): - if member.serialization.get('eventstream'): - return member + event_name = shape.event_stream_name + if event_name: + return shape.members[event_name] return None @CachedProperty diff --git a/botocore/monitoring.py b/botocore/monitoring.py index ba17c873..1ae1ca06 100644 --- a/botocore/monitoring.py +++ b/botocore/monitoring.py @@ -388,12 +388,25 @@ 
class CSMSerializer(object): def _serialize_attempts(self, attempts, event_dict, **kwargs): event_dict['AttemptCount'] = len(attempts) if attempts: + self._add_fields_from_last_attempt(event_dict, attempts[-1]) + + def _add_fields_from_last_attempt(self, event_dict, last_attempt): + if last_attempt.request_headers: # It does not matter which attempt to use to grab the region # for the ApiCall event, but SDKs typically do the last one. - last_attempt = attempts[-1] - if last_attempt.request_headers: - event_dict['Region'] = self._get_region( - last_attempt.request_headers) + region = self._get_region(last_attempt.request_headers) + if region is not None: + event_dict['Region'] = region + event_dict['UserAgent'] = self._get_user_agent( + last_attempt.request_headers) + if last_attempt.http_status_code is not None: + event_dict['FinalHttpStatusCode'] = last_attempt.http_status_code + if last_attempt.parsed_error is not None: + self._serialize_parsed_error( + last_attempt.parsed_error, event_dict, 'ApiCall') + if last_attempt.wire_exception is not None: + self._serialize_wire_exception( + last_attempt.wire_exception, event_dict, 'ApiCall') def _serialize_latency(self, latency, event_dict, event_type): if event_type == 'ApiCall': @@ -410,16 +423,12 @@ class CSMSerializer(object): def _serialize_request_headers(self, request_headers, event_dict, **kwargs): - if 'User-Agent' in request_headers: - event_dict['UserAgent'] = self._truncate( - ensure_unicode(request_headers['User-Agent']), - self._MAX_USER_AGENT_LENGTH - ) + event_dict['UserAgent'] = self._get_user_agent(request_headers) if self._is_signed(request_headers): event_dict['AccessKey'] = self._get_access_key(request_headers) - region = self._get_region(request_headers) - if region is not None: - event_dict['Region'] = region + region = self._get_region(request_headers) + if region is not None: + event_dict['Region'] = region if 'X-Amz-Security-Token' in request_headers: event_dict['SessionToken'] = 
request_headers[ 'X-Amz-Security-Token'] @@ -434,17 +443,21 @@ class CSMSerializer(object): if header in response_headers: event_dict[entry] = response_headers[header] - def _serialize_parsed_error(self, parsed_error, event_dict, **kwargs): - event_dict['AwsException'] = self._truncate( + def _serialize_parsed_error(self, parsed_error, event_dict, event_type, + **kwargs): + field_prefix = 'Final' if event_type == 'ApiCall' else '' + event_dict[field_prefix + 'AwsException'] = self._truncate( parsed_error['Code'], self._MAX_ERROR_CODE_LENGTH) - event_dict['AwsExceptionMessage'] = self._truncate( + event_dict[field_prefix + 'AwsExceptionMessage'] = self._truncate( parsed_error['Message'], self._MAX_MESSAGE_LENGTH) - def _serialize_wire_exception(self, wire_exception, event_dict, **kwargs): - event_dict['SdkException'] = self._truncate( + def _serialize_wire_exception(self, wire_exception, event_dict, event_type, + **kwargs): + field_prefix = 'Final' if event_type == 'ApiCall' else '' + event_dict[field_prefix + 'SdkException'] = self._truncate( wire_exception.__class__.__name__, self._MAX_EXCEPTION_CLASS_LENGTH) - event_dict['SdkExceptionMessage'] = self._truncate( + event_dict[field_prefix + 'SdkExceptionMessage'] = self._truncate( str(wire_exception), self._MAX_MESSAGE_LENGTH) def _get_event_type(self, event): @@ -459,12 +472,20 @@ class CSMSerializer(object): return auth_match.group('access_key') def _get_region(self, request_headers): + if not self._is_signed(request_headers): + return None auth_val = self._get_auth_value(request_headers) signature_version, auth_match = self._get_auth_match(auth_val) if signature_version != 'v4': return None return auth_match.group('signing_region') + def _get_user_agent(self, request_headers): + return self._truncate( + ensure_unicode(request_headers.get('User-Agent', '')), + self._MAX_USER_AGENT_LENGTH + ) + def _is_signed(self, request_headers): return 'Authorization' in request_headers diff --git a/botocore/parsers.py 
b/botocore/parsers.py index 0a3b0087..1452900f 100644 --- a/botocore/parsers.py +++ b/botocore/parsers.py @@ -121,7 +121,7 @@ import xml.etree.cElementTree import logging from botocore.compat import six, XMLParseError -from botocore.eventstream import EventStream +from botocore.eventstream import EventStream, NoInitialResponseError from botocore.utils import parse_timestamp, merge_dicts, \ is_json_value_header, lowercase_dict @@ -313,6 +313,11 @@ class ResponseParser(object): def _default_handle(self, shape, value): return value + def _create_event_stream(self, response, shape): + parser = self._event_stream_parser + name = response['context'].get('operation_name') + return EventStream(response['body'], shape, parser, name) + class BaseXMLResponseParser(ResponseParser): def __init__(self, timestamp_parser=None, blob_parser=None): @@ -628,20 +633,6 @@ class BaseJSONParser(ResponseParser): return { 'message': body } -class JSONParser(BaseJSONParser): - """Response parse for the "json" protocol.""" - def _do_parse(self, response, shape): - # The json.loads() gives us the primitive JSON types, - # but we need to traverse the parsed JSON data to convert - # to richer types (blobs, timestamps, etc. 
- parsed = {} - if shape is not None: - original_parsed = self._parse_body_as_json(response['body']) - parsed = self._parse_shape(shape, original_parsed) - self._inject_response_metadata(parsed, response['headers']) - return parsed - - class BaseEventStreamParser(ResponseParser): def _do_parse(self, response, shape): @@ -658,12 +649,24 @@ class BaseEventStreamParser(ResponseParser): return final_parsed def _do_error_parse(self, response, shape): - error = { - 'Error': { - 'Code': response['headers'].get(':error-code', ''), - 'Message': response['headers'].get(':error-message', ''), + exception_type = response['headers'].get(':exception-type') + exception_shape = shape.members.get(exception_type) + if exception_shape is not None: + original_parsed = self._initial_body_parse(response['body']) + body = self._parse_shape(exception_shape, original_parsed) + error = { + 'Error': { + 'Code': exception_type, + 'Message': body.get('Message', body.get('message', '')) + } + } + else: + error = { + 'Error': { + 'Code': response['headers'].get(':error-code', ''), + 'Message': response['headers'].get(':error-message', ''), + } } - } return error def _parse_payload(self, response, shape, member_shapes, final_parsed): @@ -722,6 +725,42 @@ class EventStreamXMLParser(BaseEventStreamParser, BaseXMLResponseParser): return self._parse_xml_string_to_dom(xml_string) +class JSONParser(BaseJSONParser): + + EVENT_STREAM_PARSER_CLS = EventStreamJSONParser + + """Response parser for the "json" protocol.""" + def _do_parse(self, response, shape): + parsed = {} + if shape is not None: + event_name = shape.event_stream_name + if event_name: + parsed = self._handle_event_stream(response, shape, event_name) + else: + parsed = self._handle_json_body(response['body'], shape) + self._inject_response_metadata(parsed, response['headers']) + return parsed + + def _handle_event_stream(self, response, shape, event_name): + event_stream_shape = shape.members[event_name] + event_stream = 
self._create_event_stream(response, event_stream_shape) + try: + event = event_stream.get_initial_response() + except NoInitialResponseError: + error_msg = 'First event was not of type initial-response' + raise ResponseParserError(error_msg) + parsed = self._handle_json_body(event.payload, shape) + parsed[event_name] = event_stream + return parsed + + def _handle_json_body(self, raw_body, shape): + # The json.loads() gives us the primitive JSON types, + # but we need to traverse the parsed JSON data to convert + # to richer types (blobs, timestamps, etc. + parsed_json = self._parse_body_as_json(raw_body) + return self._parse_shape(shape, parsed_json) + + class BaseRestParser(ResponseParser): def _do_parse(self, response, shape): @@ -756,9 +795,7 @@ class BaseRestParser(ResponseParser): payload_member_name = shape.serialization['payload'] body_shape = member_shapes[payload_member_name] if body_shape.serialization.get('eventstream'): - parser = self._event_stream_parser - name = response['context'].get('operation_name') - body = EventStream(response['body'], body_shape, parser, name) + body = self._create_event_stream(response, body_shape) final_parsed[payload_member_name] = body elif body_shape.type_name in ['string', 'blob']: # This is a stream diff --git a/botocore/response.py b/botocore/response.py index 13a15f9f..93c7937a 100644 --- a/botocore/response.py +++ b/botocore/response.py @@ -114,7 +114,8 @@ class StreamingBody(object): for line in lines[:-1]: yield line.splitlines()[0] pending = lines[-1] - yield pending.splitlines()[0] + if pending: + yield pending.splitlines()[0] def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE): """Return an iterator to yield chunks of chunk_size bytes from the raw diff --git a/docs/source/conf.py b/docs/source/conf.py index 63abdd43..efeba7dc 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -52,9 +52,9 @@ copyright = u'2013, Mitch Garnaat' # built documents. # # The short X.Y version. -version = '1.12.' 
+version = '1.12.1' # The full version, including alpha/beta/rc tags. -release = '1.12.71' +release = '1.12.103' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 66525be3..bd138d07 100644 --- a/setup.py +++ b/setup.py @@ -70,7 +70,7 @@ setup( ] }, license="Apache License 2.0", - classifiers=( + classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', @@ -86,5 +86,5 @@ setup( 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', - ), + ] ) diff --git a/tests/functional/csm/cases.json b/tests/functional/csm/cases.json index 8dad2b74..0aa18786 100644 --- a/tests/functional/csm/cases.json +++ b/tests/functional/csm/cases.json @@ -89,7 +89,9 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -137,7 +139,11 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalAwsException": "TestOperationException", + "FinalAwsExceptionMessage": "There was an error", + "FinalHttpStatusCode": 400 } ] }, @@ -203,7 +209,9 @@ "Latency": "ANY_INT", "AttemptCount": 2, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -250,7 +258,10 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalSdkException": "ANY_STR", + "FinalSdkExceptionMessage": "Unexpected exception was thrown" } ] }, @@ -315,7 +326,9 @@ "Latency": "ANY_INT", "AttemptCount": 2, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": 
"us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -367,7 +380,9 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -471,7 +486,9 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -551,7 +568,9 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -605,7 +624,9 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -660,7 +681,9 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -714,7 +737,9 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -761,7 +786,9 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -808,7 +835,9 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -855,7 +884,9 @@ "Latency": "ANY_INT", "AttemptCount": 1, "MaxRetriesExceeded": 0, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalHttpStatusCode": 200 } ] }, @@ -934,7 +965,11 @@ "Latency": "ANY_INT", "AttemptCount": 2, "MaxRetriesExceeded": 1, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + 
"FinalAwsException": "ServiceUnavailable", + "FinalAwsExceptionMessage": "Service is unavailable", + "FinalHttpStatusCode": 503 } ] }, @@ -1011,7 +1046,81 @@ "Latency": "ANY_INT", "AttemptCount": 2, "MaxRetriesExceeded": 1, - "Region": "us-west-2" + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalSdkException": "ANY_STR", + "FinalSdkExceptionMessage": "Retryable exception was thrown" + } + ] + }, + { + "description": "Test API call event uses exception data from final attempt", + "apiCalls": [ + { + "serviceId": "CSM Test", + "operationName": "TestOperation", + "params": {}, + "attemptResponses": [ + { + "sdkException": { + "isRetryable": true, + "message": "First retryable exception" + } + }, + { + "sdkException": { + "isRetryable": false, + "message": "Second un-retryable exception" + } + } + ] + } + ], + "expectedMonitoringEvents": [ + { + "Version": 1, + "Type": "ApiCallAttempt", + "Service": "CSM Test", + "Api": "TestOperation", + "ClientId": "", + "Timestamp": "ANY_INT", + "AttemptLatency": "ANY_INT", + "Fqdn": "csmtest.us-west-2.amazonaws.com", + "Region": "us-west-2", + "UserAgent": "ANY_STR", + "AccessKey": "myaccesskey", + "SdkException": "ANY_STR", + "SdkExceptionMessage": "First retryable exception" + }, + { + "Version": 1, + "Type": "ApiCallAttempt", + "Service": "CSM Test", + "Api": "TestOperation", + "ClientId": "", + "Timestamp": "ANY_INT", + "AttemptLatency": "ANY_INT", + "Fqdn": "csmtest.us-west-2.amazonaws.com", + "Region": "us-west-2", + "UserAgent": "ANY_STR", + "AccessKey": "myaccesskey", + "SdkException": "ANY_STR", + "SdkExceptionMessage": "Second un-retryable exception" + }, + { + "Version": 1, + "Type": "ApiCall", + "Service": "CSM Test", + "Api": "TestOperation", + "ClientId": "", + "Timestamp": "ANY_INT", + "Latency": "ANY_INT", + "AttemptCount": 2, + "MaxRetriesExceeded": 0, + "UserAgent": "ANY_STR", + "Region": "us-west-2", + "FinalSdkException": "ANY_STR", + "FinalSdkExceptionMessage": "Second un-retryable exception" } ] } 
diff --git a/tests/functional/docs/__init__.py b/tests/functional/docs/__init__.py index c6f8281e..fc756842 100644 --- a/tests/functional/docs/__init__.py +++ b/tests/functional/docs/__init__.py @@ -39,6 +39,15 @@ class BaseDocsFunctionalTest(unittest.TestCase): for line in lines: self.assertNotIn(line, contents) + def get_title_section_for(self, service_name): + contents = ServiceDocumenter( + service_name, self._session).document_service().decode('utf-8') + start_of_table_of_contents = 'Table of Contents' + start_index = contents.find(start_of_table_of_contents) + contents = contents[:start_index] + contents = contents.encode('utf-8') + return contents + def get_method_document_block(self, operation_name, contents): contents = contents.decode('utf-8') start_method_document = ' .. py:method:: %s(' % operation_name diff --git a/tests/functional/test_kinesis.py b/tests/functional/docs/test_sms_voice.py similarity index 57% rename from tests/functional/test_kinesis.py rename to tests/functional/docs/test_sms_voice.py index 0d6b525b..447e53c4 100644 --- a/tests/functional/test_kinesis.py +++ b/tests/functional/docs/test_sms_voice.py @@ -10,10 +10,15 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from tests import BaseSessionTest +from tests.functional.docs import BaseDocsFunctionalTest -class TestKinesis(BaseSessionTest): - def test_subscribe_to_shard_removed(self): - kinesis = self.session.create_client('kinesis', 'us-west-2') - with self.assertRaises(AttributeError): - kinesis.subscribe_to_shard + +class TestSMSVoiceDocs(BaseDocsFunctionalTest): + + def test_warning_at_top(self): + docs = self.get_title_section_for('sms-voice') + self.assert_contains_lines_in_order([ + '.. warning:', + ('This service client is deprecated. 
Please use ' + ':doc:`pinpoint-sms-voice ` instead.'), + ], docs) diff --git a/tests/functional/test_endpoints.py b/tests/functional/test_endpoints.py index 2dab2d18..7c82410d 100644 --- a/tests/functional/test_endpoints.py +++ b/tests/functional/test_endpoints.py @@ -80,6 +80,7 @@ ENDPOINT_PREFIX_OVERRIDE = { 'application-autoscaling': 'autoscaling', # For neptune, we send requests to the RDS endpoint. 'neptune': 'rds', + 'docdb': 'rds', } NOT_SUPPORTED_IN_SDK = [ 'mobileanalytics', diff --git a/tests/functional/test_paginator_config.py b/tests/functional/test_paginator_config.py index 6e3ab72c..1c6ef44d 100644 --- a/tests/functional/test_paginator_config.py +++ b/tests/functional/test_paginator_config.py @@ -22,6 +22,112 @@ KNOWN_PAGE_KEYS = set( ['input_token', 'py_input_token', 'output_token', 'result_key', 'limit_key', 'more_results', 'non_aggregate_keys']) MEMBER_NAME_CHARS = set(string.ascii_letters + string.digits) +# The goal here should be to remove all of these by updating the paginators +# to reference all the extra output keys. Nothing should ever be added to this +# list, it represents all the current released paginators that fail this test. 
+KNOWN_EXTRA_OUTPUT_KEYS = [ + 'alexaforbusiness.SearchUsers.TotalCount', + 'alexaforbusiness.SearchProfiles.TotalCount', + 'alexaforbusiness.SearchSkillGroups.TotalCount', + 'alexaforbusiness.SearchDevices.TotalCount', + 'alexaforbusiness.SearchRooms.TotalCount', + 'apigateway.GetApiKeys.warnings', + 'apigateway.GetUsage.usagePlanId', + 'apigateway.GetUsage.startDate', + 'apigateway.GetUsage.endDate', + 'athena.GetQueryResults.ResultSet', + 'cloudfront.ListCloudFrontOriginAccessIdentities.CloudFrontOriginAccessIdentityList', + 'cloudfront.ListDistributions.DistributionList', + 'cloudfront.ListInvalidations.InvalidationList', + 'cloudfront.ListStreamingDistributions.StreamingDistributionList', + 'codedeploy.ListDeploymentGroups.applicationName', + 'dms.DescribeTableStatistics.ReplicationTaskArn', + 'dms.DescribeReplicationTaskAssessmentResults.BucketName', + 'ec2.DescribeSpotFleetInstances.SpotFleetRequestId', + 'ec2.DescribeVpcEndpointServices.ServiceNames', + 'efs.DescribeFileSystems.Marker', + 'efs.DescribeMountTargets.Marker', + 'efs.DescribeTags.Marker', + 'elasticache.DescribeCacheParameters.CacheNodeTypeSpecificParameters', + 'elasticache.DescribeEngineDefaultParameters.EngineDefaults', + 'glacier.ListParts.PartSizeInBytes', + 'glacier.ListParts.ArchiveDescription', + 'glacier.ListParts.MultipartUploadId', + 'glacier.ListParts.VaultARN', + 'glacier.ListParts.CreationDate', + 'kinesis.DescribeStream.StreamDescription', + 'mturk.ListAssignmentsForHIT.NumResults', + 'mturk.ListQualificationTypes.NumResults', + 'mturk.ListHITs.NumResults', + 'mturk.ListWorkerBlocks.NumResults', + 'mturk.ListReviewableHITs.NumResults', + 'mturk.ListHITsForQualificationType.NumResults', + 'mturk.ListQualificationRequests.NumResults', + 'mturk.ListWorkersWithQualificationType.NumResults', + 'mturk.ListBonusPayments.NumResults', + 'neptune.DescribeEngineDefaultParameters.EngineDefaults', + 'rds.DescribeEngineDefaultClusterParameters.EngineDefaults', + 
'rds.DescribeEngineDefaultParameters.EngineDefaults', + 'redshift.DescribeDefaultClusterParameters.DefaultClusterParameters', + 'resource-groups.ListGroups.GroupIdentifiers', + 'resource-groups.SearchResources.QueryErrors', + 'resource-groups.ListGroupResources.QueryErrors', + 'route53.ListHealthChecks.MaxItems', + 'route53.ListHealthChecks.Marker', + 'route53.ListHostedZones.MaxItems', + 'route53.ListHostedZones.Marker', + 'route53.ListResourceRecordSets.MaxItems', + 's3.ListMultipartUploads.Delimiter', + 's3.ListMultipartUploads.KeyMarker', + 's3.ListMultipartUploads.Prefix', + 's3.ListMultipartUploads.Bucket', + 's3.ListMultipartUploads.MaxUploads', + 's3.ListMultipartUploads.UploadIdMarker', + 's3.ListMultipartUploads.EncodingType', + 's3.ListObjectVersions.MaxKeys', + 's3.ListObjectVersions.Delimiter', + 's3.ListObjectVersions.VersionIdMarker', + 's3.ListObjectVersions.KeyMarker', + 's3.ListObjectVersions.Prefix', + 's3.ListObjectVersions.Name', + 's3.ListObjectVersions.EncodingType', + 's3.ListObjects.MaxKeys', + 's3.ListObjects.Delimiter', + 's3.ListObjects.NextMarker', + 's3.ListObjects.Prefix', + 's3.ListObjects.Marker', + 's3.ListObjects.Name', + 's3.ListObjects.EncodingType', + 's3.ListObjectsV2.StartAfter', + 's3.ListObjectsV2.MaxKeys', + 's3.ListObjectsV2.Delimiter', + 's3.ListObjectsV2.ContinuationToken', + 's3.ListObjectsV2.KeyCount', + 's3.ListObjectsV2.Prefix', + 's3.ListObjectsV2.Name', + 's3.ListObjectsV2.EncodingType', + 's3.ListParts.PartNumberMarker', + 's3.ListParts.AbortDate', + 's3.ListParts.MaxParts', + 's3.ListParts.Bucket', + 's3.ListParts.Key', + 's3.ListParts.UploadId', + 's3.ListParts.AbortRuleId', + 's3.ListParts.RequestCharged', + 'sms.GetReplicationRuns.replicationJob', + 'sms.GetServers.lastModifiedOn', + 'sms.GetServers.serverCatalogStatus', + 'storagegateway.DescribeTapeRecoveryPoints.GatewayARN', + 'storagegateway.DescribeVTLDevices.GatewayARN', + 'storagegateway.ListVolumes.GatewayARN', + 
'workdocs.DescribeUsers.TotalNumberOfUsers', + 'xray.BatchGetTraces.UnprocessedTraceIds', + 'xray.GetServiceGraph.EndTime', + 'xray.GetServiceGraph.ContainsOldGroupVersions', + 'xray.GetServiceGraph.StartTime', + 'xray.GetTraceSummaries.TracesProcessedCount', + 'xray.GetTraceSummaries.ApproximateTime', +] def test_lint_pagination_configs(): @@ -109,7 +215,7 @@ def _validate_output_keys_match(operation_name, page_config, service_model): # this is no longer a realistic thing to check. Someone would have to # backport the missing keys to all the paginators. output_shape = service_model.operation_model(operation_name).output_shape - output_members = output_shape.members + output_members = set(output_shape.members) for key_name, output_key in _get_all_page_output_keys(page_config): if _looks_like_jmespath(output_key): _validate_jmespath_compiles(output_key) @@ -118,6 +224,27 @@ def _validate_output_keys_match(operation_name, page_config, service_model): raise AssertionError("Pagination key '%s' refers to an output " "member that does not exist: %s" % ( key_name, output_key)) + output_members.remove(output_key) + + for member in list(output_members): + key = "%s.%s.%s" % (service_model.service_name, + operation_name, + member) + if key in KNOWN_EXTRA_OUTPUT_KEYS: + output_members.remove(member) + + if output_members: + for member in output_members: + key = "%s.%s.%s" % (service_model.service_name, + operation_name, + member) + with open('/tmp/blah', 'a') as f: + f.write("'%s',\n" % key) + raise AssertionError("There are member names in the output shape of " + "%s that are not accounted for in the pagination " + "config for service %s: %s" % ( + operation_name, service_model.service_name, + ', '.join(output_members))) def _looks_like_jmespath(expression): diff --git a/tests/integration/test_ec2.py b/tests/integration/test_ec2.py index c19d1c4e..4968e7e9 100644 --- a/tests/integration/test_ec2.py +++ b/tests/integration/test_ec2.py @@ -30,7 +30,8 @@ class 
TestEC2(unittest.TestCase): result = self.client.describe_availability_zones() zones = list( sorted(a['ZoneName'] for a in result['AvailabilityZones'])) - self.assertEqual(zones, ['us-west-2a', 'us-west-2b', 'us-west-2c']) + self.assertTrue( + set(['us-west-2a', 'us-west-2b', 'us-west-2c']).issubset(zones)) def test_get_console_output_handles_error(self): # Want to ensure the underlying ClientError is propogated diff --git a/tests/unit/protocols/output/event-stream.json b/tests/unit/protocols/output/event-stream.json deleted file mode 100644 index 0b6d75c7..00000000 --- a/tests/unit/protocols/output/event-stream.json +++ /dev/null @@ -1,174 +0,0 @@ -[ - { - "description": "REST XML Event Stream", - "metadata": { - "protocol": "rest-xml" - }, - "shapes": { - "OutputShape": { - "type": "structure", - "members": { - "Payload": {"shape": "EventStream"} - }, - "payload": "Payload" - }, - "EventStream": { - "type": "structure", - "eventstream": true, - "members": { - "TypeA": {"shape": "TypeAEvent"}, - "TypeB": {"shape": "TypeBEvent"}, - "TypeC": {"shape": "TypeCEvent"} - } - }, - "TypeAEvent": { - "type": "structure", - "event": true, - "members": { - "Payload": { - "shape": "BlobType", - "eventpayload": true - } - } - }, - "TypeBEvent": { - "type": "structure", - "event": true, - "members": { - "Details": { - "shape": "Details", - "eventpayload": true - } - } - }, - "TypeCEvent": { - "type": "structure", - "event": true, - "members": { - "Details": { - "shape": "Details", - "eventpayload": true - }, - "Boolean": { - "shape": "BooleanType", - "eventheader": true - }, - "Integer": { - "shape": "IntegerType", - "eventheader": true - }, - "Blob": { - "shape": "BlobType", - "eventheader": true - }, - "String": { - "shape": "StringType", - "eventheader": true - }, - "Timestamp": { - "shape": "TimestampType", - "eventheader": true - } - } - }, - "Details": { - "type": "structure", - "members": { - "StringField": {"shape": "StringType"}, - "IntegerField": {"shape": 
"IntegerType"} - } - }, - "StringType": { - "type": "string" - }, - "IntegerType": { - "type": "integer" - }, - "BooleanType": { - "type": "boolean" - }, - "TimestampType": { - "type": "timestamp" - }, - "BlobType": { - "type": "blob" - } - }, - "cases": [ - { - "given": { - "output": { - "shape": "OutputShape" - }, - "name": "OperationName" - }, - "result": { - "Payload": [ - { - "TypeA": {"Payload": "somebytes"} - }, - { - "TypeB": { - "Details": { - "StringField": "somestring", - "IntegerField": 123 - } - } - } - ] - }, - "response": { - "status_code": 200, - "headers": {}, - "body": "AAAAbAAAAFPLgkVrDTptZXNzYWdlLXR5cGUHAAVldmVudAs6ZXZlbnQtdHlwZQcABVR5cGVBDTpjb250ZW50LXR5cGUHABhhcHBsaWNhdGlvbi9vY3RldC1zdHJlYW1zb21lYnl0ZXMesj2HAAAAsAAAAEOaMMdXDTptZXNzYWdlLXR5cGUHAAVldmVudAs6ZXZlbnQtdHlwZQcABVR5cGVCDTpjb250ZW50LXR5cGUHAAh0ZXh0L3htbDxUeXBlQiB4bWxucz0iIj48U3RyaW5nRmllbGQ+c29tZXN0cmluZzwvU3RyaW5nRmllbGQ+PEludGVnZXJGaWVsZD4xMjM8L0ludGVnZXJGaWVsZD48L1R5cGVCPiwthPo=" - } - }, - { - "given": { - "output": { - "shape": "OutputShape" - }, - "name": "OperationName" - }, - "result": { - "Payload": [ - { - "TypeC": { - "Boolean": true, - "Integer": 123, - "Blob": "someblob", - "String": "somestring", - "Timestamp": 1422172800, - "Details": { - "StringField": "somestring", - "IntegerField": 123 - } - } - } - ] - }, - "response": { - "status_code": 200, - "headers": {}, - "body": "AAABAQAAAJBjEbY4DTptZXNzYWdlLXR5cGUHAAVldmVudAs6ZXZlbnQtdHlwZQcABVR5cGVDDTpjb250ZW50LXR5cGUHAAh0ZXh0L3htbAdCb29sZWFuAAdJbnRlZ2VyBAAAAHsEQmxvYgYACHNvbWVibG9iBlN0cmluZwcACnNvbWVzdHJpbmcJVGltZXN0YW1wCAAAAUsgGsQAPERldGFpbHMgeG1sbnM9IiI+PFN0cmluZ0ZpZWxkPnNvbWVzdHJpbmc8L1N0cmluZ0ZpZWxkPjxJbnRlZ2VyRmllbGQ+MTIzPC9JbnRlZ2VyRmllbGQ+PC9EZXRhaWxzPhGUvKo=" - } - }, - { - "given": { - "output": { - "shape": "OutputShape" - }, - "name": "OperationName" - }, - "result": { - "Payload": [] - }, - "response": { - "status_code": 200, - "headers": {}, - "body": "" - } - } - ] - } -] diff --git 
a/tests/unit/protocols/output/json.json b/tests/unit/protocols/output/json.json index 60c55c2d..dd2173bc 100644 --- a/tests/unit/protocols/output/json.json +++ b/tests/unit/protocols/output/json.json @@ -382,5 +382,95 @@ } } ] + }, + { + "description": "RPC JSON Event Stream", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Payload": {"shape": "EventStream"}, + "InitialResponse": {"shape": "StringType"} + } + }, + "EventStream": { + "type": "structure", + "eventstream": true, + "members": { + "TypeA": {"shape": "TypeAEvent"}, + "TypeB": {"shape": "TypeBEvent"} + } + }, + "TypeAEvent": { + "type": "structure", + "event": true, + "members": { + "Payload": { + "shape": "BlobType", + "eventpayload": true + } + } + }, + "TypeBEvent": { + "type": "structure", + "event": true, + "members": { + "Details": { + "shape": "Details", + "eventpayload": true + } + } + }, + "Details": { + "type": "structure", + "members": { + "StringField": {"shape": "StringType"}, + "IntegerField": {"shape": "IntegerType"} + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "InitialResponse": "sometext", + "Payload": [ + { + "TypeA": {"Payload": "somebytes"} + }, + { + "TypeB": { + "Details": { + "StringField": "somestring", + "IntegerField": 123 + } + } + } + ] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": 
"AAAAfgAAAE/Fo93GDTptZXNzYWdlLXR5cGUHAAVldmVudAs6ZXZlbnQtdHlwZQcAEGluaXRpYWwtcmVzcG9uc2UNOmNvbnRlbnQtdHlwZQcACXRleHQvanNvbnsiSW5pdGlhbFJlc3BvbnNlIjogInNvbWV0ZXh0In32mCSDAAAAbAAAAFPLgkVrDTptZXNzYWdlLXR5cGUHAAVldmVudAs6ZXZlbnQtdHlwZQcABVR5cGVBDTpjb250ZW50LXR5cGUHABhhcHBsaWNhdGlvbi9vY3RldC1zdHJlYW1zb21lYnl0ZXMesj2HAAAAhgAAAEQqNR/SDTptZXNzYWdlLXR5cGUHAAVldmVudAs6ZXZlbnQtdHlwZQcABVR5cGVCDTpjb250ZW50LXR5cGUHAAl0ZXh0L2pzb257IlN0cmluZ0ZpZWxkIjogInNvbWVzdHJpbmciLCAiSW50ZWdlckZpZWxkIjogMTIzfffGN30=" + } + } + ] } ] diff --git a/tests/unit/protocols/output/rest-xml.json b/tests/unit/protocols/output/rest-xml.json index 9e37d8fa..a9c55239 100644 --- a/tests/unit/protocols/output/rest-xml.json +++ b/tests/unit/protocols/output/rest-xml.json @@ -847,5 +847,177 @@ } } ] + }, + { + "description": "REST XML Event Stream", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Payload": {"shape": "EventStream"} + }, + "payload": "Payload" + }, + "EventStream": { + "type": "structure", + "eventstream": true, + "members": { + "TypeA": {"shape": "TypeAEvent"}, + "TypeB": {"shape": "TypeBEvent"}, + "TypeC": {"shape": "TypeCEvent"} + } + }, + "TypeAEvent": { + "type": "structure", + "event": true, + "members": { + "Payload": { + "shape": "BlobType", + "eventpayload": true + } + } + }, + "TypeBEvent": { + "type": "structure", + "event": true, + "members": { + "Details": { + "shape": "Details", + "eventpayload": true + } + } + }, + "TypeCEvent": { + "type": "structure", + "event": true, + "members": { + "Details": { + "shape": "Details", + "eventpayload": true + }, + "Boolean": { + "shape": "BooleanType", + "eventheader": true + }, + "Integer": { + "shape": "IntegerType", + "eventheader": true + }, + "Blob": { + "shape": "BlobType", + "eventheader": true + }, + "String": { + "shape": "StringType", + "eventheader": true + }, + "Timestamp": { + "shape": "TimestampType", + "eventheader": true + } + } + }, + "Details": { + 
"type": "structure", + "members": { + "StringField": {"shape": "StringType"}, + "IntegerField": {"shape": "IntegerType"} + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BooleanType": { + "type": "boolean" + }, + "TimestampType": { + "type": "timestamp" + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Payload": [ + { + "TypeA": {"Payload": "somebytes"} + }, + { + "TypeB": { + "Details": { + "StringField": "somestring", + "IntegerField": 123 + } + } + } + ] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "AAAAbAAAAFPLgkVrDTptZXNzYWdlLXR5cGUHAAVldmVudAs6ZXZlbnQtdHlwZQcABVR5cGVBDTpjb250ZW50LXR5cGUHABhhcHBsaWNhdGlvbi9vY3RldC1zdHJlYW1zb21lYnl0ZXMesj2HAAAAsAAAAEOaMMdXDTptZXNzYWdlLXR5cGUHAAVldmVudAs6ZXZlbnQtdHlwZQcABVR5cGVCDTpjb250ZW50LXR5cGUHAAh0ZXh0L3htbDxUeXBlQiB4bWxucz0iIj48U3RyaW5nRmllbGQ+c29tZXN0cmluZzwvU3RyaW5nRmllbGQ+PEludGVnZXJGaWVsZD4xMjM8L0ludGVnZXJGaWVsZD48L1R5cGVCPiwthPo=" + } + }, + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Payload": [ + { + "TypeC": { + "Boolean": true, + "Integer": 123, + "Blob": "someblob", + "String": "somestring", + "Timestamp": 1422172800, + "Details": { + "StringField": "somestring", + "IntegerField": 123 + } + } + } + ] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "AAABAQAAAJBjEbY4DTptZXNzYWdlLXR5cGUHAAVldmVudAs6ZXZlbnQtdHlwZQcABVR5cGVDDTpjb250ZW50LXR5cGUHAAh0ZXh0L3htbAdCb29sZWFuAAdJbnRlZ2VyBAAAAHsEQmxvYgYACHNvbWVibG9iBlN0cmluZwcACnNvbWVzdHJpbmcJVGltZXN0YW1wCAAAAUsgGsQAPERldGFpbHMgeG1sbnM9IiI+PFN0cmluZ0ZpZWxkPnNvbWVzdHJpbmc8L1N0cmluZ0ZpZWxkPjxJbnRlZ2VyRmllbGQ+MTIzPC9JbnRlZ2VyRmllbGQ+PC9EZXRhaWxzPhGUvKo=" + } + }, + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Payload": [] + }, + "response": { + 
"status_code": 200, + "headers": {}, + "body": "" + } + } + ] } ] diff --git a/tests/unit/test_eventstream.py b/tests/unit/test_eventstream.py index 41972939..595cec96 100644 --- a/tests/unit/test_eventstream.py +++ b/tests/unit/test_eventstream.py @@ -20,6 +20,7 @@ from botocore.eventstream import ( EventStreamMessage, MessagePrelude, EventStreamBuffer, ChecksumMismatch, InvalidPayloadLength, InvalidHeadersLength, DuplicateHeader, EventStreamHeaderParser, DecodeUtils, EventStream, + NoInitialResponseError ) from botocore.exceptions import EventStreamError @@ -363,12 +364,20 @@ def test_unpack_prelude(): assert_equal(prelude, ((1, 2, 3), 12)) -def test_event_stream_wrapper_iteration(): +def create_mock_raw_stream(*data): raw_stream = Mock() - raw_stream.stream.return_value = [ + def generator(): + for chunk in data: + yield chunk + raw_stream.stream = generator + return raw_stream + + +def test_event_stream_wrapper_iteration(): + raw_stream = create_mock_raw_stream( b"\x00\x00\x00+\x00\x00\x00\x0e4\x8b\xec{\x08event-id\x04\x00", b"\x00\xa0\x0c{'foo':'bar'}\xd3\x89\x02\x85", - ] + ) parser = Mock(spec=EventStreamXMLParser) output_shape = Mock() event_stream = EventStream(raw_stream, output_shape, parser, '') @@ -385,10 +394,7 @@ def test_event_stream_wrapper_iteration(): @raises(EventStreamError) def test_eventstream_wrapper_iteration_error(): - raw_stream = Mock() - raw_stream.stream.return_value = [ - ERROR_EVENT_MESSAGE[0] - ] + raw_stream = create_mock_raw_stream(ERROR_EVENT_MESSAGE[0]) parser = Mock(spec=EventStreamXMLParser) parser.parse.return_value = {} output_shape = Mock() @@ -401,3 +407,44 @@ def test_event_stream_wrapper_close(): event_stream = EventStream(raw_stream, None, None, '') event_stream.close() raw_stream.close.assert_called_once_with() + + +def test_event_stream_initial_response(): + raw_stream = create_mock_raw_stream( + b'\x00\x00\x00~\x00\x00\x00O\xc5\xa3\xdd\xc6\r:message-type\x07\x00', + 
b'\x05event\x0b:event-type\x07\x00\x10initial-response\r:content-type', + b'\x07\x00\ttext/json{"InitialResponse": "sometext"}\xf6\x98$\x83' + ) + parser = Mock(spec=EventStreamXMLParser) + output_shape = Mock() + event_stream = EventStream(raw_stream, output_shape, parser, '') + event = event_stream.get_initial_response() + headers = { + ':message-type': 'event', + ':event-type': 'initial-response', + ':content-type': 'text/json', + } + payload = b'{"InitialResponse": "sometext"}' + assert event.headers == headers + assert event.payload == payload + + +@raises(NoInitialResponseError) +def test_event_stream_initial_response_wrong_type(): + raw_stream = create_mock_raw_stream( + b"\x00\x00\x00+\x00\x00\x00\x0e4\x8b\xec{\x08event-id\x04\x00", + b"\x00\xa0\x0c{'foo':'bar'}\xd3\x89\x02\x85", + ) + parser = Mock(spec=EventStreamXMLParser) + output_shape = Mock() + event_stream = EventStream(raw_stream, output_shape, parser, '') + event_stream.get_initial_response() + + +@raises(NoInitialResponseError) +def test_event_stream_initial_response_no_event(): + raw_stream = create_mock_raw_stream(b'') + parser = Mock(spec=EventStreamXMLParser) + output_shape = Mock() + event_stream = EventStream(raw_stream, output_shape, parser, '') + event_stream.get_initial_response() diff --git a/tests/unit/test_monitoring.py b/tests/unit/test_monitoring.py index 7842b969..0ee6a873 100644 --- a/tests/unit/test_monitoring.py +++ b/tests/unit/test_monitoring.py @@ -605,6 +605,51 @@ class TestCSMSerializer(unittest.TestCase): serialized_event_dict = self.get_serialized_event_dict(event) self.assertEqual(serialized_event_dict['Region'], 'my-region-1') + def test_serialize_api_call_event_user_agent(self): + event = APICallEvent( + service=self.service, operation=self.operation, timestamp=1000) + attempt = event.new_api_call_attempt(2000) + attempt.request_headers = {'User-Agent': self.user_agent} + serialized_event_dict = self.get_serialized_event_dict(event) + 
self.assertEqual(serialized_event_dict['UserAgent'], self.user_agent) + + def test_serialize_api_call_event_http_status_code(self): + event = APICallEvent( + service=self.service, operation=self.operation, timestamp=1000) + attempt = event.new_api_call_attempt(2000) + attempt.http_status_code = 200 + serialized_event_dict = self.get_serialized_event_dict(event) + self.assertEqual(serialized_event_dict['FinalHttpStatusCode'], 200) + + def test_serialize_api_call_event_parsed_error(self): + event = APICallEvent( + service=self.service, operation=self.operation, timestamp=1000) + attempt = event.new_api_call_attempt(2000) + attempt.parsed_error = { + 'Code': 'MyErrorCode', + 'Message': 'My error message' + } + serialized_event_dict = self.get_serialized_event_dict(event) + self.assertEqual( + serialized_event_dict['FinalAwsException'], 'MyErrorCode') + self.assertEqual( + serialized_event_dict['FinalAwsExceptionMessage'], + 'My error message' + ) + + def test_serialize_api_call_event_wire_exception(self): + event = APICallEvent( + service=self.service, operation=self.operation, timestamp=1000) + attempt = event.new_api_call_attempt(2000) + attempt.wire_exception = Exception('Error on the wire') + serialized_event_dict = self.get_serialized_event_dict(event) + self.assertEqual( + serialized_event_dict['FinalSdkException'], 'Exception') + self.assertEqual( + serialized_event_dict['FinalSdkExceptionMessage'], + 'Error on the wire' + ) + def test_serialize_api_call_event_with_retries_exceeded(self): event = APICallEvent( service=self.service, operation=self.operation, timestamp=1000, diff --git a/tests/unit/test_parsers.py b/tests/unit/test_parsers.py index 7cb05899..f5b1b1a4 100644 --- a/tests/unit/test_parsers.py +++ b/tests/unit/test_parsers.py @@ -10,7 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from tests import unittest +from tests import unittest, RawResponse import datetime import collections @@ -225,6 +225,35 @@ class TestResponseMetadataParsed(unittest.TestCase): 'HTTPStatusCode': 200, 'HTTPHeaders': headers}}) + def test_response_no_initial_event_stream(self): + parser = parsers.JSONParser() + output_shape = model.StructureShape( + 'OutputShape', + { + 'type': 'structure', + 'members': { + 'Payload': {'shape': 'Payload'} + } + }, + model.ShapeResolver({ + 'Payload': { + 'type': 'structure', + 'members': [], + 'eventstream': True + } + }) + ) + with self.assertRaises(parsers.ResponseParserError): + response_dict = { + 'status_code': 200, + 'headers': {}, + 'body': RawResponse(b''), + 'context': { + 'operation_name': 'TestOperation' + } + } + parser.parse(response_dict, output_shape) + def test_metadata_always_exists_for_json(self): # ResponseMetadata is used for more than just the request id. It # should always get populated, even if the request doesn't seem to @@ -626,6 +655,7 @@ class TestEventStreamParsers(unittest.TestCase): 'EventB': {'shape': 'EventBStructure'}, 'EventC': {'shape': 'EventCStructure'}, 'EventD': {'shape': 'EventDStructure'}, + 'EventException': {'shape': 'ExceptionShape'}, } }, model.ShapeResolver({ @@ -684,7 +714,14 @@ class TestEventStreamParsers(unittest.TestCase): }, 'BlobShape': {'type': 'blob'}, 'StringShape': {'type': 'string'}, - 'IntShape': {'type': 'integer'} + 'IntShape': {'type': 'integer'}, + 'ExceptionShape': { + 'exception': True, + 'type': 'structure', + 'members': { + 'message': {'shape': 'StringShape'} + } + }, }) ) @@ -767,7 +804,7 @@ class TestEventStreamParsers(unittest.TestCase): self.assertEqual(parsed, expected) def test_parses_error_event(self): - error_code = 'client/SomeError', + error_code = 'client/SomeError' error_message = 'You did something wrong' headers = { ':message-type': 'error', @@ -784,6 +821,23 @@ class TestEventStreamParsers(unittest.TestCase): } self.assertEqual(parsed, expected) + def 
test_parses_exception_event(self): + self.parser = parsers.EventStreamJSONParser() + error_code = 'EventException' + headers = { + ':message-type': 'exception', + ':exception-type': error_code, + } + body = b'{"message": "You did something wrong"}' + parsed = self.parse_event(headers, body, status_code=400) + expected = { + 'Error': { + 'Code': error_code, + 'Message': 'You did something wrong' + } + } + self.assertEqual(parsed, expected) + def test_parses_event_json(self): self.parser = parsers.EventStreamJSONParser() headers = {':event-type': 'EventD'} diff --git a/tests/unit/test_response.py b/tests/unit/test_response.py index d1e1fd8c..6ad43e53 100644 --- a/tests/unit/test_response.py +++ b/tests/unit/test_response.py @@ -167,6 +167,12 @@ class TestStreamWrapper(unittest.TestCase): [b'1234567890', b'1234567890', b'12345'], ) + def test_streaming_line_empty_body(self): + stream = response.StreamingBody( + six.BytesIO(b''), content_length=0, + ) + self.assert_lines(stream.iter_lines(), []) + class FakeRawResponse(six.BytesIO): def stream(self, amt=1024, decode_content=None):