From 5c030d6f87e6548c47c868a8d4f06f863df229da Mon Sep 17 00:00:00 2001 From: TANIGUCHI Takaki Date: Sun, 11 Feb 2018 17:52:35 +0900 Subject: [PATCH] New upstream version 1.8.40+repack --- PKG-INFO | 2 +- botocore.egg-info/PKG-INFO | 2 +- botocore.egg-info/SOURCES.txt | 2 + botocore/__init__.py | 2 +- botocore/data/_retry.json | 9 + botocore/data/acm/2015-12-08/service-2.json | 8 +- .../apigateway/2015-07-09/examples-1.json | 0 .../data/appstream/2016-12-01/service-2.json | 44 +- .../data/appstream/2016-12-01/waiters-2.json | 55 ++ .../data/budgets/2016-10-20/examples-1.json | 0 .../data/budgets/2016-10-20/service-2.json | 550 +++++++++---- .../data/cloud9/2017-09-23/service-2.json | 14 +- .../cloudformation/2010-05-15/waiters-2.json | 1 - .../data/cloudfront/2016-09-07/service-2.json | 0 .../data/cloudfront/2017-03-25/waiters-2.json | 2 +- .../cognito-idp/2016-04-18/examples-1.json | 0 .../data/config/2014-11-12/examples-1.json | 0 botocore/data/dms/2016-01-01/service-2.json | 119 ++- botocore/data/ds/2015-04-16/service-2.json | 7 +- .../data/dynamodb/2012-08-10/service-2.json | 62 +- botocore/data/ec2/2016-09-15/examples-1.json | 0 .../data/ec2/2016-09-15/paginators-1.json | 0 botocore/data/ec2/2016-09-15/service-2.json | 0 botocore/data/ec2/2016-09-15/waiters-2.json | 0 botocore/data/ec2/2016-11-15/examples-1.json | 0 botocore/data/ec2/2016-11-15/service-2.json | 153 +++- botocore/data/ec2/2016-11-15/waiters-2.json | 29 + botocore/data/ecr/2015-09-21/examples-1.json | 0 botocore/data/ecs/2014-11-13/waiters-2.json | 2 +- .../elasticache/2015-02-02/waiters-2.json | 268 +++--- .../2010-12-01/examples-1.json | 0 botocore/data/elb/2012-06-01/waiters-2.json | 23 + botocore/data/elbv2/2015-12-01/waiters-2.json | 100 +++ botocore/data/emr/2009-03-31/waiters-2.json | 25 + botocore/data/endpoints.json | 10 + .../data/gamelift/2015-10-01/examples-1.json | 0 .../data/gamelift/2015-10-01/service-2.json | 160 ++-- botocore/data/glue/2017-03-31/service-2.json | 84 +- 
.../data/kinesis/2013-12-02/service-2.json | 180 +++-- .../data/lex-models/2017-04-19/service-2.json | 25 +- .../lex-runtime/2016-11-28/service-2.json | 24 +- .../data/medialive/2017-10-14/service-2.json | 145 +++- .../data/mediastore/2017-09-01/service-2.json | 192 ++++- .../data/opsworks/2013-02-18/service-2.json | 108 ++- botocore/data/rds/2014-10-31/examples-1.json | 0 botocore/data/rds/2014-10-31/waiters-2.json | 107 ++- .../data/redshift/2012-12-01/waiters-2.json | 21 +- botocore/data/s3/2006-03-01/waiters-2.json | 10 + .../servicecatalog/2015-12-10/service-2.json | 763 +++++++++++++++++- .../2017-03-14/service-2.json | 168 ++-- .../data/snowball/2016-06-30/examples-1.json | 0 botocore/data/sns/2010-03-31/examples-1.json | 0 botocore/data/sns/2010-03-31/service-2.json | 0 .../data/ssm/2014-11-06/paginators-1.json | 8 +- botocore/data/ssm/2014-11-06/service-2.json | 105 ++- .../data/support/2013-04-15/examples-1.json | 0 docs/source/conf.py | 2 +- requirements.txt | 1 + tests/functional/test_waiter_config.py | 89 ++ 59 files changed, 3058 insertions(+), 623 deletions(-) mode change 100755 => 100644 botocore/data/apigateway/2015-07-09/examples-1.json create mode 100644 botocore/data/appstream/2016-12-01/waiters-2.json mode change 100755 => 100644 botocore/data/budgets/2016-10-20/examples-1.json mode change 100755 => 100644 botocore/data/cloudfront/2016-09-07/service-2.json mode change 100755 => 100644 botocore/data/cognito-idp/2016-04-18/examples-1.json mode change 100755 => 100644 botocore/data/config/2014-11-12/examples-1.json mode change 100755 => 100644 botocore/data/ec2/2016-09-15/examples-1.json mode change 100755 => 100644 botocore/data/ec2/2016-09-15/paginators-1.json mode change 100755 => 100644 botocore/data/ec2/2016-09-15/service-2.json mode change 100755 => 100644 botocore/data/ec2/2016-09-15/waiters-2.json mode change 100755 => 100644 botocore/data/ec2/2016-11-15/examples-1.json mode change 100755 => 100644 
botocore/data/ec2/2016-11-15/waiters-2.json mode change 100755 => 100644 botocore/data/ecr/2015-09-21/examples-1.json mode change 100755 => 100644 botocore/data/elasticbeanstalk/2010-12-01/examples-1.json create mode 100644 botocore/data/elbv2/2015-12-01/waiters-2.json mode change 100755 => 100644 botocore/data/gamelift/2015-10-01/examples-1.json mode change 100755 => 100644 botocore/data/rds/2014-10-31/examples-1.json mode change 100755 => 100644 botocore/data/snowball/2016-06-30/examples-1.json mode change 100755 => 100644 botocore/data/sns/2010-03-31/examples-1.json mode change 100755 => 100644 botocore/data/sns/2010-03-31/service-2.json mode change 100755 => 100644 botocore/data/support/2013-04-15/examples-1.json diff --git a/PKG-INFO b/PKG-INFO index 1850b9eb..cb68c695 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.8.36 +Version: 1.8.40 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index 1850b9eb..cb68c695 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.8.36 +Version: 1.8.40 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt index a48c52c5..9f26a743 100644 --- a/botocore.egg-info/SOURCES.txt +++ b/botocore.egg-info/SOURCES.txt @@ -55,6 +55,7 @@ botocore/data/application-autoscaling/2016-02-06/service-2.json botocore/data/appstream/2016-12-01/examples-1.json botocore/data/appstream/2016-12-01/paginators-1.json botocore/data/appstream/2016-12-01/service-2.json +botocore/data/appstream/2016-12-01/waiters-2.json botocore/data/appsync/2017-07-25/paginators-1.json botocore/data/appsync/2017-07-25/service-2.json botocore/data/athena/2017-05-18/paginators-1.json @@ -253,6 +254,7 @@ botocore/data/elb/2012-06-01/waiters-2.json botocore/data/elbv2/2015-12-01/examples-1.json botocore/data/elbv2/2015-12-01/paginators-1.json botocore/data/elbv2/2015-12-01/service-2.json +botocore/data/elbv2/2015-12-01/waiters-2.json botocore/data/emr/2009-03-31/examples-1.json botocore/data/emr/2009-03-31/paginators-1.json botocore/data/emr/2009-03-31/service-2.json diff --git a/botocore/__init__.py b/botocore/__init__.py index 14ee2a37..df1e0840 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.8.36' +__version__ = '1.8.40' class NullHandler(logging.Handler): diff --git a/botocore/data/_retry.json b/botocore/data/_retry.json index fe12d010..3b626dbf 100644 --- a/botocore/data/_retry.json +++ b/botocore/data/_retry.json @@ -24,6 +24,14 @@ } } }, + "request_throttled_exception": { + "applies_when": { + "response": { + "service_error_code": "RequestThrottledException", + "http_status_code": 400 + } + } + }, "too_many_requests": { "applies_when": { "response": { @@ -89,6 +97,7 @@ "limit_exceeded": {"$ref": "limit_exceeded"}, "throttling_exception": {"$ref": "throttling_exception"}, "throttled_exception": {"$ref": "throttled_exception"}, + "request_throttled_exception": {"$ref": 
"request_throttled_exception"}, "throttling": {"$ref": "throttling"}, "too_many_requests": {"$ref": "too_many_requests"} } diff --git a/botocore/data/acm/2015-12-08/service-2.json b/botocore/data/acm/2015-12-08/service-2.json index 4bc70874..1a3182f6 100644 --- a/botocore/data/acm/2015-12-08/service-2.json +++ b/botocore/data/acm/2015-12-08/service-2.json @@ -68,7 +68,7 @@ {"shape":"RequestInProgressException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Retrieves a certificate specified by an ARN and its certificate chain . The chain is an ordered list of certificates that contains the end entity ertificate, intermediate certificates of subordinate CAs, and the root certificate in that order. The certificate and certificate chain are base64 encoded. If you want to decode the certificate to see the individual fields, you can use OpenSSL.

" + "documentation":"

Retrieves a certificate specified by an ARN and its certificate chain . The chain is an ordered list of certificates that contains the end entity certificate, intermediate certificates of subordinate CAs, and the root certificate in that order. The certificate and certificate chain are base64 encoded. If you want to decode the certificate to see the individual fields, you can use OpenSSL.

" }, "ImportCertificate":{ "name":"ImportCertificate", @@ -82,7 +82,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Imports a certificate into AWS Certificate Manager (ACM) to use with services that are integrated with ACM. For more information, see Integrated Services.

ACM does not provide managed renewal for certificates that you import.

For more information about importing certificates into ACM, including the differences between certificates that you import and those that ACM provides, see Importing Certificates in the AWS Certificate Manager User Guide.

In general, you can import almost any valid certificate. However, services integrated with ACM allow only certificate types they support to be associated with their resources. The following guidelines are also important:

This operation returns the Amazon Resource Name (ARN) of the imported certificate.

" + "documentation":"

Imports a certificate into AWS Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the AWS Certificate Manager User Guide.

ACM does not provide managed renewal for certificates that you import.

Note the following guidelines when importing third party certificates:

This operation returns the Amazon Resource Name (ARN) of the imported certificate.

" }, "ListCertificates":{ "name":"ListCertificates", @@ -680,7 +680,7 @@ }, "Includes":{ "shape":"Filters", - "documentation":"

Filter the certificate list by one or more of the following values. For more information, see the Filters structure.

" + "documentation":"

Filter the certificate list. For more information, see the Filters structure.

" }, "NextToken":{ "shape":"NextToken", @@ -811,7 +811,7 @@ }, "DomainValidationOptions":{ "shape":"DomainValidationOptionList", - "documentation":"

The domain name that you want ACM to use to send you emails so taht your can validate domain ownership.

" + "documentation":"

The domain name that you want ACM to use to send you emails so that you can validate domain ownership.

" } } }, diff --git a/botocore/data/apigateway/2015-07-09/examples-1.json b/botocore/data/apigateway/2015-07-09/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index d27e3083..43e5fb37 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -218,7 +218,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the specified directory configurations.

" + "documentation":"

Describes the specified directory configurations. Note that although the response syntax in this topic includes the account password, this password is not returned in the actual response.

" }, "DescribeFleets":{ "name":"DescribeFleets", @@ -341,7 +341,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Lists the tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" }, "StartFleet":{ "name":"StartFleet", @@ -416,7 +416,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

To list the current tags for your resources, use ListTagsForResource. To disassociate tags from your resources, use UntagResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

To list the current tags for your resources, use ListTagsForResource. To disassociate tags from your resources, use UntagResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" }, "UntagResource":{ "name":"UntagResource", @@ -429,7 +429,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disassociates the specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Disassociates the specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" }, "UpdateDirectoryConfig":{ "name":"UpdateDirectoryConfig", @@ -819,6 +819,10 @@ "StorageConnectors":{ "shape":"StorageConnectorList", "documentation":"

The storage connectors to enable.

" + }, + "RedirectURL":{ + "shape":"RedirectURL", + "documentation":"

The URL the user is redirected to after the streaming session ends.

" } } }, @@ -983,7 +987,7 @@ "members":{ "DirectoryConfigs":{ "shape":"DirectoryConfigList", - "documentation":"

Information about the directory configurations.

" + "documentation":"

Information about the directory configurations. Note that although the response syntax in this topic includes the account password, this password is not returned in the actual response.

" }, "NextToken":{ "shape":"String", @@ -1733,6 +1737,10 @@ "type":"string", "enum":["WINDOWS"] }, + "RedirectURL":{ + "type":"string", + "max":1000 + }, "ResourceAlreadyExistsException":{ "type":"structure", "members":{ @@ -1895,6 +1903,10 @@ "shape":"StorageConnectorList", "documentation":"

The storage connectors to enable.

" }, + "RedirectURL":{ + "shape":"RedirectURL", + "documentation":"

The URL the user is redirected to after the streaming session ends.

" + }, "StackErrors":{ "shape":"StackErrors", "documentation":"

The errors for the stack.

" @@ -1902,6 +1914,17 @@ }, "documentation":"

Describes a stack.

" }, + "StackAttribute":{ + "type":"string", + "enum":[ + "STORAGE_CONNECTORS", + "REDIRECT_URL" + ] + }, + "StackAttributes":{ + "type":"list", + "member":{"shape":"StackAttribute"} + }, "StackError":{ "type":"structure", "members":{ @@ -2236,7 +2259,16 @@ }, "DeleteStorageConnectors":{ "shape":"Boolean", - "documentation":"

Deletes the storage connectors currently enabled for the stack.

" + "documentation":"

Deletes the storage connectors currently enabled for the stack.

", + "deprecated":true + }, + "RedirectURL":{ + "shape":"RedirectURL", + "documentation":"

The URL the user is redirected to after the streaming session ends.

" + }, + "AttributesToDelete":{ + "shape":"StackAttributes", + "documentation":"

The stack attributes to delete.

" } } }, diff --git a/botocore/data/appstream/2016-12-01/waiters-2.json b/botocore/data/appstream/2016-12-01/waiters-2.json new file mode 100644 index 00000000..f53f609c --- /dev/null +++ b/botocore/data/appstream/2016-12-01/waiters-2.json @@ -0,0 +1,55 @@ +{ + "version": 2, + "waiters": { + "FleetStarted": { + "delay": 30, + "maxAttempts": 40, + "operation": "DescribeFleets", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Fleets[].State", + "expected": "ACTIVE" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Fleets[].State", + "expected": "PENDING_DEACTIVATE" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Fleets[].State", + "expected": "INACTIVE" + } + ] + }, + "FleetStopped": { + "delay": 30, + "maxAttempts": 40, + "operation": "DescribeFleets", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Fleets[].State", + "expected": "INACTIVE" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Fleets[].State", + "expected": "PENDING_ACTIVATE" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Fleets[].State", + "expected": "ACTIVE" + } + ] + } + } +} \ No newline at end of file diff --git a/botocore/data/budgets/2016-10-20/examples-1.json b/botocore/data/budgets/2016-10-20/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/budgets/2016-10-20/service-2.json b/botocore/data/budgets/2016-10-20/service-2.json index c5aa7d7c..1c45a387 100644 --- a/botocore/data/budgets/2016-10-20/service-2.json +++ b/botocore/data/budgets/2016-10-20/service-2.json @@ -26,7 +26,7 @@ {"shape":"CreationLimitExceededException"}, {"shape":"DuplicateRecordException"} ], - "documentation":"Create a new budget" + "documentation":"

Creates a budget and, if included, notifications and subscribers.

" }, "CreateNotification":{ "name":"CreateNotification", @@ -43,7 +43,7 @@ {"shape":"CreationLimitExceededException"}, {"shape":"DuplicateRecordException"} ], - "documentation":"Create a new Notification with subscribers for a budget" + "documentation":"

Creates a notification. You must create the budget before you create the associated notification.

" }, "CreateSubscriber":{ "name":"CreateSubscriber", @@ -60,7 +60,7 @@ {"shape":"DuplicateRecordException"}, {"shape":"NotFoundException"} ], - "documentation":"Create a new Subscriber for a notification" + "documentation":"

Creates a subscriber. You must create the associated budget and notification before you create the subscriber.

" }, "DeleteBudget":{ "name":"DeleteBudget", @@ -75,7 +75,7 @@ {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"} ], - "documentation":"Delete a budget and related notifications" + "documentation":"

Deletes a budget. You can delete your budget at any time.

Deleting a budget also deletes the notifications and subscribers associated with that budget.

" }, "DeleteNotification":{ "name":"DeleteNotification", @@ -90,7 +90,7 @@ {"shape":"InternalErrorException"}, {"shape":"NotFoundException"} ], - "documentation":"Delete a notification and related subscribers" + "documentation":"

Deletes a notification.

Deleting a notification also deletes the subscribers associated with the notification.

" }, "DeleteSubscriber":{ "name":"DeleteSubscriber", @@ -105,7 +105,7 @@ {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"} ], - "documentation":"Delete a Subscriber for a notification" + "documentation":"

Deletes a subscriber.

Deleting the last subscriber to a notification also deletes the notification.

" }, "DescribeBudget":{ "name":"DescribeBudget", @@ -120,7 +120,7 @@ {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"} ], - "documentation":"Get a single budget" + "documentation":"

Describes a budget.

" }, "DescribeBudgets":{ "name":"DescribeBudgets", @@ -137,7 +137,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"ExpiredNextTokenException"} ], - "documentation":"Get all budgets for an account" + "documentation":"

Lists the budgets associated with an account.

" }, "DescribeNotificationsForBudget":{ "name":"DescribeNotificationsForBudget", @@ -154,7 +154,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"ExpiredNextTokenException"} ], - "documentation":"Get notifications of a budget" + "documentation":"

Lists the notifications associated with a budget.

" }, "DescribeSubscribersForNotification":{ "name":"DescribeSubscribersForNotification", @@ -171,7 +171,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"ExpiredNextTokenException"} ], - "documentation":"Get subscribers of a notification" + "documentation":"

Lists the subscribers associated with a notification.

" }, "UpdateBudget":{ "name":"UpdateBudget", @@ -186,7 +186,7 @@ {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"} ], - "documentation":"Update the information of a budget already created" + "documentation":"

Updates a budget. You can change every part of a budget except for the budgetName and the calculatedSpend. When a budget is modified, the calculatedSpend drops to zero until AWS has new usage data to use for forecasting.

" }, "UpdateNotification":{ "name":"UpdateNotification", @@ -202,7 +202,7 @@ {"shape":"NotFoundException"}, {"shape":"DuplicateRecordException"} ], - "documentation":"Update the information about a notification already created" + "documentation":"

Updates a notification.

" }, "UpdateSubscriber":{ "name":"UpdateSubscriber", @@ -218,13 +218,13 @@ {"shape":"NotFoundException"}, {"shape":"DuplicateRecordException"} ], - "documentation":"Update a subscriber" + "documentation":"

Updates a subscriber.

" } }, "shapes":{ "AccountId":{ "type":"string", - "documentation":"Account Id of the customer. It should be a 12 digit number.", + "documentation":"

The account ID of the customer. It should be a 12 digit number.

", "max":12, "min":12 }, @@ -232,32 +232,54 @@ "type":"structure", "required":[ "BudgetName", - "BudgetLimit", "TimeUnit", - "TimePeriod", "BudgetType" ], "members":{ - "BudgetName":{"shape":"BudgetName"}, - "BudgetLimit":{"shape":"Spend"}, - "CostFilters":{"shape":"CostFilters"}, - "CostTypes":{"shape":"CostTypes"}, - "TimeUnit":{"shape":"TimeUnit"}, - "TimePeriod":{"shape":"TimePeriod"}, - "CalculatedSpend":{"shape":"CalculatedSpend"}, - "BudgetType":{"shape":"BudgetType"} + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of a budget. Unique within accounts. : and \\ characters are not allowed in the BudgetName.

" + }, + "BudgetLimit":{ + "shape":"Spend", + "documentation":"

The total amount of cost, usage, or RI utilization that you want to track with your budget.

BudgetLimit is required for cost or usage budgets, but optional for RI utilization budgets. RI utilization budgets default to the only valid value for RI utilization budgets, which is 100.

" + }, + "CostFilters":{ + "shape":"CostFilters", + "documentation":"

The cost filters applied to a budget, such as service or region.

" + }, + "CostTypes":{ + "shape":"CostTypes", + "documentation":"

The types of costs included in this budget.

" + }, + "TimeUnit":{ + "shape":"TimeUnit", + "documentation":"

The length of time until a budget resets the actual and forecasted spend.

" + }, + "TimePeriod":{ + "shape":"TimePeriod", + "documentation":"

The period of time covered by a budget. Has a start date and an end date. The start date must come before the end date. There are no restrictions on the end date.

If you created your budget and didn't specify a start date, AWS defaults to the start of your chosen time period (i.e. DAILY, MONTHLY, QUARTERLY, ANNUALLY). For example, if you created your budget on January 24th 2018, chose DAILY, and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC. If you chose MONTHLY, AWS set your start date to 01/01/18 00:00 UTC. If you didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC. The defaults are the same for the AWS Billing and Cost Management console and the API.

You can change either date with the UpdateBudget operation.

After the end date, AWS deletes the budget and all associated notifications and subscribers.

" + }, + "CalculatedSpend":{ + "shape":"CalculatedSpend", + "documentation":"

The actual and forecasted cost or usage being tracked by a budget.

" + }, + "BudgetType":{ + "shape":"BudgetType", + "documentation":"

Whether this budget tracks monetary costs, usage, or RI utilization.

" + } }, - "documentation":"AWS Budget model" + "documentation":"

Represents the output of the CreateBudget operation. The content consists of the detailed metadata and data file information, and the current status of the budget.

The ARN pattern for a budget is: arn:aws:budgetservice::AccountId:budget/budgetName

" }, "BudgetName":{ "type":"string", - "documentation":"A string represents the budget name. No \":\" and \"\\\" character is allowed.", + "documentation":"

A string represents the budget name. No \":\" and \"\\\" character is allowed.

", "max":100, "pattern":"[^:\\\\]+" }, "BudgetType":{ "type":"string", - "documentation":"The type of a budget. It should be COST, USAGE, or RI_UTILIZATION.", + "documentation":"

The type of a budget. It should be COST, USAGE, or RI_UTILIZATION.

", "enum":[ "USAGE", "COST", @@ -267,20 +289,26 @@ "Budgets":{ "type":"list", "member":{"shape":"Budget"}, - "documentation":"A list of budgets" + "documentation":"

A list of budgets

" }, "CalculatedSpend":{ "type":"structure", "required":["ActualSpend"], "members":{ - "ActualSpend":{"shape":"Spend"}, - "ForecastedSpend":{"shape":"Spend"} + "ActualSpend":{ + "shape":"Spend", + "documentation":"

The amount of cost, usage, or RI units that you have used.

" + }, + "ForecastedSpend":{ + "shape":"Spend", + "documentation":"

The amount of cost, usage, or RI units that you are forecasted to use.

" + } }, - "documentation":"A structure that holds the actual and forecasted spend for a budget." + "documentation":"

The spend objects associated with this budget. The actualSpend tracks how much you've used, cost, usage, or RI units, and the forecastedSpend tracks how much you are predicted to spend if your current usage remains steady.

For example, if it is the 20th of the month and you have spent 50 dollars on Amazon EC2, your actualSpend is 50 USD, and your forecastedSpend is 75 USD.

" }, "ComparisonOperator":{ "type":"string", - "documentation":"The comparison operator of a notification. Currently we support less than, equal to and greater than.", + "documentation":"

The comparison operator of a notification. Currently we support less than, equal to and greater than.

", "enum":[ "GREATER_THAN", "LESS_THAN", @@ -291,57 +319,57 @@ "type":"map", "key":{"shape":"GenericString"}, "value":{"shape":"DimensionValues"}, - "documentation":"A map that represents the cost filters applied to the budget." + "documentation":"

A map that represents the cost filters applied to the budget.

" }, "CostTypes":{ "type":"structure", "members":{ "IncludeTax":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to include tax in the cost budget." + "documentation":"

Specifies whether a budget includes taxes.

The default value is true.

" }, "IncludeSubscription":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to include subscriptions in the cost budget." + "documentation":"

Specifies whether a budget includes subscriptions.

The default value is true.

" }, "UseBlended":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to use blended costs in the cost budget." + "documentation":"

Specifies whether a budget uses blended rate.

The default value is false.

" }, "IncludeRefund":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to include refunds in the cost budget." + "documentation":"

Specifies whether a budget includes refunds.

The default value is true.

" }, "IncludeCredit":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to include credits in the cost budget." + "documentation":"

Specifies whether a budget includes credits.

The default value is true.

" }, "IncludeUpfront":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to include upfront costs in the cost budget." + "documentation":"

Specifies whether a budget includes upfront RI costs.

The default value is true.

" }, "IncludeRecurring":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to include recurring costs in the cost budget." + "documentation":"

Specifies whether a budget includes recurring fees such as monthly RI fees.

The default value is true.

" }, "IncludeOtherSubscription":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to include other subscription costs in the cost budget." + "documentation":"

Specifies whether a budget includes non-RI subscription costs.

The default value is true.

" }, "IncludeSupport":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to include support costs in the cost budget." + "documentation":"

Specifies whether a budget includes support subscription fees.

The default value is true.

" }, "IncludeDiscount":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to include discounts in the cost budget." + "documentation":"

Specifies whether a budget includes discounts.

The default value is true.

" }, "UseAmortized":{ "shape":"NullableBoolean", - "documentation":"A boolean value whether to include amortized costs in the cost budget." + "documentation":"

Specifies whether a budget uses the amortized rate.

The default value is false.

" } }, - "documentation":"This includes the options for getting the cost of a budget." + "documentation":"

The types of cost included in a budget, such as tax and subscriptions.

" }, "CreateBudgetRequest":{ "type":"structure", @@ -350,17 +378,26 @@ "Budget" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "Budget":{"shape":"Budget"}, - "NotificationsWithSubscribers":{"shape":"NotificationWithSubscribersList"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget.

" + }, + "Budget":{ + "shape":"Budget", + "documentation":"

The budget object that you want to create.

" + }, + "NotificationsWithSubscribers":{ + "shape":"NotificationWithSubscribersList", + "documentation":"

A notification that you want to associate with a budget. A budget can have up to five notifications, and each notification can have one SNS subscriber and up to ten email subscribers. If you include notifications and subscribers in your CreateBudget call, AWS creates the notifications and subscribers for you.

" + } }, - "documentation":"Request of CreateBudget" + "documentation":"

Request of CreateBudget

" }, "CreateBudgetResponse":{ "type":"structure", "members":{ }, - "documentation":"Response of CreateBudget" + "documentation":"

Response of CreateBudget

" }, "CreateNotificationRequest":{ "type":"structure", @@ -371,18 +408,30 @@ "Subscribers" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "BudgetName":{"shape":"BudgetName"}, - "Notification":{"shape":"Notification"}, - "Subscribers":{"shape":"Subscribers"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget that you want to create a notification for.

" + }, + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of the budget that you want AWS to notified you about. Budget names must be unique within an account.

" + }, + "Notification":{ + "shape":"Notification", + "documentation":"

The notification that you want to create.

" + }, + "Subscribers":{ + "shape":"Subscribers", + "documentation":"

A list of subscribers that you want to associate with the notification. Each notification can have one SNS subscriber and up to ten email subscribers.

" + } }, - "documentation":"Request of CreateNotification" + "documentation":"

Request of CreateNotification

" }, "CreateNotificationResponse":{ "type":"structure", "members":{ }, - "documentation":"Response of CreateNotification" + "documentation":"

Response of CreateNotification

" }, "CreateSubscriberRequest":{ "type":"structure", @@ -393,25 +442,37 @@ "Subscriber" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "BudgetName":{"shape":"BudgetName"}, - "Notification":{"shape":"Notification"}, - "Subscriber":{"shape":"Subscriber"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId associated with the budget that you want to create a subscriber for.

" + }, + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of the budget that you want to subscribe to. Budget names must be unique within an account.

" + }, + "Notification":{ + "shape":"Notification", + "documentation":"

The notification that you want to create a subscriber for.

" + }, + "Subscriber":{ + "shape":"Subscriber", + "documentation":"

The subscriber that you want to associate with a budget notification.

" + } }, - "documentation":"Request of CreateSubscriber" + "documentation":"

Request of CreateSubscriber

" }, "CreateSubscriberResponse":{ "type":"structure", "members":{ }, - "documentation":"Response of CreateSubscriber" + "documentation":"

Response of CreateSubscriber

" }, "CreationLimitExceededException":{ "type":"structure", "members":{ "Message":{"shape":"errorMessage"} }, - "documentation":"The exception is thrown when customer tries to create a record (e.g. budget), but the number this record already exceeds the limitation.", + "documentation":"

You've exceeded the notification or subscriber limit.

", "exception":true }, "DeleteBudgetRequest":{ @@ -421,16 +482,22 @@ "BudgetName" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "BudgetName":{"shape":"BudgetName"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget that you want to delete.

" + }, + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of the budget that you want to delete.

" + } }, - "documentation":"Request of DeleteBudget" + "documentation":"

Request of DeleteBudget

" }, "DeleteBudgetResponse":{ "type":"structure", "members":{ }, - "documentation":"Response of DeleteBudget" + "documentation":"

Response of DeleteBudget

" }, "DeleteNotificationRequest":{ "type":"structure", @@ -440,17 +507,26 @@ "Notification" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "BudgetName":{"shape":"BudgetName"}, - "Notification":{"shape":"Notification"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget whose notification you want to delete.

" + }, + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of the budget whose notification you want to delete.

" + }, + "Notification":{ + "shape":"Notification", + "documentation":"

The notification that you want to delete.

" + } }, - "documentation":"Request of DeleteNotification" + "documentation":"

Request of DeleteNotification

" }, "DeleteNotificationResponse":{ "type":"structure", "members":{ }, - "documentation":"Response of DeleteNotification" + "documentation":"

Response of DeleteNotification

" }, "DeleteSubscriberRequest":{ "type":"structure", @@ -461,18 +537,30 @@ "Subscriber" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "BudgetName":{"shape":"BudgetName"}, - "Notification":{"shape":"Notification"}, - "Subscriber":{"shape":"Subscriber"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget whose subscriber you want to delete.

" + }, + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of the budget whose subscriber you want to delete.

" + }, + "Notification":{ + "shape":"Notification", + "documentation":"

The notification whose subscriber you want to delete.

" + }, + "Subscriber":{ + "shape":"Subscriber", + "documentation":"

The subscriber that you want to delete.

" + } }, - "documentation":"Request of DeleteSubscriber" + "documentation":"

Request of DeleteSubscriber

" }, "DeleteSubscriberResponse":{ "type":"structure", "members":{ }, - "documentation":"Response of DeleteSubscriber" + "documentation":"

Response of DeleteSubscriber

" }, "DescribeBudgetRequest":{ "type":"structure", @@ -481,35 +569,59 @@ "BudgetName" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "BudgetName":{"shape":"BudgetName"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget that you want a description of.

" + }, + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of the budget that you want a description of.

" + } }, - "documentation":"Request of DescribeBudget" + "documentation":"

Request of DescribeBudget

" }, "DescribeBudgetResponse":{ "type":"structure", "members":{ - "Budget":{"shape":"Budget"} + "Budget":{ + "shape":"Budget", + "documentation":"

The description of the budget.

" + } }, - "documentation":"Response of DescribeBudget" + "documentation":"

Response of DescribeBudget

" }, "DescribeBudgetsRequest":{ "type":"structure", "required":["AccountId"], "members":{ - "AccountId":{"shape":"AccountId"}, - "MaxResults":{"shape":"MaxResults"}, - "NextToken":{"shape":"GenericString"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budgets that you want descriptions of.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Optional integer. Specifies the maximum number of results to return in the response.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + } }, - "documentation":"Request of DescribeBudgets" + "documentation":"

Request of DescribeBudgets

" }, "DescribeBudgetsResponse":{ "type":"structure", "members":{ - "Budgets":{"shape":"Budgets"}, - "NextToken":{"shape":"GenericString"} + "Budgets":{ + "shape":"Budgets", + "documentation":"

A list of budgets.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

The pagination token that indicates the next set of results that you can retrieve.

" + } }, - "documentation":"Response of DescribeBudgets" + "documentation":"

Response of DescribeBudgets

" }, "DescribeNotificationsForBudgetRequest":{ "type":"structure", @@ -518,20 +630,38 @@ "BudgetName" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "BudgetName":{"shape":"BudgetName"}, - "MaxResults":{"shape":"MaxResults"}, - "NextToken":{"shape":"GenericString"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget whose notifications you want descriptions of.

" + }, + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of the budget whose notifications you want descriptions of.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Optional integer. Specifies the maximum number of results to return in the response.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + } }, - "documentation":"Request of DescribeNotificationsForBudget" + "documentation":"

Request of DescribeNotificationsForBudget

" }, "DescribeNotificationsForBudgetResponse":{ "type":"structure", "members":{ - "Notifications":{"shape":"Notifications"}, - "NextToken":{"shape":"GenericString"} + "Notifications":{ + "shape":"Notifications", + "documentation":"

A list of notifications associated with a budget.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

The pagination token that indicates the next set of results that you can retrieve.

" + } }, - "documentation":"Response of GetNotificationsForBudget" + "documentation":"

Response of GetNotificationsForBudget

" }, "DescribeSubscribersForNotificationRequest":{ "type":"structure", @@ -541,21 +671,42 @@ "Notification" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "BudgetName":{"shape":"BudgetName"}, - "Notification":{"shape":"Notification"}, - "MaxResults":{"shape":"MaxResults"}, - "NextToken":{"shape":"GenericString"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget whose subscribers you want descriptions of.

" + }, + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of the budget whose subscribers you want descriptions of.

" + }, + "Notification":{ + "shape":"Notification", + "documentation":"

The notification whose subscribers you want to list.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Optional integer. Specifies the maximum number of results to return in the response.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + } }, - "documentation":"Request of DescribeSubscribersForNotification" + "documentation":"

Request of DescribeSubscribersForNotification

" }, "DescribeSubscribersForNotificationResponse":{ "type":"structure", "members":{ - "Subscribers":{"shape":"Subscribers"}, - "NextToken":{"shape":"GenericString"} + "Subscribers":{ + "shape":"Subscribers", + "documentation":"

A list of subscribers associated with a notification.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

The pagination token that indicates the next set of results that you can retrieve.

" + } }, - "documentation":"Response of DescribeSubscribersForNotification" + "documentation":"

Response of DescribeSubscribersForNotification

" }, "DimensionValues":{ "type":"list", @@ -566,7 +717,7 @@ "members":{ "Message":{"shape":"errorMessage"} }, - "documentation":"The exception is thrown when customer tries to create a record (e.g. budget) that already exists.", + "documentation":"

The budget name already exists. Budget names must be unique within an account.

", "exception":true }, "ExpiredNextTokenException":{ @@ -574,23 +725,23 @@ "members":{ "Message":{"shape":"errorMessage"} }, - "documentation":"This exception is thrown if the paging token is expired - past its TTL", + "documentation":"

The pagination token expired.

", "exception":true }, "GenericString":{ "type":"string", - "documentation":"A generic String." + "documentation":"

A generic String.

" }, "GenericTimestamp":{ "type":"timestamp", - "documentation":"A generic timestamp. In Java it is transformed to a Date object." + "documentation":"

A generic timestamp. In Java it is transformed to a Date object.

" }, "InternalErrorException":{ "type":"structure", "members":{ "Message":{"shape":"errorMessage"} }, - "documentation":"This exception is thrown on an unknown internal failure.", + "documentation":"

An error on the server occurred during the processing of your request. Try again later.

", "exception":true }, "InvalidNextTokenException":{ @@ -598,7 +749,7 @@ "members":{ "Message":{"shape":"errorMessage"} }, - "documentation":"This exception is thrown if paging token signature didn't match the token, or the paging token isn't for this request", + "documentation":"

The pagination token is invalid.

", "exception":true }, "InvalidParameterException":{ @@ -606,12 +757,12 @@ "members":{ "Message":{"shape":"errorMessage"} }, - "documentation":"This exception is thrown if any request is given an invalid parameter. E.g., if a required Date field is null.", + "documentation":"

An error on the client occurred. Typically, the cause is an invalid input value.

", "exception":true }, "MaxResults":{ "type":"integer", - "documentation":"An integer to represent how many entries a paginated response contains. Maximum is set to 100.", + "documentation":"

An integer to represent how many entries a paginated response contains. Maximum is set to 100.

", "box":true, "max":100, "min":1 @@ -621,7 +772,7 @@ "members":{ "Message":{"shape":"errorMessage"} }, - "documentation":"This exception is thrown if a requested entity is not found. E.g., if a budget id doesn't exist for an account ID.", + "documentation":"

We can't locate the resource that you specified.

", "exception":true }, "Notification":{ @@ -632,22 +783,34 @@ "Threshold" ], "members":{ - "NotificationType":{"shape":"NotificationType"}, - "ComparisonOperator":{"shape":"ComparisonOperator"}, - "Threshold":{"shape":"NotificationThreshold"}, - "ThresholdType":{"shape":"ThresholdType"} + "NotificationType":{ + "shape":"NotificationType", + "documentation":"

Whether the notification is for how much you have spent (ACTUAL) or for how much you are forecasted to spend (FORECASTED).

" + }, + "ComparisonOperator":{ + "shape":"ComparisonOperator", + "documentation":"

The comparison used for this notification.

" + }, + "Threshold":{ + "shape":"NotificationThreshold", + "documentation":"

The threshold associated with a notification. Thresholds are always a percentage.

" + }, + "ThresholdType":{ + "shape":"ThresholdType", + "documentation":"

The type of threshold for a notification. For ACTUAL thresholds, AWS notifies you when you go over the threshold, and for FORECASTED thresholds AWS notifies you when you are forecasted to go over the threshold.

" + } }, - "documentation":"Notification model. Each budget may contain multiple notifications with different settings." + "documentation":"

A notification associated with a budget. A budget can have up to five notifications.

Each notification must have at least one subscriber. A notification can have one SNS subscriber and up to ten email subscribers, for a total of 11 subscribers.

For example, if you have a budget for 200 dollars and you want to be notified when you go over 160 dollars, create a notification with the following parameters:

" }, "NotificationThreshold":{ "type":"double", - "documentation":"The threshold of a notification. It should be a number between 0 and 1,000,000,000.", + "documentation":"

The threshold of a notification. It should be a number between 0 and 1,000,000,000.

", "max":1000000000, "min":0.1 }, "NotificationType":{ "type":"string", - "documentation":"The type of a notification. It should be ACTUAL or FORECASTED.", + "documentation":"

The type of a notification. It should be ACTUAL or FORECASTED.

", "enum":[ "ACTUAL", "FORECASTED" @@ -660,21 +823,27 @@ "Subscribers" ], "members":{ - "Notification":{"shape":"Notification"}, - "Subscribers":{"shape":"Subscribers"} + "Notification":{ + "shape":"Notification", + "documentation":"

The notification associated with a budget.

" + }, + "Subscribers":{ + "shape":"Subscribers", + "documentation":"

A list of subscribers who are subscribed to this notification.

" + } }, - "documentation":"A structure to relate notification and a list of subscribers who belong to the notification." + "documentation":"

A notification with subscribers. A notification can have one SNS subscriber and up to ten email subscribers, for a total of 11 subscribers.

" }, "NotificationWithSubscribersList":{ "type":"list", "member":{"shape":"NotificationWithSubscribers"}, - "documentation":"A list of Notifications, each with a list of subscribers.", + "documentation":"

A list of Notifications, each with a list of subscribers.

", "max":5 }, "Notifications":{ "type":"list", "member":{"shape":"Notification"}, - "documentation":"A list of notifications." + "documentation":"

A list of notifications.

" }, "NullableBoolean":{ "type":"boolean", @@ -682,7 +851,7 @@ }, "NumericValue":{ "type":"string", - "documentation":"A string to represent NumericValue.", + "documentation":"

A string to represent NumericValue.

", "pattern":"[0-9]*(\\.)?[0-9]+" }, "Spend":{ @@ -692,10 +861,16 @@ "Unit" ], "members":{ - "Amount":{"shape":"NumericValue"}, - "Unit":{"shape":"UnitValue"} + "Amount":{ + "shape":"NumericValue", + "documentation":"

The cost or usage amount associated with a budget forecast, actual spend, or budget threshold.

" + }, + "Unit":{ + "shape":"UnitValue", + "documentation":"

The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB.

" + } }, - "documentation":"A structure that represents either a cost spend or usage spend. Contains an amount and a unit." + "documentation":"

The amount of cost or usage being measured for a budget.

For example, a Spend for 3 GB of S3 usage would have the following parameters:

" }, "Subscriber":{ "type":"structure", @@ -704,26 +879,32 @@ "Address" ], "members":{ - "SubscriptionType":{"shape":"SubscriptionType"}, - "Address":{"shape":"SubscriberAddress"} + "SubscriptionType":{ + "shape":"SubscriptionType", + "documentation":"

The type of notification that AWS sends to a subscriber.

" + }, + "Address":{ + "shape":"SubscriberAddress", + "documentation":"

The address that AWS sends budget notifications to, either an SNS topic or an email.

" + } }, - "documentation":"Subscriber model. Each notification may contain multiple subscribers with different addresses." + "documentation":"

The subscriber to a budget notification. The subscriber consists of a subscription type and either an Amazon Simple Notification Service topic or an email address.

For example, an email subscriber would have the following parameters:

" }, "SubscriberAddress":{ "type":"string", - "documentation":"String containing email or sns topic for the subscriber address.", + "documentation":"

String containing an email address or SNS topic for the subscriber address.

", "min":1 }, "Subscribers":{ "type":"list", "member":{"shape":"Subscriber"}, - "documentation":"A list of subscribers.", + "documentation":"

A list of subscribers.

", "max":11, "min":1 }, "SubscriptionType":{ "type":"string", - "documentation":"The subscription type of the subscriber. It can be SMS or EMAIL.", + "documentation":"

The subscription type of the subscriber. It can be SMS or EMAIL.

", "enum":[ "SNS", "EMAIL" @@ -731,7 +912,7 @@ }, "ThresholdType":{ "type":"string", - "documentation":"The type of threshold for a notification. It can be PERCENTAGE or ABSOLUTE_VALUE.", + "documentation":"

The type of threshold for a notification. It can be PERCENTAGE or ABSOLUTE_VALUE.

", "enum":[ "PERCENTAGE", "ABSOLUTE_VALUE" @@ -739,19 +920,21 @@ }, "TimePeriod":{ "type":"structure", - "required":[ - "Start", - "End" - ], "members":{ - "Start":{"shape":"GenericTimestamp"}, - "End":{"shape":"GenericTimestamp"} + "Start":{ + "shape":"GenericTimestamp", + "documentation":"

The start date for a budget. If you created your budget and didn't specify a start date, AWS defaults to the start of your chosen time period (i.e. DAILY, MONTHLY, QUARTERLY, ANNUALLY). For example, if you created your budget on January 24th 2018, chose DAILY, and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC. If you chose MONTHLY, AWS set your start date to 01/01/18 00:00 UTC. The defaults are the same for the AWS Billing and Cost Management console and the API.

You can change your start date with the UpdateBudget operation.

" + }, + "End":{ + "shape":"GenericTimestamp", + "documentation":"

The end date for a budget. If you didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC. The defaults are the same for the AWS Billing and Cost Management console and the API.

After the end date, AWS deletes the budget and all associated notifications and subscribers. You can change your end date with the UpdateBudget operation.

" + } }, - "documentation":"A time period indicating the start date and end date of a budget." + "documentation":"

The period of time covered by a budget. Has a start date and an end date. The start date must come before the end date. There are no restrictions on the end date.

" }, "TimeUnit":{ "type":"string", - "documentation":"The time unit of the budget. e.g. MONTHLY, QUARTERLY, etc.", + "documentation":"

The time unit of the budget. e.g. MONTHLY, QUARTERLY, etc.

", "enum":[ "DAILY", "MONTHLY", @@ -761,7 +944,7 @@ }, "UnitValue":{ "type":"string", - "documentation":"A string to represent budget spend unit. It should be not null and not empty.", + "documentation":"

A string to represent budget spend unit. It should be not null and not empty.

", "min":1 }, "UpdateBudgetRequest":{ @@ -771,16 +954,22 @@ "NewBudget" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "NewBudget":{"shape":"Budget"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget that you want to update.

" + }, + "NewBudget":{ + "shape":"Budget", + "documentation":"

The budget that you want to update your budget to.

" + } }, - "documentation":"Request of UpdateBudget" + "documentation":"

Request of UpdateBudget

" }, "UpdateBudgetResponse":{ "type":"structure", "members":{ }, - "documentation":"Response of UpdateBudget" + "documentation":"

Response of UpdateBudget

" }, "UpdateNotificationRequest":{ "type":"structure", @@ -791,18 +980,30 @@ "NewNotification" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "BudgetName":{"shape":"BudgetName"}, - "OldNotification":{"shape":"Notification"}, - "NewNotification":{"shape":"Notification"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget whose notification you want to update.

" + }, + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of the budget whose notification you want to update.

" + }, + "OldNotification":{ + "shape":"Notification", + "documentation":"

The previous notification associated with a budget.

" + }, + "NewNotification":{ + "shape":"Notification", + "documentation":"

The updated notification to be associated with a budget.

" + } }, - "documentation":"Request of UpdateNotification" + "documentation":"

Request of UpdateNotification

" }, "UpdateNotificationResponse":{ "type":"structure", "members":{ }, - "documentation":"Response of UpdateNotification" + "documentation":"

Response of UpdateNotification

" }, "UpdateSubscriberRequest":{ "type":"structure", @@ -814,24 +1015,39 @@ "NewSubscriber" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "BudgetName":{"shape":"BudgetName"}, - "Notification":{"shape":"Notification"}, - "OldSubscriber":{"shape":"Subscriber"}, - "NewSubscriber":{"shape":"Subscriber"} + "AccountId":{ + "shape":"AccountId", + "documentation":"

The accountId that is associated with the budget whose subscriber you want to update.

" + }, + "BudgetName":{ + "shape":"BudgetName", + "documentation":"

The name of the budget whose subscriber you want to update.

" + }, + "Notification":{ + "shape":"Notification", + "documentation":"

The notification whose subscriber you want to update.

" + }, + "OldSubscriber":{ + "shape":"Subscriber", + "documentation":"

The previous subscriber associated with a budget notification.

" + }, + "NewSubscriber":{ + "shape":"Subscriber", + "documentation":"

The updated subscriber associated with a budget notification.

" + } }, - "documentation":"Request of UpdateSubscriber" + "documentation":"

Request of UpdateSubscriber

" }, "UpdateSubscriberResponse":{ "type":"structure", "members":{ }, - "documentation":"Response of UpdateSubscriber" + "documentation":"

Response of UpdateSubscriber

" }, "errorMessage":{ "type":"string", - "documentation":"The error message the exception carries." + "documentation":"

The error message the exception carries.

" } }, - "documentation":"All public APIs for AWS Budgets" + "documentation":"

Budgets enable you to plan your service usage, service costs, and your RI utilization. You can also track how close your plan is to your budgeted amount or to the free tier limits. Budgets provide you with a quick way to see your usage-to-date and current estimated charges from AWS and to see how much your predicted usage accrues in charges by the end of the month. Budgets also compare current estimates and charges to the amount that you indicated you want to use or spend and lets you see how much of your budget has been used. AWS updates your budget status several times a day. Budgets track your unblended costs, subscriptions, and refunds. You can create the following types of budgets:

You can create up to 20,000 budgets per AWS master account. Your first two budgets are free of charge. Each additional budget costs $0.02 per day. You can set up optional notifications that warn you if you exceed, or are forecasted to exceed, your budgeted amount. You can have notifications sent to an Amazon SNS topic, to an email address, or to both. For more information, see Creating an Amazon SNS Topic for Budget Notifications. AWS Free Tier usage alerts via AWS Budgets are provided for you, and do not count toward your budget limits.

Service Endpoint

The AWS Budgets API provides the following endpoint:

" } diff --git a/botocore/data/cloud9/2017-09-23/service-2.json b/botocore/data/cloud9/2017-09-23/service-2.json index ce23682a..8b3deb56 100644 --- a/botocore/data/cloud9/2017-09-23/service-2.json +++ b/botocore/data/cloud9/2017-09-23/service-2.json @@ -28,7 +28,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Creates an AWS Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then hosts the environment on the instance.

", + "documentation":"

Creates an AWS Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment.

", "idempotent":true }, "CreateEnvironmentMembership":{ @@ -68,7 +68,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Deletes an AWS Cloud9 development environment. If the environment is hosted on an Amazon Elastic Compute Cloud (Amazon EC2) instance, also terminates the instance.

", + "documentation":"

Deletes an AWS Cloud9 development environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance.

", "idempotent":true }, "DeleteEnvironmentMembership":{ @@ -259,7 +259,7 @@ }, "instanceType":{ "shape":"InstanceType", - "documentation":"

The type of instance to host the environment on (for example, t2.micro).

" + "documentation":"

The type of instance to connect to the environment (for example, t2.micro).

" }, "subnetId":{ "shape":"SubnetId", @@ -419,7 +419,7 @@ "members":{ "environmentIds":{ "shape":"BoundedEnvironmentIdList", - "documentation":"

The IDs of invidividual environments to get information about.

" + "documentation":"

The IDs of individual environments to get information about.

" } } }, @@ -449,7 +449,7 @@ }, "type":{ "shape":"EnvironmentType", - "documentation":"

The type of environment. Valid values include the following:

" + "documentation":"

The type of environment. Valid values include the following:

" }, "arn":{ "shape":"String", @@ -673,7 +673,7 @@ }, "name":{ "shape":"EnvironmentName", - "documentation":"

Any replacement name for the environment.

" + "documentation":"

A replacement name for the environment.

" }, "description":{ "shape":"EnvironmentDescription", @@ -691,5 +691,5 @@ "pattern":"arn:aws:(iam|sts)::\\d+:\\S+" } }, - "documentation":"AWS Cloud9

AWS Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and release software in the cloud.

In the background, these tools are available through development environments running on Amazon Elastic Compute Cloud (Amazon EC2) instances (known as Amazon EC2 environments), your own servers (known as SSH environments), or a combination. This enables you to create and switch between multiple environments, with each environment set up for a specific development project.

For more information about AWS Cloud9, see the AWS Cloud9 User Guide.

AWS Cloud9 supports these operations:

" + "documentation":"AWS Cloud9

AWS Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and release software in the cloud.

For more information about AWS Cloud9, see the AWS Cloud9 User Guide.

AWS Cloud9 supports these operations:

" } diff --git a/botocore/data/cloudformation/2010-05-15/waiters-2.json b/botocore/data/cloudformation/2010-05-15/waiters-2.json index a8af98b2..4e8c8282 100644 --- a/botocore/data/cloudformation/2010-05-15/waiters-2.json +++ b/botocore/data/cloudformation/2010-05-15/waiters-2.json @@ -153,7 +153,6 @@ } ] }, - "ChangeSetCreateComplete": { "delay": 30, "operation": "DescribeChangeSet", diff --git a/botocore/data/cloudfront/2016-09-07/service-2.json b/botocore/data/cloudfront/2016-09-07/service-2.json old mode 100755 new mode 100644 diff --git a/botocore/data/cloudfront/2017-03-25/waiters-2.json b/botocore/data/cloudfront/2017-03-25/waiters-2.json index 6e044bc5..edd74b2a 100644 --- a/botocore/data/cloudfront/2017-03-25/waiters-2.json +++ b/botocore/data/cloudfront/2017-03-25/waiters-2.json @@ -18,7 +18,7 @@ "InvalidationCompleted": { "delay": 20, "operation": "GetInvalidation", - "maxAttempts": 60, + "maxAttempts": 30, "description": "Wait until an invalidation has completed.", "acceptors": [ { diff --git a/botocore/data/cognito-idp/2016-04-18/examples-1.json b/botocore/data/cognito-idp/2016-04-18/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/config/2014-11-12/examples-1.json b/botocore/data/config/2014-11-12/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/dms/2016-01-01/service-2.json b/botocore/data/dms/2016-01-01/service-2.json index 44468aec..bed0bfd7 100644 --- a/botocore/data/dms/2016-01-01/service-2.json +++ b/botocore/data/dms/2016-01-01/service-2.json @@ -318,6 +318,20 @@ ], "documentation":"

Returns the status of the RefreshSchemas operation.

" }, + "DescribeReplicationInstanceTaskLogs":{ + "name":"DescribeReplicationInstanceTaskLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReplicationInstanceTaskLogsMessage"}, + "output":{"shape":"DescribeReplicationInstanceTaskLogsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"

Returns information about the task logs for the specified task.

" + }, "DescribeReplicationInstances":{ "name":"DescribeReplicationInstances", "http":{ @@ -408,7 +422,8 @@ "output":{"shape":"ImportCertificateResponse"}, "errors":[ {"shape":"ResourceAlreadyExistsFault"}, - {"shape":"InvalidCertificateFault"} + {"shape":"InvalidCertificateFault"}, + {"shape":"ResourceQuotaExceededFault"} ], "documentation":"

Uploads the specified certificate.

" }, @@ -510,6 +525,20 @@ ], "documentation":"

Modifies the specified replication task.

You can't modify the task endpoints. The task must be stopped before you can modify it.

For more information about AWS DMS tasks, see the AWS DMS user guide at Working with Migration Tasks

" }, + "RebootReplicationInstance":{ + "name":"RebootReplicationInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootReplicationInstanceMessage"}, + "output":{"shape":"RebootReplicationInstanceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"

Reboots a replication instance. Rebooting results in a momentary outage, until the replication instance becomes available again.

" + }, "RefreshSchemas":{ "name":"RefreshSchemas", "http":{ @@ -807,7 +836,7 @@ }, "EngineName":{ "shape":"String", - "documentation":"

The type of engine for the endpoint. Valid values, depending on the EndPointType, include MYSQL, ORACLE, POSTGRES, MARIADB, AURORA, REDSHIFT, S3, SYBASE, DYNAMODB, MONGODB, and SQLSERVER.

" + "documentation":"

The type of engine for the endpoint. Valid values, depending on the EndPointType, include mysql, oracle, postgres, mariadb, aurora, redshift, S3, sybase, dynamodb, mongodb, and sqlserver.

" }, "Username":{ "shape":"String", @@ -1518,6 +1547,41 @@ }, "documentation":"

" }, + "DescribeReplicationInstanceTaskLogsMessage":{ + "type":"structure", + "required":["ReplicationInstanceArn"], + "members":{ + "ReplicationInstanceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the replication instance.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + } + }, + "DescribeReplicationInstanceTaskLogsResponse":{ + "type":"structure", + "members":{ + "ReplicationInstanceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the replication instance.

" + }, + "ReplicationInstanceTaskLogs":{ + "shape":"ReplicationInstanceTaskLogsList", + "documentation":"

An array of replication task log metadata. Each member of the array contains the replication task name, ARN, and task log size (in bytes).

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + } + }, "DescribeReplicationInstancesMessage":{ "type":"structure", "members":{ @@ -1757,7 +1821,7 @@ }, "EngineName":{ "shape":"String", - "documentation":"

The database engine name. Valid values, depending on the EndPointType, include MYSQL, ORACLE, POSTGRES, MARIADB, AURORA, REDSHIFT, S3, SYBASE, DYNAMODB, MONGODB, and SQLSERVER.

" + "documentation":"

The database engine name. Valid values, depending on the EndPointType, include mysql, oracle, postgres, mariadb, aurora, redshift, S3, sybase, dynamodb, mongodb, and sqlserver.

" }, "Username":{ "shape":"String", @@ -2084,7 +2148,7 @@ }, "EngineName":{ "shape":"String", - "documentation":"

The type of engine for the endpoint. Valid values, depending on the EndPointType, include MYSQL, ORACLE, POSTGRES, MARIADB, AURORA, REDSHIFT, S3, DYNAMODB, MONGODB, SYBASE, and SQLSERVER.

" + "documentation":"

The type of engine for the endpoint. Valid values, depending on the EndPointType, include mysql, oracle, postgres, mariadb, aurora, redshift, S3, sybase, dynamodb, mongodb, and sqlserver.

" }, "Username":{ "shape":"String", @@ -2409,6 +2473,29 @@ "type":"list", "member":{"shape":"OrderableReplicationInstance"} }, + "RebootReplicationInstanceMessage":{ + "type":"structure", + "required":["ReplicationInstanceArn"], + "members":{ + "ReplicationInstanceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the replication instance.

" + }, + "ForceFailover":{ + "shape":"BooleanOptional", + "documentation":"

If this parameter is true, the reboot is conducted through a Multi-AZ failover. (If the instance isn't configured for Multi-AZ, then you can't specify true.)

" + } + } + }, + "RebootReplicationInstanceResponse":{ + "type":"structure", + "members":{ + "ReplicationInstance":{ + "shape":"ReplicationInstance", + "documentation":"

The replication instance that is being rebooted.

" + } + } + }, "RefreshSchemasMessage":{ "type":"structure", "required":[ @@ -2632,6 +2719,28 @@ "type":"list", "member":{"shape":"String"} }, + "ReplicationInstanceTaskLog":{ + "type":"structure", + "members":{ + "ReplicationTaskName":{ + "shape":"String", + "documentation":"

The name of the replication task.

" + }, + "ReplicationTaskArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the replication task.

" + }, + "ReplicationInstanceTaskLogSize":{ + "shape":"Long", + "documentation":"

The size, in bytes, of the replication task log.

" + } + }, + "documentation":"

Contains metadata for a replication instance task log.

" + }, + "ReplicationInstanceTaskLogsList":{ + "type":"list", + "member":{"shape":"ReplicationInstanceTaskLog"} + }, "ReplicationPendingModifiedValues":{ "type":"structure", "members":{ @@ -3070,7 +3179,7 @@ "members":{ "EngineName":{ "shape":"String", - "documentation":"

The database engine name. Valid values, depending on the EndPointType, include MYSQL, ORACLE, POSTGRES, MARIADB, AURORA, REDSHIFT, S3, SYBASE, DYNAMODB, MONGODB, and SQLSERVER.

" + "documentation":"

The database engine name. Valid values, depending on the EndPointType, include mysql, oracle, postgres, mariadb, aurora, redshift, S3, sybase, dynamodb, mongodb, and sqlserver.

" }, "SupportsCDC":{ "shape":"Boolean", diff --git a/botocore/data/ds/2015-04-16/service-2.json b/botocore/data/ds/2015-04-16/service-2.json index d5602e6c..622a0382 100644 --- a/botocore/data/ds/2015-04-16/service-2.json +++ b/botocore/data/ds/2015-04-16/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"Directory Service", "serviceFullName":"AWS Directory Service", + "serviceId":"Directory Service", "signatureVersion":"v4", "targetPrefix":"DirectoryService_20150416", "uid":"ds-2015-04-16" @@ -2562,7 +2563,7 @@ }, "SecurityGroupId":{ "type":"string", - "pattern":"^(sg-[0-9a-f]{8})$" + "pattern":"^(sg-[0-9a-f]{8}|sg-[0-9a-f]{17})$" }, "Server":{ "type":"string", @@ -2718,7 +2719,7 @@ "StateLastUpdatedDateTime":{"type":"timestamp"}, "SubnetId":{ "type":"string", - "pattern":"^(subnet-[0-9a-f]{8})$" + "pattern":"^(subnet-[0-9a-f]{8}|subnet-[0-9a-f]{17})$" }, "SubnetIds":{ "type":"list", @@ -2995,7 +2996,7 @@ }, "VpcId":{ "type":"string", - "pattern":"^(vpc-[0-9a-f]{8})$" + "pattern":"^(vpc-[0-9a-f]{8}|vpc-[0-9a-f]{17})$" } }, "documentation":"AWS Directory Service

AWS Directory Service is a web service that makes it easy for you to setup and run directories in the AWS cloud, or connect your AWS resources with an existing on-premises Microsoft Active Directory. This guide provides detailed information about AWS Directory Service operations, data types, parameters, and errors. For information about AWS Directory Services features, see AWS Directory Service and the AWS Directory Service Administration Guide.

AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to AWS Directory Service and other AWS services. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

" diff --git a/botocore/data/dynamodb/2012-08-10/service-2.json b/botocore/data/dynamodb/2012-08-10/service-2.json index 63fff557..7cb17d83 100644 --- a/botocore/data/dynamodb/2012-08-10/service-2.json +++ b/botocore/data/dynamodb/2012-08-10/service-2.json @@ -60,7 +60,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a backup for an existing table.

Each time you create an On-Demand Backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.

You can call CreateBackup at a maximum rate of 50 times per second.

All backups in DynamoDB work without consuming any provisioned throughput on the table. This results in a fast, low-cost, and scalable backup process. In general, the larger the table, the more time it takes to back up. The backup is stored in an S3 data store that is maintained and managed by DynamoDB.

Backups incorporate all writes (delete, put, update) that were completed within the last minute before the backup request was initiated. Backups might include some writes (delete, put, update) that were completed before the backup request was finished.

For example, if you submit the backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup may or may not contain data modifications made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal consistency.

Along with data, the following are also included on the backups:

" + "documentation":"

Creates a backup for an existing table.

Each time you create an On-Demand Backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.

When you create an On-Demand Backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup requests are processed instantaneously and become available for restore within minutes.

You can call CreateBackup at a maximum rate of 50 times per second.

All backups in DynamoDB work without consuming any provisioned throughput on the table.

If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be included. The backup may or may not contain data modifications made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal consistency.

Along with data, the following are also included on the backups:

" }, "CreateGlobalTable":{ "name":"CreateGlobalTable", @@ -182,7 +182,7 @@ {"shape":"InternalServerError"}, {"shape":"GlobalTableNotFoundException"} ], - "documentation":"

Returns information about the global table.

" + "documentation":"

Returns information about the specified global table.

" }, "DescribeLimits":{ "name":"DescribeLimits", @@ -264,7 +264,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Lists all the global tables. Only those global tables that have replicas in the region specified as input are returned.

" + "documentation":"

Lists all global tables that have a replica in the specified region.

" }, "ListTables":{ "name":"ListTables", @@ -341,7 +341,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a new table from an existing backup. Any number of users can execute up to 10 concurrent restores in a given account.

You can call RestoreTableFromBackup at a maximum rate of 10 times per second.

You must manually set up the following on the restored table:

" + "documentation":"

Creates a new table from an existing backup. Any number of users can execute up to 10 concurrent restores in a given account.

You can call RestoreTableFromBackup at a maximum rate of 10 times per second.

You must manually set up the following on the restored table:

" }, "Scan":{ "name":"Scan", @@ -403,7 +403,7 @@ {"shape":"ReplicaNotFoundException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

Adds or removes replicas to the specified global table. The global table should already exist to be able to use this operation. Currently, the replica to be added should be empty.

" + "documentation":"

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, must have the same name as the global table, must have the same key schema, must have DynamoDB Streams enabled, and cannot have any local secondary indexes (LSIs).

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

" }, "UpdateItem":{ "name":"UpdateItem", @@ -560,7 +560,7 @@ "members":{ "Value":{ "shape":"AttributeValue", - "documentation":"

Represents the data for an attribute.

Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.

For more information, see Data TYpes in the Amazon DynamoDB Developer Guide.

" + "documentation":"

Represents the data for an attribute.

Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.

For more information, see Data Types in the Amazon DynamoDB Developer Guide.

" }, "Action":{ "shape":"AttributeAction", @@ -637,7 +637,8 @@ "BackupName":{ "type":"string", "max":255, - "min":3 + "min":3, + "pattern":"[a-zA-Z0-9_.-]+" }, "BackupNotFoundException":{ "type":"structure", @@ -1043,6 +1044,10 @@ "StreamSpecification":{ "shape":"StreamSpecification", "documentation":"

The settings for DynamoDB Streams on the table. These settings consist of:

" + }, + "SSESpecification":{ + "shape":"SSESpecification", + "documentation":"

Represents the settings used to enable server-side encryption.

" } }, "documentation":"

Represents the input of a CreateTable operation.

" @@ -1779,7 +1784,7 @@ "documentation":"

Too many operations for a given subscriber.

" } }, - "documentation":"

The number of concurrent table requests (cumulative number of tables in the CREATING, DELETING or UPDATING state) exceeds the maximum allowed of 10.

Also, for tables with secondary indexes, only one of those tables can be in the CREATING state at any point in time. Do not attempt to create more than one such table simultaneously.

The total limit of tables in the ACTIVE state is 250.

", + "documentation":"

Up to 50 CreateBackup operations are allowed per second, per account. There is no limit to the number of daily on-demand backups that can be taken.

Up to 10 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, and RestoreTableFromBackup.

For tables with secondary indexes, only one of those tables can be in the CREATING state at any point in time. Do not attempt to create more than one such table simultaneously.

The total limit of tables in the ACTIVE state is 250.

", "exception":true }, "ListAttributeValue":{ @@ -2247,7 +2252,7 @@ }, "KeyConditionExpression":{ "shape":"KeyExpression", - "documentation":"

The condition that specifies the key value(s) for items to be retrieved by the Query action.

The condition must perform an equality test on a single partition key value. The condition can also perform one of several comparison tests on a single sort key value. Query can use KeyConditionExpression to retrieve one item with a given partition key value and sort key value, or several items that have the same partition key value but different sort key values.

The partition key equality test is required, and must be specified in the following format:

partitionKeyName = :partitionkeyval

If you also want to provide a condition for the sort key, it must be combined using AND with the condition for the sort key. Following is an example, using the = comparison operator for the sort key:

partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval

Valid comparisons for the sort key condition are as follows:

Use the ExpressionAttributeValues parameter to replace tokens such as :partitionval and :sortval with actual values at runtime.

You can optionally use the ExpressionAttributeNames parameter to replace the names of the partition key and sort key with placeholder tokens. This option might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression parameter causes an error because Size is a reserved word:

To work around this, define a placeholder (such a #S) to represent the attribute name Size. KeyConditionExpression then is as follows:

For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide.

For more information on ExpressionAttributeNames and ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The condition that specifies the key value(s) for items to be retrieved by the Query action.

The condition must perform an equality test on a single partition key value.

The condition can optionally perform one of several comparison tests on a single sort key value. This allows Query to retrieve one item with a given partition key value and sort key value, or several items that have the same partition key value but different sort key values.

The partition key equality test is required, and must be specified in the following format:

partitionKeyName = :partitionkeyval

If you also want to provide a condition for the sort key, it must be combined using AND with the condition for the sort key. Following is an example, using the = comparison operator for the sort key:

partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval

Valid comparisons for the sort key condition are as follows:

Use the ExpressionAttributeValues parameter to replace tokens such as :partitionval and :sortval with actual values at runtime.

You can optionally use the ExpressionAttributeNames parameter to replace the names of the partition key and sort key with placeholder tokens. This option might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression parameter causes an error because Size is a reserved word:

To work around this, define a placeholder (such as #S) to represent the attribute name Size. KeyConditionExpression then is as follows:

For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide.

For more information on ExpressionAttributeNames and ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

" }, "ExpressionAttributeNames":{ "shape":"ExpressionAttributeNameMap", @@ -2456,6 +2461,37 @@ "UPDATED_NEW" ] }, + "SSEDescription":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"SSEStatus", + "documentation":"

The current state of server-side encryption:

" + } + }, + "documentation":"

The description of the server-side encryption status on the specified table.

" + }, + "SSEEnabled":{"type":"boolean"}, + "SSESpecification":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"SSEEnabled", + "documentation":"

Indicates whether server-side encryption is enabled (true) or disabled (false) on the table.

" + } + }, + "documentation":"

Represents the settings used to enable server-side encryption.

" + }, + "SSEStatus":{ + "type":"string", + "enum":[ + "ENABLING", + "ENABLED", + "DISABLING", + "DISABLED" + ] + }, "ScalarAttributeType":{ "type":"string", "enum":[ @@ -2645,6 +2681,10 @@ "TimeToLiveDescription":{ "shape":"TimeToLiveDescription", "documentation":"

Time to Live settings on the table when the backup was created.

" + }, + "SSEDescription":{ + "shape":"SSEDescription", + "documentation":"

The description of the server-side encryption status on the table when the backup was created.

" } }, "documentation":"

Contains the details of the features enabled on the table when the backup was created. For example, LSIs, GSIs, streams, TTL.

" @@ -2760,6 +2800,10 @@ "RestoreSummary":{ "shape":"RestoreSummary", "documentation":"

Contains details for the restore.

" + }, + "SSEDescription":{ + "shape":"SSEDescription", + "documentation":"

The description of the server-side encryption status on the specified table.

" } }, "documentation":"

Represents the properties of a table.

" diff --git a/botocore/data/ec2/2016-09-15/examples-1.json b/botocore/data/ec2/2016-09-15/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/ec2/2016-09-15/paginators-1.json b/botocore/data/ec2/2016-09-15/paginators-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/ec2/2016-09-15/service-2.json b/botocore/data/ec2/2016-09-15/service-2.json old mode 100755 new mode 100644 diff --git a/botocore/data/ec2/2016-09-15/waiters-2.json b/botocore/data/ec2/2016-09-15/waiters-2.json old mode 100755 new mode 100644 diff --git a/botocore/data/ec2/2016-11-15/examples-1.json b/botocore/data/ec2/2016-11-15/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 302e3a02..3b366a1b 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -1000,6 +1000,16 @@ "output":{"shape":"DescribeAddressesResult"}, "documentation":"

Describes one or more of your Elastic IP addresses.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

" }, + "DescribeAggregateIdFormat":{ + "name":"DescribeAggregateIdFormat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAggregateIdFormatRequest"}, + "output":{"shape":"DescribeAggregateIdFormatResult"}, + "documentation":"

Describes the longer ID format settings for all resource types in a specific region. This request is useful for performing a quick audit to determine whether a specific region is fully opted in for longer IDs (17-character IDs).

This request only returns information about resource types that support longer IDs.

The following resource types support longer IDs: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | instance | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | reservation | route-table | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association | vpc-peering-connection.

" + }, "DescribeAvailabilityZones":{ "name":"DescribeAvailabilityZones", "http":{ @@ -1168,7 +1178,7 @@ }, "input":{"shape":"DescribeIdFormatRequest"}, "output":{"shape":"DescribeIdFormatResult"}, - "documentation":"

Describes the ID format settings for your resources on a per-region basis, for example, to view which resource types are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types.

The following resource types support longer IDs: instance | reservation | snapshot | volume.

These settings apply to the IAM user who makes the request; they do not apply to the entire AWS account. By default, an IAM user defaults to the same settings as the root user, unless they explicitly override the settings by running the ModifyIdFormat command. Resources created with longer IDs are visible to all IAM users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

" + "documentation":"

Describes the ID format settings for your resources on a per-region basis, for example, to view which resource types are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types.

The following resource types support longer IDs: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | instance | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | reservation | route-table | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association | vpc-peering-connection.

These settings apply to the IAM user who makes the request; they do not apply to the entire AWS account. By default, an IAM user defaults to the same settings as the root user, unless they explicitly override the settings by running the ModifyIdFormat command. Resources created with longer IDs are visible to all IAM users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

" }, "DescribeIdentityIdFormat":{ "name":"DescribeIdentityIdFormat", @@ -1178,7 +1188,7 @@ }, "input":{"shape":"DescribeIdentityIdFormatRequest"}, "output":{"shape":"DescribeIdentityIdFormatResult"}, - "documentation":"

Describes the ID format settings for resources for the specified IAM user, IAM role, or root user. For example, you can view the resource types that are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

The following resource types support longer IDs: instance | reservation | snapshot | volume.

These settings apply to the principal specified in the request. They do not apply to the principal that makes the request.

" + "documentation":"

Describes the ID format settings for resources for the specified IAM user, IAM role, or root user. For example, you can view the resource types that are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

The following resource types support longer IDs: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | instance | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | reservation | route-table | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association | vpc-peering-connection.

These settings apply to the principal specified in the request. They do not apply to the principal that makes the request.

" }, "DescribeImageAttribute":{ "name":"DescribeImageAttribute", @@ -1380,6 +1390,16 @@ "output":{"shape":"DescribePrefixListsResult"}, "documentation":"

Describes available AWS services in a prefix list format, which includes the prefix list name and prefix list ID of the service and the IP address range for the service. A prefix list ID is required for creating an outbound security group rule that allows traffic from a VPC to access an AWS service through a gateway VPC endpoint.

" }, + "DescribePrincipalIdFormat":{ + "name":"DescribePrincipalIdFormat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePrincipalIdFormatRequest"}, + "output":{"shape":"DescribePrincipalIdFormatResult"}, + "documentation":"

Describes the ID format settings for the root user and all IAM roles and IAM users that have explicitly specified a longer ID (17-character ID) preference.

By default, all IAM roles and IAM users default to the same ID settings as the root user, unless they explicitly override the settings. This request is useful for identifying those IAM users and IAM roles that have overridden the default ID settings.

The following resource types support longer IDs: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | instance | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | reservation | route-table | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association | vpc-peering-connection.

" + }, "DescribeRegions":{ "name":"DescribeRegions", "http":{ @@ -2059,7 +2079,7 @@ "requestUri":"/" }, "input":{"shape":"ModifyIdFormatRequest"}, - "documentation":"

Modifies the ID format for the specified resource on a per-region basis. You can specify that resources should receive longer IDs (17-character IDs) when they are created. The following resource types support longer IDs: instance | reservation | snapshot | volume.

This setting applies to the IAM user who makes the request; it does not apply to the entire AWS account. By default, an IAM user defaults to the same settings as the root user. If you're using this action as the root user, then these settings apply to the entire account, unless an IAM user explicitly overrides these settings for themselves. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

Resources created with longer IDs are visible to all IAM roles and users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

" + "documentation":"

Modifies the ID format for the specified resource on a per-region basis. You can specify that resources should receive longer IDs (17-character IDs) when they are created.

This request can only be used to modify longer ID settings for resource types that are within the opt-in period. Resources currently in their opt-in period include: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | route-table | route-table-association | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association | vpc-peering-connection.

This setting applies to the IAM user who makes the request; it does not apply to the entire AWS account. By default, an IAM user defaults to the same settings as the root user. If you're using this action as the root user, then these settings apply to the entire account, unless an IAM user explicitly overrides these settings for themselves. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

Resources created with longer IDs are visible to all IAM roles and users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

" }, "ModifyIdentityIdFormat":{ "name":"ModifyIdentityIdFormat", @@ -2068,7 +2088,7 @@ "requestUri":"/" }, "input":{"shape":"ModifyIdentityIdFormatRequest"}, - "documentation":"

Modifies the ID format of a resource for a specified IAM user, IAM role, or the root user for an account; or all IAM users, IAM roles, and the root user for an account. You can specify that resources should receive longer IDs (17-character IDs) when they are created.

The following resource types support longer IDs: instance | reservation | snapshot | volume. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

This setting applies to the principal specified in the request; it does not apply to the principal that makes the request.

Resources created with longer IDs are visible to all IAM roles and users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

" + "documentation":"

Modifies the ID format of a resource for a specified IAM user, IAM role, or the root user for an account; or all IAM users, IAM roles, and the root user for an account. You can specify that resources should receive longer IDs (17-character IDs) when they are created.

This request can only be used to modify longer ID settings for resource types that are within the opt-in period. Resources currently in their opt-in period include: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | route-table | route-table-association | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association | vpc-peering-connection.

For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

This setting applies to the principal specified in the request; it does not apply to the principal that makes the request.

Resources created with longer IDs are visible to all IAM roles and users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

" }, "ModifyImageAttribute":{ "name":"ModifyImageAttribute", @@ -5504,7 +5524,7 @@ }, "GroupName":{ "shape":"String", - "documentation":"

The name of the security group.

Constraints: Up to 255 characters in length

Constraints for EC2-Classic: ASCII characters

Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*

" + "documentation":"

The name of the security group.

Constraints: Up to 255 characters in length. Cannot start with sg-.

Constraints for EC2-Classic: ASCII characters

Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*

" }, "VpcId":{ "shape":"String", @@ -6937,6 +6957,30 @@ }, "documentation":"

Contains the output of DescribeAddresses.

" }, + "DescribeAggregateIdFormatRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DescribeAggregateIdFormatResult":{ + "type":"structure", + "members":{ + "UseLongIdsAggregated":{ + "shape":"Boolean", + "documentation":"

Indicates whether all resource types in the region are configured to use longer IDs. This value will only be true if all users are configured to use longer IDs for all resource types in the region.

", + "locationName":"useLongIdsAggregated" + }, + "Statuses":{ + "shape":"IdFormatList", + "documentation":"

Information about each resource's ID format.

", + "locationName":"statusSet" + } + } + }, "DescribeAvailabilityZonesRequest":{ "type":"structure", "members":{ @@ -7534,7 +7578,7 @@ "members":{ "Resource":{ "shape":"String", - "documentation":"

The type of resource: instance | reservation | snapshot | volume

" + "documentation":"

The type of resource: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | instance | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | reservation | route-table | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association | vpc-peering-connection

" } }, "documentation":"

Contains the parameters for DescribeIdFormat.

" @@ -7561,7 +7605,7 @@ }, "Resource":{ "shape":"String", - "documentation":"

The type of resource: instance | reservation | snapshot | volume

", + "documentation":"

The type of resource: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | instance | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | reservation | route-table | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association | vpc-peering-connection

", "locationName":"resource" } }, @@ -8381,6 +8425,43 @@ }, "documentation":"

Contains the output of DescribePrefixLists.

" }, + "DescribePrincipalIdFormatRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "Resources":{ + "shape":"ResourceList", + "documentation":"

The type of resource: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | instance | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | reservation | route-table | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association | vpc-peering-connection

", + "locationName":"Resource" + }, + "MaxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to request the next page of results.

" + } + } + }, + "DescribePrincipalIdFormatResult":{ + "type":"structure", + "members":{ + "Principals":{ + "shape":"PrincipalIdFormatList", + "documentation":"

Information about the ID format settings for the ARN.

", + "locationName":"principalSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, "DescribeRegionsRequest":{ "type":"structure", "members":{ @@ -8804,7 +8885,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another request with the returned NextToken value. This value can be between 5 and 1000.

" + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another request with the returned NextToken value. This value can be between 5 and 1000. If this parameter is not specified, then all results are returned.

" } }, "documentation":"

Contains the parameters for DescribeSecurityGroups.

" @@ -9206,7 +9287,7 @@ "members":{ "NextToken":{ "shape":"String", - "documentation":"

The token required to retrieve the next set of results. This value is null when there are no more results to return.

", + "documentation":"

The token required to retrieve the next set of results. This value is an empty string when there are no more results to return.

", "locationName":"nextToken" }, "SpotPriceHistory":{ @@ -12217,7 +12298,7 @@ }, "Format":{ "shape":"String", - "documentation":"

The format of the disk image being imported.

Valid values: RAW | VHD | VMDK | OVA

" + "documentation":"

The format of the disk image being imported.

Valid values: VHD | VMDK | OVA

" }, "SnapshotId":{ "shape":"String", @@ -12506,7 +12587,7 @@ }, "UserData":{ "shape":"UserData", - "documentation":"

The user data to make available to the instance. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.

", + "documentation":"

The Base64-encoded user data to make available to the instance.

", "locationName":"userData" } }, @@ -14240,7 +14321,7 @@ "members":{ "UserData":{ "shape":"String", - "documentation":"

The user data to make available to the instances. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.

", + "documentation":"

The Base64-encoded user data for the instance.

", "locationName":"userData" }, "SecurityGroups":{ @@ -15253,7 +15334,7 @@ "members":{ "Resource":{ "shape":"String", - "documentation":"

The type of resource: instance | reservation | snapshot | volume

" + "documentation":"

The type of resource: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | route-table | route-table-association | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association | vpc-peering-connection.

Alternatively, use the all-current option to include all resource types that are currently within their opt-in period for longer IDs.

" }, "UseLongIds":{ "shape":"Boolean", @@ -15277,7 +15358,7 @@ }, "Resource":{ "shape":"String", - "documentation":"

The type of resource: instance | reservation | snapshot | volume

", + "documentation":"

The type of resource: bundle | conversion-task | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task | flow-log | image | import-task | internet-gateway | network-acl | network-acl-association | network-interface | network-interface-attachment | prefix-list | route-table | route-table-association | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association | vpc-peering-connection.

Alternatively, use the all-current option to include all resource types that are currently within their opt-in period for longer IDs.

", "locationName":"resource" }, "UseLongIds":{ @@ -15888,12 +15969,12 @@ "AddNetworkLoadBalancerArns":{ "shape":"ValueStringList", "documentation":"

The Amazon Resource Names (ARNs) of Network Load Balancers to add to your service configuration.

", - "locationName":"addNetworkLoadBalancerArn" + "locationName":"AddNetworkLoadBalancerArn" }, "RemoveNetworkLoadBalancerArns":{ "shape":"ValueStringList", "documentation":"

The Amazon Resource Names (ARNs) of Network Load Balancers to remove from your service configuration.

", - "locationName":"removeNetworkLoadBalancerArn" + "locationName":"RemoveNetworkLoadBalancerArn" } } }, @@ -17101,6 +17182,29 @@ "locationName":"item" } }, + "PrincipalIdFormat":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the principal.

", + "locationName":"arn" + }, + "Statuses":{ + "shape":"IdFormatList", + "documentation":"

Information about the ID format settings for the principal's resources.

", + "locationName":"statusSet" + } + }, + "documentation":"

Describes the ID format settings for the principal (IAM user, IAM role, or root user).

" + }, + "PrincipalIdFormatList":{ + "type":"list", + "member":{ + "shape":"PrincipalIdFormat", + "locationName":"item" + } + }, "PrincipalType":{ "type":"string", "enum":[ @@ -18101,7 +18205,7 @@ }, "UserData":{ "shape":"String", - "documentation":"

The user data to make available to the instance. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows). If you are using a command line tool, base64-encoding is performed for you and you can load the text from a file. Otherwise, you must provide base64-encoded text.

" + "documentation":"

The Base64-encoded user data to make available to the instance. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows).

" }, "TagSpecifications":{ "shape":"LaunchTemplateTagSpecificationRequestList", @@ -18318,7 +18422,7 @@ }, "UserData":{ "shape":"String", - "documentation":"

The user data to make available to the instances. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.

", + "documentation":"

The Base64-encoded user data for the instance.

", "locationName":"userData" } }, @@ -18989,6 +19093,13 @@ "type":"list", "member":{"shape":"String"} }, + "ResourceList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, "ResourceType":{ "type":"string", "enum":[ @@ -20633,7 +20744,7 @@ }, "Format":{ "shape":"String", - "documentation":"

The format of the disk image being imported.

Valid values: RAW | VHD | VMDK | OVA

" + "documentation":"

The format of the disk image being imported.

Valid values: VHD | VMDK | OVA

" }, "Url":{ "shape":"String", @@ -20830,7 +20941,7 @@ }, "UserData":{ "shape":"String", - "documentation":"

The user data to make available to the instances. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.

", + "documentation":"

The Base64-encoded user data to make available to the instances.

", "locationName":"userData" }, "WeightedCapacity":{ @@ -23305,7 +23416,7 @@ "members":{ "StaticRoutesOnly":{ "shape":"Boolean", - "documentation":"

Indicate whether the VPN connection uses static routes only. If you are creating a VPN connection for a device that does not support BGP, you must specify true.

Default: false

", + "documentation":"

Indicate whether the VPN connection uses static routes only. If you are creating a VPN connection for a device that does not support BGP, you must specify true. Use CreateVpnConnectionRoute to create a static route.

Default: false

", "locationName":"staticRoutesOnly" }, "TunnelOptions":{ diff --git a/botocore/data/ec2/2016-11-15/waiters-2.json b/botocore/data/ec2/2016-11-15/waiters-2.json old mode 100755 new mode 100644 index 71051948..33ea7b04 --- a/botocore/data/ec2/2016-11-15/waiters-2.json +++ b/botocore/data/ec2/2016-11-15/waiters-2.json @@ -390,6 +390,12 @@ "argument": "SpotInstanceRequests[].Status.Code", "expected": "fulfilled" }, + { + "state": "success", + "matcher": "pathAll", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "request-canceled-and-instance-running" + }, { "state": "failure", "matcher": "pathAny", @@ -413,6 +419,11 @@ "matcher": "pathAny", "argument": "SpotInstanceRequests[].Status.Code", "expected": "system-error" + }, + { + "state": "retry", + "matcher": "error", + "expected": "InvalidSpotInstanceRequestID.NotFound" } ] }, @@ -588,6 +599,24 @@ "state": "retry" } ] + }, + "VpcPeeringConnectionDeleted": { + "delay": 15, + "operation": "DescribeVpcPeeringConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "VpcPeeringConnections[].Status.Code" + }, + { + "matcher": "error", + "expected": "InvalidVpcPeeringConnectionID.NotFound", + "state": "success" + } + ] } } } diff --git a/botocore/data/ecr/2015-09-21/examples-1.json b/botocore/data/ecr/2015-09-21/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/ecs/2014-11-13/waiters-2.json b/botocore/data/ecs/2014-11-13/waiters-2.json index 8866d15f..8a0b19d8 100644 --- a/botocore/data/ecs/2014-11-13/waiters-2.json +++ b/botocore/data/ecs/2014-11-13/waiters-2.json @@ -66,7 +66,7 @@ "expected": true, "matcher": "path", "state": "success", - "argument": "services | [@[?length(deployments)!=`1`], @[?desiredCount!=runningCount]][] | length(@) == `0`" + "argument": "length(services[?!(length(deployments) == `1` && runningCount == desiredCount)]) == `0`" } ] }, diff --git 
a/botocore/data/elasticache/2015-02-02/waiters-2.json b/botocore/data/elasticache/2015-02-02/waiters-2.json index ccb904aa..c177d7b9 100644 --- a/botocore/data/elasticache/2015-02-02/waiters-2.json +++ b/botocore/data/elasticache/2015-02-02/waiters-2.json @@ -1,139 +1,143 @@ { - "version": 2, - "waiters": { - "CacheClusterAvailable": { - "delay": 30, - "operation": "DescribeCacheClusters", - "maxAttempts": 60, - "acceptors": [ - { - "expected": "available", - "matcher": "pathAll", - "state": "success", - "argument": "CacheClusters[].CacheClusterStatus" + "version":2, + "waiters":{ + "CacheClusterAvailable":{ + "acceptors":[ + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"available", + "matcher":"pathAll", + "state":"success" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"deleted", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"deleting", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"incompatible-network", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"restore-failed", + "matcher":"pathAny", + "state":"failure" + } + ], + "delay":15, + "description":"Wait until ElastiCache cluster is available.", + "maxAttempts":40, + "operation":"DescribeCacheClusters" }, - { - "expected": "deleted", - "matcher": "pathAny", - "state": "failure", - "argument": "CacheClusters[].CacheClusterStatus" + "CacheClusterDeleted":{ + "acceptors":[ + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"deleted", + "matcher":"pathAll", + "state":"success" + }, + { + "expected":"CacheClusterNotFound", + "matcher":"error", + "state":"success" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"available", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", 
+ "expected":"creating", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"incompatible-network", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"modifying", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"restore-failed", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"snapshotting", + "matcher":"pathAny", + "state":"failure" + } + ], + "delay":15, + "description":"Wait until ElastiCache cluster is deleted.", + "maxAttempts":40, + "operation":"DescribeCacheClusters" }, - { - "expected": "deleting", - "matcher": "pathAny", - "state": "failure", - "argument": "CacheClusters[].CacheClusterStatus" + "ReplicationGroupAvailable":{ + "acceptors":[ + { + "argument":"ReplicationGroups[].Status", + "expected":"available", + "matcher":"pathAll", + "state":"success" + }, + { + "argument":"ReplicationGroups[].Status", + "expected":"deleted", + "matcher":"pathAny", + "state":"failure" + } + ], + "delay":15, + "description":"Wait until ElastiCache replication group is available.", + "maxAttempts":40, + "operation":"DescribeReplicationGroups" }, - { - "expected": "incompatible-network", - "matcher": "pathAny", - "state": "failure", - "argument": "CacheClusters[].CacheClusterStatus" - }, - { - "expected": "restore-failed", - "matcher": "pathAny", - "state": "failure", - "argument": "CacheClusters[].CacheClusterStatus" + "ReplicationGroupDeleted":{ + "acceptors":[ + { + "argument":"ReplicationGroups[].Status", + "expected":"deleted", + "matcher":"pathAll", + "state":"success" + }, + { + "argument":"ReplicationGroups[].Status", + "expected":"available", + "matcher":"pathAny", + "state":"failure" + }, + { + "expected":"ReplicationGroupNotFoundFault", + "matcher":"error", + "state":"success" + } + ], + "delay":15, + 
"description":"Wait until ElastiCache replication group is deleted.", + "maxAttempts":40, + "operation":"DescribeReplicationGroups" } - ] - }, - "CacheClusterDeleted": { - "delay": 30, - "operation": "DescribeCacheClusters", - "maxAttempts": 60, - "acceptors": [ - { - "expected": "CacheClusterNotFound", - "matcher": "error", - "state": "success" - }, - { - "expected": "creating", - "matcher": "pathAny", - "state": "failure", - "argument": "CacheClusters[].CacheClusterStatus" - }, - { - "expected": "modifying", - "matcher": "pathAny", - "state": "failure", - "argument": "CacheClusters[].CacheClusterStatus" - }, - { - "expected": "rebooting", - "matcher": "pathAny", - "state": "failure", - "argument": "CacheClusters[].CacheClusterStatus" - } - ] - }, - "ReplicationGroupAvailable": { - "delay": 30, - "operation": "DescribeReplicationGroups", - "maxAttempts": 60, - "acceptors": [ - { - "expected": "available", - "matcher": "pathAll", - "state": "success", - "argument": "ReplicationGroups[].Status" - }, - { - "expected": "deleted", - "matcher": "pathAny", - "state": "failure", - "argument": "ReplicationGroups[].Status" - }, - { - "expected": "deleting", - "matcher": "pathAny", - "state": "failure", - "argument": "ReplicationGroups[].Status" - }, - { - "expected": "incompatible-network", - "matcher": "pathAny", - "state": "failure", - "argument": "ReplicationGroups[].Status" - }, - { - "expected": "restore-failed", - "matcher": "pathAny", - "state": "failure", - "argument": "ReplicationGroups[].Status" - } - ] - }, - "ReplicationGroupDeleted": { - "delay": 30, - "operation": "DescribeReplicationGroups", - "maxAttempts": 60, - "acceptors": [ - { - "expected": "ReplicationGroupNotFoundFault", - "matcher": "error", - "state": "success" - }, - { - "expected": "creating", - "matcher": "pathAny", - "state": "failure", - "argument": "ReplicationGroups[].Status" - }, - { - "expected": "modifying", - "matcher": "pathAny", - "state": "failure", - "argument": 
"ReplicationGroups[].Status" - }, - { - "expected": "rebooting", - "matcher": "pathAny", - "state": "failure", - "argument": "ReplicationGroups[].Status" - } - ] } - } } diff --git a/botocore/data/elasticbeanstalk/2010-12-01/examples-1.json b/botocore/data/elasticbeanstalk/2010-12-01/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/elb/2012-06-01/waiters-2.json b/botocore/data/elb/2012-06-01/waiters-2.json index 143c8662..182e070b 100644 --- a/botocore/data/elb/2012-06-01/waiters-2.json +++ b/botocore/data/elb/2012-06-01/waiters-2.json @@ -1,6 +1,24 @@ { "version":2, "waiters":{ + "InstanceDeregistered": { + "delay": 15, + "operation": "DescribeInstanceHealth", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "OutOfService", + "matcher": "pathAll", + "state": "success", + "argument": "InstanceStates[].State" + }, + { + "matcher": "error", + "expected": "InvalidInstance", + "state": "success" + } + ] + }, "AnyInstanceInService":{ "acceptors":[ { @@ -21,6 +39,11 @@ "expected":"InService", "matcher":"pathAll", "state":"success" + }, + { + "matcher": "error", + "expected": "InvalidInstance", + "state": "retry" } ], "delay":15, diff --git a/botocore/data/elbv2/2015-12-01/waiters-2.json b/botocore/data/elbv2/2015-12-01/waiters-2.json new file mode 100644 index 00000000..9f3d77d8 --- /dev/null +++ b/botocore/data/elbv2/2015-12-01/waiters-2.json @@ -0,0 +1,100 @@ +{ + "version": 2, + "waiters": { + "LoadBalancerExists": { + "delay": 15, + "operation": "DescribeLoadBalancers", + "maxAttempts": 40, + "acceptors": [ + { + "matcher": "status", + "expected": 200, + "state": "success" + }, + { + "matcher": "error", + "expected": "LoadBalancerNotFound", + "state": "retry" + } + ] + }, + "LoadBalancerAvailable": { + "delay": 15, + "operation": "DescribeLoadBalancers", + "maxAttempts": 40, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "LoadBalancers[].State.Code", + "expected": "active" + }, + { + "state": "retry", 
+ "matcher": "pathAny", + "argument": "LoadBalancers[].State.Code", + "expected": "provisioning" + }, + { + "state": "retry", + "matcher": "error", + "expected": "LoadBalancerNotFound" + } + ] + }, + "LoadBalancersDeleted": { + "delay": 15, + "operation": "DescribeLoadBalancers", + "maxAttempts": 40, + "acceptors": [ + { + "state": "retry", + "matcher": "pathAll", + "argument": "LoadBalancers[].State.Code", + "expected": "active" + }, + { + "matcher": "error", + "expected": "LoadBalancerNotFound", + "state": "success" + } + ] + }, + "TargetInService":{ + "delay":15, + "maxAttempts":40, + "operation":"DescribeTargetHealth", + "acceptors":[ + { + "argument":"TargetHealthDescriptions[].TargetHealth.State", + "expected":"healthy", + "matcher":"pathAll", + "state":"success" + }, + { + "matcher": "error", + "expected": "InvalidInstance", + "state": "retry" + } + ] + }, + "TargetDeregistered": { + "delay": 15, + "maxAttempts": 40, + "operation": "DescribeTargetHealth", + "acceptors": [ + { + "matcher": "error", + "expected": "InvalidTarget", + "state": "success" + }, + { + "argument":"TargetHealthDescriptions[].TargetHealth.State", + "expected":"unused", + "matcher":"pathAll", + "state":"success" + } + ] + } + } +} diff --git a/botocore/data/emr/2009-03-31/waiters-2.json b/botocore/data/emr/2009-03-31/waiters-2.json index 45c3af53..abba8c3c 100644 --- a/botocore/data/emr/2009-03-31/waiters-2.json +++ b/botocore/data/emr/2009-03-31/waiters-2.json @@ -38,6 +38,31 @@ } ] }, + "StepComplete": { + "delay": 30, + "operation": "DescribeStep", + "maxAttempts": 60, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "Step.Status.State", + "expected": "COMPLETED" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Step.Status.State", + "expected": "FAILED" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Step.Status.State", + "expected": "CANCELLED" + } + ] + }, "ClusterTerminated": { "delay": 30, "operation": 
"DescribeCluster", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 07ad6683..0be5b6c7 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -952,6 +952,7 @@ "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-1" : { }, "us-west-2" : { } } @@ -1503,7 +1504,9 @@ "states" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, @@ -1676,6 +1679,8 @@ "waf-regional" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-west-1" : { }, @@ -1952,6 +1957,11 @@ "cn-northwest-1" : { } } }, + "sms" : { + "endpoints" : { + "cn-north-1" : { } + } + }, "snowball" : { "endpoints" : { "cn-north-1" : { } diff --git a/botocore/data/gamelift/2015-10-01/examples-1.json b/botocore/data/gamelift/2015-10-01/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/gamelift/2015-10-01/service-2.json b/botocore/data/gamelift/2015-10-01/service-2.json index 78dfd40b..b4936bde 100644 --- a/botocore/data/gamelift/2015-10-01/service-2.json +++ b/botocore/data/gamelift/2015-10-01/service-2.json @@ -58,7 +58,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates a new Amazon GameLift build from a set of game server binary files stored in an Amazon Simple Storage Service (Amazon S3) location. To use this API call, create a .zip file containing all of the files for the build and store it in an Amazon S3 bucket under your AWS account. For help on packaging your build files and creating a build, see Uploading Your Game to Amazon GameLift.

Use this API action ONLY if you are storing your game build files in an Amazon S3 bucket. To create a build using files stored locally, use the CLI command upload-build , which uploads the build files from a file location you specify.

To create a new build using CreateBuild, identify the storage location and operating system of your game build. You also have the option of specifying a build name and version. If successful, this action creates a new build record with an unique build ID and in INITIALIZED status. Use the API call DescribeBuild to check the status of your build. A build must be in READY status before it can be used to create fleets to host your game.

Build-related operations include:

" + "documentation":"

Creates a new Amazon GameLift build record for your game server binary files and points to the location of your game server build files in an Amazon Simple Storage Service (Amazon S3) location.

Game server binaries must be combined into a .zip file for use with Amazon GameLift. See Uploading Your Game for more information.

To create new builds quickly and easily, use the AWS CLI command upload-build . This helper command uploads your build and creates a new build record in one step, and automatically handles the necessary permissions. See Upload Build Files to Amazon GameLift for more help.

The CreateBuild operation should be used only when you need to manually upload your build files, as in the following scenarios:

If successful, this operation creates a new build record with a unique build ID and places it in INITIALIZED status. You can use DescribeBuild to check the status of your build. A build must be in READY status before it can be used to create fleets.

Build-related operations include:

" }, "CreateFleet":{ "name":"CreateFleet", @@ -373,7 +373,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves properties for a build. To get a build record, specify a build ID. If successful, an object containing the build properties is returned.

Build-related operations include:

" + "documentation":"

Retrieves properties for a build. To request a build record, specify a build ID. If successful, an object containing the build properties is returned.

Build-related operations include:

" }, "DescribeEC2InstanceLimits":{ "name":"DescribeEC2InstanceLimits", @@ -565,7 +565,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves a set of one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

You can request data for a one or a list of ticket IDs. If the request is successful, a ticket object is returned for each requested ID. When specifying a list of ticket IDs, objects are returned only for tickets that currently exist.

Matchmaking-related operations include:

" + "documentation":"

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

Matchmaking-related operations include:

" }, "DescribeMatchmakingConfigurations":{ "name":"DescribeMatchmakingConfigurations", @@ -785,7 +785,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This API call is not currently in use. Retrieves a fresh set of upload credentials and the assigned Amazon S3 storage location for a specific build. Valid credentials are required to upload your game build files to Amazon S3.

" + "documentation":"

Retrieves a fresh set of credentials for use when uploading a new set of game build files to Amazon GameLift's Amazon S3. This is done as part of the build creation process; see CreateBuild.

To request new credentials, specify the build ID as returned with an initial CreateBuild request. If successful, a new set of credentials are returned, along with the S3 storage location associated with the build ID.

" }, "ResolveAlias":{ "name":"ResolveAlias", @@ -819,7 +819,7 @@ {"shape":"UnauthorizedException"}, {"shape":"TerminalRoutingStrategyException"} ], - "documentation":"

Retrieves a set of game sessions that match a set of search criteria and sorts them in a specified order. A game session search is limited to a single fleet. Search results include only game sessions that are in ACTIVE status. If you need to retrieve game sessions with a status other than active, use DescribeGameSessions. If you need to retrieve the protection policy for each game session, use DescribeGameSessionDetails.

You can search or sort by the following game session attributes:

To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of GameSession objects matching the request is returned.

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

Game-session-related operations include:

" + "documentation":"

Retrieves all active game sessions that match a set of search criteria and sorts them in a specified order. You can search or sort by the following game session attributes:

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. If successful, a collection of GameSession objects matching the request is returned. Use the pagination parameters to retrieve results as a set of sequential pages.

You can search for game sessions one fleet at a time only. To find game sessions across multiple fleets, you must search each fleet separately and combine the results. This search feature finds only game sessions that are in ACTIVE status. To locate games in statuses other than active, use DescribeGameSessionDetails.

Game-session-related operations include:

" }, "StartGameSessionPlacement":{ "name":"StartGameSessionPlacement", @@ -837,6 +837,22 @@ ], "documentation":"

Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.

Game-session-related operations include:

" }, + "StartMatchBackfill":{ + "name":"StartMatchBackfill", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartMatchBackfillInput"}, + "output":{"shape":"StartMatchBackfillOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed. For more detail on how to set up backfilling, see Set up Match Backfilling.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

Matchmaking-related operations include:

" + }, "StartMatchmaking":{ "name":"StartMatchmaking", "http":{ @@ -1071,7 +1087,7 @@ "documentation":"

Unique identifier for a matchmaking ticket. The ticket must be in status REQUIRES_ACCEPTANCE; otherwise this request will fail.

" }, "PlayerIds":{ - "shape":"MatchmakingPlayerIdList", + "shape":"StringList", "documentation":"

Unique identifier for a player delivering the response. This parameter can include one or multiple player IDs.

" }, "AcceptanceType":{ @@ -1158,10 +1174,10 @@ }, "SDM":{ "shape":"StringDoubleMap", - "documentation":"

For a map of up to 10 type:value pairs. Maximum length for each string value is 100 characters.

" + "documentation":"

For a map of up to 10 data type:value pairs. Maximum length for each string value is 100 characters.

" } }, - "documentation":"

Values for use in Player attribute type:value pairs. This object lets you specify an attribute value using any of the valid data types: string, number, string array or data map. Each AttributeValue object can use only one of the available properties.

" + "documentation":"

Values for use in Player attribute key:value pairs. This object lets you specify an attribute value using any of the valid data types: string, number, string array or data map. Each AttributeValue object can use only one of the available properties.

" }, "AwsCredentials":{ "type":"structure", @@ -1182,7 +1198,7 @@ "documentation":"

Temporary access credentials used for uploading game build files to Amazon GameLift. They are valid for a limited time. If they expire before you upload your game build, get a new set by calling RequestUploadCredentials.

", "sensitive":true }, - "Boolean":{"type":"boolean"}, + "BooleanModel":{"type":"boolean"}, "Build":{ "type":"structure", "members":{ @@ -1295,11 +1311,11 @@ }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

Amazon S3 location of the game build files to be uploaded. The S3 bucket must be owned by the same AWS account that you're using to manage Amazon GameLift. It also must in the same region that you want to create a new build in. Before calling CreateBuild with this location, you must allow Amazon GameLift to access your Amazon S3 bucket (see Create a Build with Files in Amazon S3).

" + "documentation":"

Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an Amazon S3 bucket that you own. The storage location must specify an Amazon S3 bucket name and key, as well as a role ARN that you set up to allow Amazon GameLift to access your Amazon S3 bucket. The S3 bucket must be in the same region that you want to create a new build in.

" }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

Operating system that the game server binaries are built to run on. This value determines the type of fleet resources that you can use for this build. If your game build contains multiple executables, they all must run on the same operating system.

" + "documentation":"

Operating system that the game server binaries are built to run on. This value determines the type of fleet resources that you can use for this build. If your game build contains multiple executables, they all must run on the same operating system. If an operating system is not specified when creating a build, Amazon GameLift uses the default value (WINDOWS_2012). This value cannot be changed later.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1313,11 +1329,11 @@ }, "UploadCredentials":{ "shape":"AwsCredentials", - "documentation":"

This element is not currently in use.

" + "documentation":"

This element is returned only when the operation is called without a storage location. It contains credentials to use when you are uploading a build file to an Amazon S3 bucket that is owned by Amazon GameLift. Credentials have a limited life span. To refresh these credentials, call RequestUploadCredentials.

" }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

Amazon S3 location specified in the request.

" + "documentation":"

Amazon S3 location for your game build file, including bucket name and key.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -1376,7 +1392,7 @@ }, "MetricGroups":{ "shape":"MetricGroupList", - "documentation":"

Names of metric groups to add this fleet to. Use an existing metric group name to add this fleet to the group. Or use a new name to create a new metric group. A fleet can only be included in one metric group at a time.

" + "documentation":"

Name of a metric group to add this fleet to. A metric group tracks metrics across all fleets in the group. Use an existing metric group name to add this fleet to the group, or use a new name to create a new metric group. A fleet can only be included in one metric group at a time.

" }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", @@ -1421,7 +1437,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "CreatorId":{ "shape":"NonZeroAndMaxString", @@ -1437,7 +1453,7 @@ }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" } }, "documentation":"

Represents the input for a request action.

" @@ -1516,7 +1532,7 @@ "documentation":"

Length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" }, "AcceptanceRequired":{ - "shape":"Boolean", + "shape":"BooleanModel", "documentation":"

Flag that determines whether or not a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" }, "RuleSetName":{ @@ -1537,11 +1553,11 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2279,7 +2295,7 @@ "members":{ "TicketIds":{ "shape":"MatchmakingIdList", - "documentation":"

Unique identifier for a matchmaking ticket. To request all existing tickets, leave this parameter empty.

" + "documentation":"

Unique identifier for a matchmaking ticket. You can include up to 10 ID values.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2872,7 +2888,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). You can search for active game sessions based on this custom data with SearchGameSessions.

" }, "IpAddress":{ "shape":"IpAddress", @@ -2892,7 +2908,11 @@ }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + }, + "MatchmakerData":{ + "shape":"MatchmakerData", + "documentation":"

Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition to the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

" } }, "documentation":"

Properties describing a game session.

A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.

Game-session-related operations include:

" @@ -2976,7 +2996,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", @@ -3024,7 +3044,11 @@ }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + }, + "MatchmakerData":{ + "shape":"MatchmakerData", + "documentation":"

Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see http://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data.

" } }, "documentation":"

Object that describes a StartGameSessionPlacement request. This object includes the full details of the original request plus the current status and start/end time stamps.

Game session placement-related operations include:

" @@ -3462,6 +3486,11 @@ "type":"list", "member":{"shape":"MatchedPlayerSession"} }, + "MatchmakerData":{ + "type":"string", + "max":390000, + "min":1 + }, "MatchmakingAcceptanceTimeoutInteger":{ "type":"integer", "max":600, @@ -3491,7 +3520,7 @@ "documentation":"

Length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" }, "AcceptanceRequired":{ - "shape":"Boolean", + "shape":"BooleanModel", "documentation":"

Flag that determines whether or not a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" }, "RuleSetName":{ @@ -3516,11 +3545,11 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" } }, "documentation":"

Guidelines for use with FlexMatch to match players into games. All matchmaking requests must specify a matchmaking configuration.

" @@ -3552,10 +3581,6 @@ "min":1, "pattern":"[a-zA-Z0-9-\\.]+" }, - "MatchmakingPlayerIdList":{ - "type":"list", - "member":{"shape":"PlayerIdStringModel"} - }, "MatchmakingRequestTimeoutInteger":{ "type":"integer", "max":43200, @@ -3578,7 +3603,7 @@ "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Set of rule statements, used with FlexMatch, that determine how to build a certain kind of player match. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Create Matchmaking Rules for Your Game.

" + "documentation":"

Set of rule statements, used with FlexMatch, that determine how to build a certain kind of player match. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.

" }, "MatchmakingRuleSetList":{ "type":"list", @@ -3603,7 +3628,7 @@ }, "Status":{ "shape":"MatchmakingConfigurationStatus", - "documentation":"

Current status of the matchmaking request.

" + "documentation":"

Current status of the matchmaking request.

Matchmaking requests that fail to successfully complete (statuses FAILED, CANCELLED, TIMED_OUT) can be resubmitted as new requests with new ticket IDs.

" }, "StatusReason":{ "shape":"StringModel", @@ -3727,12 +3752,12 @@ "type":"structure", "members":{ "PlayerId":{ - "shape":"PlayerIdStringModel", + "shape":"NonZeroAndMaxString", "documentation":"

Unique identifier for a player

" }, "PlayerAttributes":{ "shape":"PlayerAttributeMap", - "documentation":"

Collection of name:value pairs containing player information for use in matchmaking. Player attribute names need to match playerAttributes names in the rule set being used. Example: \"PlayerAttributes\": {\"skill\": {\"N\": \"23\"}, \"gameMode\": {\"S\": \"deathmatch\"}}.

" + "documentation":"

Collection of key:value pairs containing player information for use in matchmaking. Player attribute keys must match the playerAttributes used in a matchmaking rule set. Example: \"PlayerAttributes\": {\"skill\": {\"N\": \"23\"}, \"gameMode\": {\"S\": \"deathmatch\"}}.

" }, "Team":{ "shape":"NonZeroAndMaxString", @@ -3766,12 +3791,6 @@ "max":25, "min":1 }, - "PlayerIdStringModel":{ - "type":"string", - "max":128, - "min":1, - "pattern":"[a-zA-Z0-9-\\.]+" - }, "PlayerLatency":{ "type":"structure", "members":{ @@ -4178,11 +4197,11 @@ }, "FilterExpression":{ "shape":"NonZeroAndMaxString", - "documentation":"

String containing the search criteria for the session search. If no filter expression is included, the request returns results for all game sessions in the fleet that are in ACTIVE status.

A filter expression can contain one or multiple conditions. Each condition consists of the following:

To chain multiple conditions in a single expression, use the logical keywords AND, OR, and NOT and parentheses as needed. For example: x AND y AND NOT z, NOT (x OR y).

Session search evaluates conditions from left to right using the following precedence rules:

  1. =, <>, <, >, <=, >=

  2. Parentheses

  3. NOT

  4. AND

  5. OR

For example, this filter expression retrieves game sessions hosting at least ten players that have an open player slot: \"maximumSessions>=10 AND hasAvailablePlayerSessions=true\".

" + "documentation":"

String containing the search criteria for the session search. If no filter expression is included, the request returns results for all game sessions in the fleet that are in ACTIVE status.

A filter expression can contain one or multiple conditions. Each condition consists of the following:

To chain multiple conditions in a single expression, use the logical keywords AND, OR, and NOT and parentheses as needed. For example: x AND y AND NOT z, NOT (x OR y).

Session search evaluates conditions from left to right using the following precedence rules:

  1. =, <>, <, >, <=, >=

  2. Parentheses

  3. NOT

  4. AND

  5. OR

For example, this filter expression retrieves game sessions hosting at least ten players that have an open player slot: \"maximumSessions>=10 AND hasAvailablePlayerSessions=true\".

" }, "SortExpression":{ "shape":"NonZeroAndMaxString", - "documentation":"

Instructions on how to sort the search results. If no sort expression is included, the request returns results in random order. A sort expression consists of the following elements:

For example, this sort expression returns the oldest active sessions first: \"SortExpression\": \"creationTimeMillis ASC\". Results with a null value for the sort operand are returned at the end of the list.

" + "documentation":"

Instructions on how to sort the search results. If no sort expression is included, the request returns results in random order. A sort expression consists of the following elements:

For example, this sort expression returns the oldest active sessions first: \"SortExpression\": \"creationTimeMillis ASC\". Results with a null value for the sort operand are returned at the end of the list.

" }, "Limit":{ "shape":"PositiveInteger", @@ -4261,7 +4280,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", @@ -4281,7 +4300,7 @@ }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" } }, "documentation":"

Represents the input for a request action.

" @@ -4296,6 +4315,43 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "StartMatchBackfillInput":{ + "type":"structure", + "required":[ + "ConfigurationName", + "GameSessionArn", + "Players" + ], + "members":{ + "TicketId":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift will generate one in the form of a UUID. Use this identifier to track the match backfill ticket status and retrieve match results.

" + }, + "ConfigurationName":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Name of the matchmaker to use for this request. The name of the matchmaker that was used with the original game session is listed in the GameSession object, MatchmakerData property. This property contains a matchmaking configuration ARN value, which includes the matchmaker name. (In the ARN value \"arn:aws:gamelift:us-west-2:111122223333:matchmakingconfiguration/MM-4v4\", the matchmaking configuration name is \"MM-4v4\".) Use only the name for this parameter.

" + }, + "GameSessionArn":{ + "shape":"ArnStringModel", + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.

" + }, + "Players":{ + "shape":"PlayerList", + "documentation":"

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "StartMatchBackfillOutput":{ + "type":"structure", + "members":{ + "MatchmakingTicket":{ + "shape":"MatchmakingTicket", + "documentation":"

Ticket representing the backfill matchmaking request. This object includes the information in the request, ticket status, and match results as generated during the matchmaking process.

" + } + }, + "documentation":"

Represents the returned data in response to a request action.

" + }, "StartMatchmakingInput":{ "type":"structure", "required":[ @@ -4305,7 +4361,7 @@ "members":{ "TicketId":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking ticket. Use this identifier to track the matchmaking ticket status and retrieve match results.

" + "documentation":"

Unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift will generate one in the form of a UUID. Use this identifier to track the matchmaking ticket status and retrieve match results.

" }, "ConfigurationName":{ "shape":"MatchmakingIdStringModel", @@ -4660,7 +4716,7 @@ "documentation":"

Length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" }, "AcceptanceRequired":{ - "shape":"Boolean", + "shape":"BooleanModel", "documentation":"

Flag that determines whether or not a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" }, "RuleSetName":{ @@ -4681,11 +4737,11 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" } }, "documentation":"

Represents the input for a request action.

" @@ -4743,7 +4799,7 @@ "type":"structure", "members":{ "Valid":{ - "shape":"Boolean", + "shape":"BooleanModel", "documentation":"

Response indicating whether or not the rule set is valid.

" } }, @@ -4825,12 +4881,12 @@ "documentation":"

Additional messaging associated with the connection status.

" } }, - "documentation":"

Represents status information for a VPC peering connection. Status is associated with a VpcPeeringConnection object. Status codes and messages are provided from EC2 (). Connection status information is also communicated as a fleet Event.

" + "documentation":"

Represents status information for a VPC peering connection. Status is associated with a VpcPeeringConnection object. Status codes and messages are provided from EC2 (see VpcPeeringConnectionStateReason). Connection status information is also communicated as a fleet Event.

" }, "WholeNumber":{ "type":"integer", "min":0 } }, - "documentation":"Amazon GameLift Service

Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Amazon GameLift provides tools for the following tasks: (1) acquire computing resources and deploy game servers, (2) scale game server capacity to meet player demand, (3) host game sessions and manage player access, and (4) track in-depth metrics on player usage and server performance.

The Amazon GameLift service API includes two important function sets:

This reference guide describes the low-level service API for Amazon GameLift. You can use the API functionality with these tools:

MORE RESOURCES

API SUMMARY

This list offers a functional overview of the Amazon GameLift service API.

Managing Games and Players

Use these actions to start new game sessions, find existing game sessions, track game session status and other information, and enable player access to game sessions.

Setting Up and Managing Game Servers

When setting up Amazon GameLift resources for your game, you first create a game build and upload it to Amazon GameLift. You can then use these actions to configure and manage a fleet of resources to run your game servers, scale capacity to meet player demand, access performance and utilization metrics, and more.

" + "documentation":"Amazon GameLift Service

Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Use Amazon GameLift for these tasks: (1) set up computing resources and deploy your game servers, (2) run game sessions and get players into games, (3) automatically scale your resources to meet player demand and manage costs, and (4) track in-depth metrics on game server performance and player usage.

The Amazon GameLift service API includes two important function sets:

This reference guide describes the low-level service API for Amazon GameLift. You can use the API functionality with these tools:

Learn more

API SUMMARY

This list offers a functional overview of the Amazon GameLift service API.

Managing Games and Players

Use these actions to start new game sessions, find existing game sessions, track game session status and other information, and enable player access to game sessions.

Setting Up and Managing Game Servers

When setting up Amazon GameLift resources for your game, you first create a game build and upload it to Amazon GameLift. You can then use these actions to configure and manage a fleet of resources to run your game servers, scale capacity to meet player demand, access performance and utilization metrics, and more.

" } diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index a8681aec..8f6ec6ce 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -135,7 +135,7 @@ {"shape":"InvalidInputException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Creates a classifier in the user's account. This may be either a GrokClassifier or an XMLClassifier.

" + "documentation":"

Creates a classifier in the user's account. This may be a GrokClassifier, an XMLClassifier, or abbrev JsonClassifier, depending on which field of the request is present.

" }, "CreateConnection":{ "name":"CreateConnection", @@ -1087,7 +1087,7 @@ {"shape":"EntityNotFoundException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Modifies an existing classifier (either a GrokClassifier or an XMLClassifier).

" + "documentation":"

Modifies an existing classifier (a GrokClassifier, XMLClassifier, or JsonClassifier, depending on which field is present).

" }, "UpdateConnection":{ "name":"UpdateConnection", @@ -1665,9 +1665,13 @@ "XMLClassifier":{ "shape":"XMLClassifier", "documentation":"

An XMLClassifier object.

" + }, + "JsonClassifier":{ + "shape":"JsonClassifier", + "documentation":"

A JsonClassifier object.

" } }, - "documentation":"

Classifiers are written in Python and triggered during a crawl task. You can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier checks whether a given file is in a format it can handle, and if it is, the classifier creates a schema in the form of a StructType object that matches that data format.

A classifier can be either a grok classifier or an XML classifier, specified in one or the other field of the Classifier object.

" + "documentation":"

Classifiers are written in Python and triggered during a crawl task. You can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier checks whether a given file is in a format it can handle, and if it is, the classifier creates a schema in the form of a StructType object that matches that data format.

A classifier can be a grok classifier, an XML classifier, or a JSON classifier, as specified in one of the fields in the Classifier object.

" }, "ClassifierList":{ "type":"list", @@ -2157,6 +2161,10 @@ "XMLClassifier":{ "shape":"CreateXMLClassifierRequest", "documentation":"

An XMLClassifier object specifying the classifier to create.

" + }, + "JsonClassifier":{ + "shape":"CreateJsonClassifierRequest", + "documentation":"

A JsonClassifier object specifying the classifier to create.

" } } }, @@ -2448,6 +2456,24 @@ } } }, + "CreateJsonClassifierRequest":{ + "type":"structure", + "required":[ + "Name", + "JsonPath" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the classifier.

" + }, + "JsonPath":{ + "shape":"JsonPath", + "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" + } + }, + "documentation":"

Specifies a JSON classifier for CreateClassifier to create.

" + }, "CreatePartitionRequest":{ "type":"structure", "required":[ @@ -4315,6 +4341,37 @@ }, "documentation":"

Specifies information used to update an existing job. Note that the previous job definition will be completely overwritten by this information.

" }, + "JsonClassifier":{ + "type":"structure", + "required":[ + "Name", + "JsonPath" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the classifier.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time this classifier was registered.

" + }, + "LastUpdated":{ + "shape":"Timestamp", + "documentation":"

The time this classifier was last updated.

" + }, + "Version":{ + "shape":"VersionId", + "documentation":"

The version of this classifier.

" + }, + "JsonPath":{ + "shape":"JsonPath", + "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" + } + }, + "documentation":"

A classifier for JSON content.

" + }, + "JsonPath":{"type":"string"}, "JsonValue":{"type":"string"}, "KeyString":{ "type":"string", @@ -5430,6 +5487,10 @@ "XMLClassifier":{ "shape":"UpdateXMLClassifierRequest", "documentation":"

An XMLClassifier object with updated fields.

" + }, + "JsonClassifier":{ + "shape":"UpdateJsonClassifierRequest", + "documentation":"

A JsonClassifier object with updated fields.

" } } }, @@ -5506,7 +5567,7 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example:  '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example: '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'

" } } }, @@ -5636,6 +5697,21 @@ } } }, + "UpdateJsonClassifierRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the classifier.

" + }, + "JsonPath":{ + "shape":"JsonPath", + "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" + } + }, + "documentation":"

Specifies a JSON classifier to be updated.

" + }, "UpdatePartitionRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/kinesis/2013-12-02/service-2.json b/botocore/data/kinesis/2013-12-02/service-2.json index 67f9b5aa..012059af 100644 --- a/botocore/data/kinesis/2013-12-02/service-2.json +++ b/botocore/data/kinesis/2013-12-02/service-2.json @@ -26,7 +26,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Adds or updates tags for the specified Kinesis stream. Each stream can have up to 10 tags.

If tags have already been assigned to the stream, AddTagsToStream overwrites any existing tags that correspond to the specified tag keys.

" + "documentation":"

Adds or updates tags for the specified Kinesis data stream. Each stream can have up to 10 tags.

If tags have already been assigned to the stream, AddTagsToStream overwrites any existing tags that correspond to the specified tag keys.

AddTagsToStream has a limit of five transactions per second per account.

" }, "CreateStream":{ "name":"CreateStream", @@ -40,7 +40,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidArgumentException"} ], - "documentation":"

Creates a Kinesis stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream.

You specify and control the number of shards that a stream is composed of. Each shard can support reads up to 5 transactions per second, up to a maximum data read total of 2 MB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second. I the amount of data input increases or decreases, you can add or remove shards.

The stream name identifies the stream. The name is scoped to the AWS account used by the application. It is also scoped by region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different regions, can have the same name.

CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Kinesis Streams immediately returns and sets the stream status to CREATING. After the stream is created, Kinesis Streams sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream.

You receive a LimitExceededException when making a CreateStream request when you try to do one of the following:

For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. To increase this limit, contact AWS Support.

You can use DescribeStream to check the stream status, which is returned in StreamStatus.

CreateStream has a limit of 5 transactions per second per account.

" + "documentation":"

Creates a Kinesis data stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream.

You specify and control the number of shards that a stream is composed of. Each shard can support reads up to five transactions per second, up to a maximum data read total of 2 MB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second. If the amount of data input increases or decreases, you can add or remove shards.

The stream name identifies the stream. The name is scoped to the AWS account used by the application. It is also scoped by AWS Region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different Regions, can have the same name.

CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Kinesis Data Streams immediately returns and sets the stream status to CREATING. After the stream is created, Kinesis Data Streams sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream.

You receive a LimitExceededException when making a CreateStream request when you try to do one of the following:

For the default shard limit for an AWS account, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact AWS Support.

You can use DescribeStream to check the stream status, which is returned in StreamStatus.

CreateStream has a limit of five transactions per second per account.

" }, "DecreaseStreamRetentionPeriod":{ "name":"DecreaseStreamRetentionPeriod", @@ -52,9 +52,10 @@ "errors":[ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, {"shape":"InvalidArgumentException"} ], - "documentation":"

Decreases the Kinesis stream's retention period, which is the length of time data records are accessible after they are added to the stream. The minimum value of a stream's retention period is 24 hours.

This operation may result in lost data. For example, if the stream's retention period is 48 hours and is decreased to 24 hours, any data already in the stream that is older than 24 hours is inaccessible.

" + "documentation":"

Decreases the Kinesis data stream's retention period, which is the length of time data records are accessible after they are added to the stream. The minimum value of a stream's retention period is 24 hours.

This operation may result in lost data. For example, if the stream's retention period is 48 hours and is decreased to 24 hours, any data already in the stream that is older than 24 hours is inaccessible.

" }, "DeleteStream":{ "name":"DeleteStream", @@ -67,7 +68,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Deletes a Kinesis stream and all its shards and data. You must shut down any applications that are operating on the stream before you delete the stream. If an application attempts to operate on a deleted stream, it receives the exception ResourceNotFoundException.

If the stream is in the ACTIVE state, you can delete it. After a DeleteStream request, the specified stream is in the DELETING state until Kinesis Streams completes the deletion.

Note: Kinesis Streams might continue to accept data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the stream deletion is complete.

When you delete a stream, any shards in that stream are also deleted, and any tags are dissociated from the stream.

You can use the DescribeStream operation to check the state of the stream, which is returned in StreamStatus.

DeleteStream has a limit of 5 transactions per second per account.

" + "documentation":"

Deletes a Kinesis data stream and all its shards and data. You must shut down any applications that are operating on the stream before you delete the stream. If an application attempts to operate on a deleted stream, it receives the exception ResourceNotFoundException.

If the stream is in the ACTIVE state, you can delete it. After a DeleteStream request, the specified stream is in the DELETING state until Kinesis Data Streams completes the deletion.

Note: Kinesis Data Streams might continue to accept data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the stream deletion is complete.

When you delete a stream, any shards in that stream are also deleted, and any tags are dissociated from the stream.

You can use the DescribeStream operation to check the state of the stream, which is returned in StreamStatus.

DeleteStream has a limit of five transactions per second per account.

" }, "DescribeLimits":{ "name":"DescribeLimits", @@ -80,7 +81,7 @@ "errors":[ {"shape":"LimitExceededException"} ], - "documentation":"

Describes the shard limits and usage for the account.

If you update your account limits, the old limits might be returned for a few minutes.

This operation has a limit of 1 transaction per second per account.

" + "documentation":"

Describes the shard limits and usage for the account.

If you update your account limits, the old limits might be returned for a few minutes.

This operation has a limit of one transaction per second per account.

" }, "DescribeStream":{ "name":"DescribeStream", @@ -94,7 +95,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Describes the specified Kinesis stream.

The information returned includes the stream name, Amazon Resource Name (ARN), creation time, enhanced metric configuration, and shard map. The shard map is an array of shard objects. For each shard object, there is the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played in a role in creating the shard. Every record ingested in the stream is identified by a sequence number, which is assigned when the record is put into the stream.

You can limit the number of shards returned by each call. For more information, see Retrieving Shards from a Stream in the Amazon Kinesis Streams Developer Guide.

There are no guarantees about the chronological order shards returned. To process shards in chronological order, use the ID of the parent shard to track the lineage to the oldest shard.

This operation has a limit of 10 transactions per second per account.

" + "documentation":"

Describes the specified Kinesis data stream.

The information returned includes the stream name, Amazon Resource Name (ARN), creation time, enhanced metric configuration, and shard map. The shard map is an array of shard objects. For each shard object, there is the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played in a role in creating the shard. Every record ingested in the stream is identified by a sequence number, which is assigned when the record is put into the stream.

You can limit the number of shards returned by each call. For more information, see Retrieving Shards from a Stream in the Amazon Kinesis Data Streams Developer Guide.

There are no guarantees about the chronological order shards returned. To process shards in chronological order, use the ID of the parent shard to track the lineage to the oldest shard.

This operation has a limit of 10 transactions per second per account.

" }, "DescribeStreamSummary":{ "name":"DescribeStreamSummary", @@ -108,7 +109,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Provides a summarized description of the specified Kinesis stream without the shard list.

The information returned includes the stream name, Amazon Resource Name (ARN), status, record retention period, approximate creation time, monitoring, encryption details, and open shard count.

" + "documentation":"

Provides a summarized description of the specified Kinesis data stream without the shard list.

The information returned includes the stream name, Amazon Resource Name (ARN), status, record retention period, approximate creation time, monitoring, encryption details, and open shard count.

" }, "DisableEnhancedMonitoring":{ "name":"DisableEnhancedMonitoring", @@ -140,7 +141,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Enables enhanced Kinesis stream monitoring for shard-level metrics.

" + "documentation":"

Enables enhanced Kinesis data stream monitoring for shard-level metrics.

" }, "GetRecords":{ "name":"GetRecords", @@ -162,7 +163,7 @@ {"shape":"KMSOptInRequired"}, {"shape":"KMSThrottlingException"} ], - "documentation":"

Gets data records from a Kinesis stream's shard.

Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. It might take multiple calls to get to a portion of the shard that contains records.

You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Streams Limits in the Amazon Kinesis Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. If the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.

Each data record can be up to 1 MB in size, and each shard can read up to 2 MB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit.

The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the shard, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException. GetRecords won't return any data when it throws an exception. For this reason, we recommend that you wait one second between calls to GetRecords; however, it's possible that the application will get exceptions for longer than 1 second.

To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Streams Developer Guide).

Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side time stamp, whereas a client-side time stamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time stamp accuracy, or that the time stamp is always increasing. For example, records in a shard or across a stream might have time stamps that are out of order.

" + "documentation":"

Gets data records from a Kinesis data stream's shard.

Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. It might take multiple calls to get to a portion of the shard that contains records.

You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. If the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.

Each data record can be up to 1 MB in size, and each shard can read up to 2 MB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit.

The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MB. If a call returns this amount of data, subsequent calls made within the next five seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the stream, subsequent calls made within the next one second throw ProvisionedThroughputExceededException. GetRecords won't return any data when it throws an exception. For this reason, we recommend that you wait one second between calls to GetRecords; however, it's possible that the application will get exceptions for longer than 1 second.

To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Data Streams Developer Guide).

Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side time stamp, whereas a client-side time stamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time stamp accuracy, or that the time stamp is always increasing. For example, records in a shard or across a stream might have time stamps that are out of order.

" }, "GetShardIterator":{ "name":"GetShardIterator", @@ -177,7 +178,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Gets an Amazon Kinesis shard iterator. A shard iterator expires five minutes after it is returned to the requester.

A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards.

You must specify the shard iterator type. For example, you can set the ShardIteratorType parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type. Alternatively, the parameter can read right after the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in the shard in the system (the oldest data record in the shard), or LATEST so that you always read the most recent data in the shard.

When you read repeatedly from a stream, use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in NextShardIterator. A new shard iterator is returned by every GetRecords request in NextShardIterator, which you use in the ShardIterator parameter of the next GetRecords request.

If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Streams Developer Guide.

If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. A shard can be closed as a result of using SplitShard or MergeShards.

GetShardIterator has a limit of 5 transactions per second per account per open shard.

" + "documentation":"

Gets an Amazon Kinesis shard iterator. A shard iterator expires five minutes after it is returned to the requester.

A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards.

You must specify the shard iterator type. For example, you can set the ShardIteratorType parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type. Alternatively, the parameter can read right after the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in the shard in the system (the oldest data record in the shard), or LATEST so that you always read the most recent data in the shard.

When you read repeatedly from a stream, use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in NextShardIterator. A new shard iterator is returned by every GetRecords request in NextShardIterator, which you use in the ShardIterator parameter of the next GetRecords request.

If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Data Streams Developer Guide.

If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. A shard can be closed as a result of using SplitShard or MergeShards.

GetShardIterator has a limit of five transactions per second per account per open shard.

" }, "IncreaseStreamRetentionPeriod":{ "name":"IncreaseStreamRetentionPeriod", @@ -189,9 +190,27 @@ "errors":[ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, {"shape":"InvalidArgumentException"} ], - "documentation":"

Increases the Amazon Kinesis stream's retention period, which is the length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 168 hours (7 days).

If you choose a longer stream retention period, this operation increases the time period during which records that have not yet expired are accessible. However, it does not make previous, expired data (older than the stream's previous retention period) accessible after the operation has been called. For example, if a stream's retention period is set to 24 hours and is increased to 168 hours, any data that is older than 24 hours remains inaccessible to consumer applications.

" + "documentation":"

Increases the Kinesis data stream's retention period, which is the length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 168 hours (7 days).

If you choose a longer stream retention period, this operation increases the time period during which records that have not yet expired are accessible. However, it does not make previous, expired data (older than the stream's previous retention period) accessible after the operation has been called. For example, if a stream's retention period is set to 24 hours and is increased to 168 hours, any data that is older than 24 hours remains inaccessible to consumer applications.

" + }, + "ListShards":{ + "name":"ListShards", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListShardsInput"}, + "output":{"shape":"ListShardsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ExpiredNextTokenException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Lists the shards in a stream and provides information about each shard.

This API is a new operation that is used by the Amazon Kinesis Client Library (KCL). If you have a fine-grained IAM policy that only allows specific operations, you must update your policy to allow calls to this API. For more information, see Controlling Access to Amazon Kinesis Data Streams Resources Using IAM.

" }, "ListStreams":{ "name":"ListStreams", @@ -204,7 +223,7 @@ "errors":[ {"shape":"LimitExceededException"} ], - "documentation":"

Lists your Kinesis streams.

The number of streams may be too large to return from a single call to ListStreams. You can limit the number of returned streams using the Limit parameter. If you do not specify a value for the Limit parameter, Kinesis Streams uses the default limit, which is currently 10.

You can detect if there are more streams available to list by using the HasMoreStreams flag from the returned output. If there are more streams available, you can request more streams by using the name of the last stream returned by the ListStreams request in the ExclusiveStartStreamName parameter in a subsequent request to ListStreams. The group of stream names returned by the subsequent request is then added to the list. You can continue this process until all the stream names have been collected in the list.

ListStreams has a limit of 5 transactions per second per account.

" + "documentation":"

Lists your Kinesis data streams.

The number of streams may be too large to return from a single call to ListStreams. You can limit the number of returned streams using the Limit parameter. If you do not specify a value for the Limit parameter, Kinesis Data Streams uses the default limit, which is currently 10.

You can detect if there are more streams available to list by using the HasMoreStreams flag from the returned output. If there are more streams available, you can request more streams by using the name of the last stream returned by the ListStreams request in the ExclusiveStartStreamName parameter in a subsequent request to ListStreams. The group of stream names returned by the subsequent request is then added to the list. You can continue this process until all the stream names have been collected in the list.

ListStreams has a limit of five transactions per second per account.

" }, "ListTagsForStream":{ "name":"ListTagsForStream", @@ -219,7 +238,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Lists the tags for the specified Kinesis stream.

" + "documentation":"

Lists the tags for the specified Kinesis data stream. This operation has a limit of five transactions per second per account.

" }, "MergeShards":{ "name":"MergeShards", @@ -234,7 +253,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Merges two adjacent shards in a Kinesis stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. Two shards are considered adjacent if the union of the hash key ranges for the two shards form a contiguous set with no gaps. For example, if you have two shards, one with a hash key range of 276...381 and the other with a hash key range of 382...454, then you could merge these two shards into a single shard that would have a hash key range of 276...454. After the merge, the single child shard receives data for all hash key values covered by the two parent shards.

MergeShards is called when there is a need to reduce the overall capacity of a stream because of excess capacity that is not being used. You must specify the shard to be merged and the adjacent shard for a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Streams Developer Guide.

If the stream is in the ACTIVE state, you can call MergeShards. If a stream is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException. If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.

You can use DescribeStream to check the state of the stream, which is returned in StreamStatus.

MergeShards is an asynchronous operation. Upon receiving a MergeShards request, Amazon Kinesis immediately returns a response and sets the StreamStatus to UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

You use DescribeStream to determine the shard IDs that are specified in the MergeShards request.

If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards or SplitShard, you will receive a LimitExceededException.

MergeShards has a limit of 5 transactions per second per account.

" + "documentation":"

Merges two adjacent shards in a Kinesis data stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. Two shards are considered adjacent if the union of the hash key ranges for the two shards forms a contiguous set with no gaps. For example, if you have two shards, one with a hash key range of 276...381 and the other with a hash key range of 382...454, then you could merge these two shards into a single shard that would have a hash key range of 276...454. After the merge, the single child shard receives data for all hash key values covered by the two parent shards.

MergeShards is called when there is a need to reduce the overall capacity of a stream because of excess capacity that is not being used. You must specify the shard to be merged and the adjacent shard for a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Data Streams Developer Guide.

If the stream is in the ACTIVE state, you can call MergeShards. If a stream is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException. If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.

You can use DescribeStream to check the state of the stream, which is returned in StreamStatus.

MergeShards is an asynchronous operation. Upon receiving a MergeShards request, Amazon Kinesis Data Streams immediately returns a response and sets the StreamStatus to UPDATING. After the operation is completed, Kinesis Data Streams sets the StreamStatus to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

You use DescribeStream to determine the shard IDs that are specified in the MergeShards request.

If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards, or SplitShard, you receive a LimitExceededException.

MergeShards has a limit of five transactions per second per account.

" }, "PutRecord":{ "name":"PutRecord", @@ -255,7 +274,7 @@ {"shape":"KMSOptInRequired"}, {"shape":"KMSThrottlingException"} ], - "documentation":"

Writes a single data record into an Amazon Kinesis stream. Call PutRecord to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Kinesis Streams to distribute data across shards. Kinesis Streams segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine the shard to which a given data record belongs.

Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.

Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

If a PutRecord request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.

By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.

" + "documentation":"

Writes a single data record into an Amazon Kinesis data stream. Call PutRecord to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Kinesis Data Streams to distribute data across shards. Kinesis Data Streams segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine the shard to which a given data record belongs.

Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide.

PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.

Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide.

If a PutRecord request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.

By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.

" }, "PutRecords":{ "name":"PutRecords", @@ -276,7 +295,7 @@ {"shape":"KMSOptInRequired"}, {"shape":"KMSThrottlingException"} ], - "documentation":"

Writes multiple data records into a Kinesis stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data ingestion and processing.

Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Kinesis Streams as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array.

The response Records array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique to all records in the stream.

An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.

" + "documentation":"

Writes multiple data records into a Kinesis data stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data ingestion and processing.

Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Kinesis Data Streams as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide.

Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Data Streams Developer Guide.

The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array.

The response Records array includes both successfully and unsuccessfully processed records. Kinesis Data Streams attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique to all records in the stream.

An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Data Streams Developer Guide.

By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.

" }, "RemoveTagsFromStream":{ "name":"RemoveTagsFromStream", @@ -291,7 +310,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Removes tags from the specified Kinesis stream. Removed tags are deleted and cannot be recovered after this operation successfully completes.

If you specify a tag that does not exist, it is ignored.

" + "documentation":"

Removes tags from the specified Kinesis data stream. Removed tags are deleted and cannot be recovered after this operation successfully completes.

If you specify a tag that does not exist, it is ignored.

RemoveTagsFromStream has a limit of five transactions per second per account.

" }, "SplitShard":{ "name":"SplitShard", @@ -306,7 +325,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Splits a shard into two new shards in the Kinesis stream, to increase the stream's capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested.

You can also use SplitShard when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard to increase stream capacity, so that more Kinesis Streams applications can simultaneously read data from the stream for real-time processing.

You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information, see Split a Shard in the Amazon Kinesis Streams Developer Guide.

You can use DescribeStream to determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey parameters that are specified in the SplitShard request.

SplitShard is an asynchronous operation. Upon receiving a SplitShard request, Kinesis Streams immediately returns a response and sets the stream status to UPDATING. After the operation is completed, Kinesis Streams sets the stream status to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

You can use DescribeStream to check the status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. If a stream is in CREATING or UPDATING or DELETING states, DescribeStream returns a ResourceInUseException.

If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException. If you try to create more shards than are authorized for your account, you receive a LimitExceededException.

For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. To increase this limit, contact AWS Support.

If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.

SplitShard has a limit of 5 transactions per second per account.

" + "documentation":"

Splits a shard into two new shards in the Kinesis data stream, to increase the stream's capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested.

You can also use SplitShard when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard to increase stream capacity, so that more Kinesis Data Streams applications can simultaneously read data from the stream for real-time processing.

You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information, see Split a Shard in the Amazon Kinesis Data Streams Developer Guide.

You can use DescribeStream to determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey parameters that are specified in the SplitShard request.

SplitShard is an asynchronous operation. Upon receiving a SplitShard request, Kinesis Data Streams immediately returns a response and sets the stream status to UPDATING. After the operation is completed, Kinesis Data Streams sets the stream status to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

You can use DescribeStream to check the status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. If a stream is in CREATING or UPDATING or DELETING states, DescribeStream returns a ResourceInUseException.

If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException. If you try to create more shards than are authorized for your account, you receive a LimitExceededException.

For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact AWS Support.

If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.

SplitShard has a limit of five transactions per second per account.

" }, "StartStreamEncryption":{ "name":"StartStreamEncryption", @@ -327,7 +346,7 @@ {"shape":"KMSOptInRequired"}, {"shape":"KMSThrottlingException"} ], - "documentation":"

Enables or updates server-side encryption using an AWS KMS key for a specified stream.

Starting encryption is an asynchronous operation. Upon receiving the request, Kinesis Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Streams sets the status of the stream back to ACTIVE. Updating or applying encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, encryption begins for records written to the stream.

API Limits: You can successfully apply a new AWS KMS key for server-side encryption 25 times in a rolling 24-hour period.

Note: It can take up to five seconds after the stream is in an ACTIVE status before all records written to the stream are encrypted. After you enable encryption, you can verify that encryption is applied by inspecting the API response from PutRecord or PutRecords.

" + "documentation":"

Enables or updates server-side encryption using an AWS KMS key for a specified stream.

Starting encryption is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Updating or applying encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, encryption begins for records written to the stream.

API Limits: You can successfully apply a new AWS KMS key for server-side encryption 25 times in a rolling 24-hour period.

Note: It can take up to five seconds after the stream is in an ACTIVE status before all records written to the stream are encrypted. After you enable encryption, you can verify that encryption is applied by inspecting the API response from PutRecord or PutRecords.

" }, "StopStreamEncryption":{ "name":"StopStreamEncryption", @@ -342,7 +361,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disables server-side encryption for a specified stream.

Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Streams sets the status of the stream back to ACTIVE. Stopping encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, records written to the stream are no longer encrypted by Kinesis Streams.

API Limits: You can successfully disable server-side encryption 25 times in a rolling 24-hour period.

Note: It can take up to five seconds after the stream is in an ACTIVE status before all records written to the stream are no longer subject to encryption. After you disabled encryption, you can verify that encryption is not applied by inspecting the API response from PutRecord or PutRecords.

" + "documentation":"

Disables server-side encryption for a specified stream.

Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Stopping encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, records written to the stream are no longer encrypted by Kinesis Data Streams.

API Limits: You can successfully disable server-side encryption 25 times in a rolling 24-hour period.

Note: It can take up to five seconds after the stream is in an ACTIVE status before all records written to the stream are no longer subject to encryption. After you disable encryption, you can verify that encryption is not applied by inspecting the API response from PutRecord or PutRecords.

" }, "UpdateShardCount":{ "name":"UpdateShardCount", @@ -358,7 +377,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates the shard count of the specified stream to the specified number of shards.

Updating the shard count is an asynchronous operation. Upon receiving the request, Kinesis Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Streams sets the status of the stream back to ACTIVE. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING.

To update the shard count, Kinesis Streams performs splits or merges on individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges.

This operation has the following limits, which are per region per account unless otherwise noted. You cannot:

For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. To increase a limit, contact AWS Support.

" + "documentation":"

Updates the shard count of the specified stream to the specified number of shards.

Updating the shard count is an asynchronous operation. Upon receiving the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING.

To update the shard count, Kinesis Data Streams performs splits or merges on individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges.

This operation has the following limits. You cannot do the following:

For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To request an increase in the call rate limit, the shard limit for this API, or your overall shard limit, use the limits form.

" } }, "shapes":{ @@ -390,7 +409,7 @@ "members":{ "StreamName":{ "shape":"StreamName", - "documentation":"

A name to identify the stream. The stream name is scoped to the AWS account used by the application that creates the stream. It is also scoped by region. That is, two streams in two different AWS accounts can have the same name. Two streams in the same AWS account but in two different regions can also have the same name.

" + "documentation":"

A name to identify the stream. The stream name is scoped to the AWS account used by the application that creates the stream. It is also scoped by AWS Region. That is, two streams in two different AWS accounts can have the same name. Two streams in the same AWS account but in two different Regions can also have the same name.

" }, "ShardCount":{ "shape":"PositiveIntegerObject", @@ -416,7 +435,7 @@ "documentation":"

The name of the stream to modify.

" }, "RetentionPeriodHours":{ - "shape":"PositiveIntegerObject", + "shape":"RetentionPeriodHours", "documentation":"

The new retention period of the stream, in hours. Must be less than the current retention period.

" } }, @@ -485,7 +504,7 @@ "members":{ "StreamDescription":{ "shape":"StreamDescription", - "documentation":"

The current status of the stream, the stream ARN, an array of shard objects that comprise the stream, and whether there are more shards available.

" + "documentation":"

The current status of the stream, the stream Amazon Resource Name (ARN), an array of shard objects that comprise the stream, and whether there are more shards available.

" } }, "documentation":"

Represents the output for DescribeStream.

" @@ -519,11 +538,11 @@ "members":{ "StreamName":{ "shape":"StreamName", - "documentation":"

The name of the Kinesis stream for which to disable enhanced monitoring.

" + "documentation":"

The name of the Kinesis data stream for which to disable enhanced monitoring.

" }, "ShardLevelMetrics":{ "shape":"MetricsNameList", - "documentation":"

List of shard-level metrics to disable.

The following are the valid shard-level metrics. The value \"ALL\" disables every metric.

For more information, see Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch in the Amazon Kinesis Streams Developer Guide.

" + "documentation":"

List of shard-level metrics to disable.

The following are the valid shard-level metrics. The value \"ALL\" disables every metric.

For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide.

" } }, "documentation":"

Represents the input for DisableEnhancedMonitoring.

" @@ -541,7 +560,7 @@ }, "ShardLevelMetrics":{ "shape":"MetricsNameList", - "documentation":"

List of shard-level metrics to enable.

The following are the valid shard-level metrics. The value \"ALL\" enables every metric.

For more information, see Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch in the Amazon Kinesis Streams Developer Guide.

" + "documentation":"

List of shard-level metrics to enable.

The following are the valid shard-level metrics. The value \"ALL\" enables every metric.

For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide.

" } }, "documentation":"

Represents the input for EnableEnhancedMonitoring.

" @@ -558,7 +577,7 @@ "members":{ "ShardLevelMetrics":{ "shape":"MetricsNameList", - "documentation":"

List of shard-level metrics.

The following are the valid shard-level metrics. The value \"ALL\" enhances every metric.

For more information, see Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch in the Amazon Kinesis Streams Developer Guide.

" + "documentation":"

List of shard-level metrics.

The following are the valid shard-level metrics. The value \"ALL\" enhances every metric.

For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide.

" } }, "documentation":"

Represents enhanced metrics types.

" @@ -572,7 +591,7 @@ "members":{ "StreamName":{ "shape":"StreamName", - "documentation":"

The name of the Kinesis stream.

" + "documentation":"

The name of the Kinesis data stream.

" }, "CurrentShardLevelMetrics":{ "shape":"MetricsNameList", @@ -598,6 +617,14 @@ "documentation":"

The provided iterator exceeds the maximum age allowed.

", "exception":true }, + "ExpiredNextTokenException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The pagination token passed to the ListShards operation is expired. For more information, see ListShardsInput$NextToken.

", + "exception":true + }, "GetRecordsInput":{ "type":"structure", "required":["ShardIterator"], @@ -647,11 +674,11 @@ "members":{ "StreamName":{ "shape":"StreamName", - "documentation":"

The name of the Amazon Kinesis stream.

" + "documentation":"

The name of the Amazon Kinesis data stream.

" }, "ShardId":{ "shape":"ShardId", - "documentation":"

The shard ID of the Kinesis Streams shard to get the iterator for.

" + "documentation":"

The shard ID of the Kinesis Data Streams shard to get the iterator for.

" }, "ShardIteratorType":{ "shape":"ShardIteratorType", @@ -712,7 +739,7 @@ "documentation":"

The name of the stream to modify.

" }, "RetentionPeriodHours":{ - "shape":"PositiveIntegerObject", + "shape":"RetentionPeriodHours", "documentation":"

The new retention period of the stream, in hours. Must be more than the current retention period.

" } }, @@ -811,6 +838,49 @@ "documentation":"

The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed.

", "exception":true }, + "ListShardsInput":{ + "type":"structure", + "members":{ + "StreamName":{ + "shape":"StreamName", + "documentation":"

The name of the data stream whose shards you want to list.

You cannot specify this parameter if you specify the NextToken parameter.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

When the number of shards in the data stream is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of shards in the data stream, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListShards to list the next set of shards.

Don't specify StreamName or StreamCreationTimestamp if you specify NextToken because the latter unambiguously identifies the stream.

You can optionally specify a value for the MaxResults parameter when you specify NextToken. If you specify a MaxResults value that is less than the number of shards that the operation returns if you don't specify MaxResults, the response will contain a new NextToken value. You can use the new NextToken value in a subsequent call to the ListShards operation.

Tokens expire after 300 seconds. When you obtain a value for NextToken in the response to a call to ListShards, you have 300 seconds to use that value. If you specify an expired token in a call to ListShards, you get ExpiredNextTokenException.

" + }, + "ExclusiveStartShardId":{ + "shape":"ShardId", + "documentation":"

The ID of the shard to start the list with.

If you don't specify this parameter, the default behavior is for ListShards to list the shards starting with the first one in the stream.

You cannot specify this parameter if you specify NextToken.

" + }, + "MaxResults":{ + "shape":"ListShardsInputLimit", + "documentation":"

The maximum number of shards to return in a single call to ListShards. The minimum value you can specify for this parameter is 1, and the maximum is 1,000, which is also the default.

When the number of shards to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListShards to list the next set of shards.

" + }, + "StreamCreationTimestamp":{ + "shape":"Timestamp", + "documentation":"

Specify this input parameter to distinguish data streams that have the same name. For example, if you create a data stream and then delete it, and you later create another data stream with the same name, you can use this input parameter to specify which of the two streams you want to list the shards for.

You cannot specify this parameter if you specify the NextToken parameter.

" + } + } + }, + "ListShardsInputLimit":{ + "type":"integer", + "max":10000, + "min":1 + }, + "ListShardsOutput":{ + "type":"structure", + "members":{ + "Shards":{ + "shape":"ShardList", + "documentation":"

An array of JSON objects. Each object represents one shard and specifies the IDs of the shard, the shard's parent, and the shard that's adjacent to the shard's parent. Each object also contains the starting and ending hash keys and the starting and ending sequence numbers for the shard.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

When the number of shards in the data stream is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of shards in the data stream, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListShards to list the next set of shards. For more information about the use of this pagination token when calling the ListShards operation, see ListShardsInput$NextToken.

Tokens expire after 300 seconds. When you obtain a value for NextToken in the response to a call to ListShards, you have 300 seconds to use that value. If you specify an expired token in a call to ListShards, you get ExpiredNextTokenException.

" + } + } + }, "ListStreamsInput":{ "type":"structure", "members":{ @@ -936,6 +1006,11 @@ "type":"long", "min":0 }, + "NextToken":{ + "type":"string", + "max":1048576, + "min":1 + }, "PartitionKey":{ "type":"string", "max":256, @@ -954,7 +1029,7 @@ "documentation":"

A message that provides information about the error.

" } }, - "documentation":"

The request rate for the stream is too high, or the requested data is too large for the available throughput. Reduce the frequency or size of your requests. For more information, see Streams Limits in the Amazon Kinesis Streams Developer Guide, and Error Retries and Exponential Backoff in AWS in the AWS General Reference.

", + "documentation":"

The request rate for the stream is too high, or the requested data is too large for the available throughput. Reduce the frequency or size of your requests. For more information, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and Exponential Backoff in AWS in the AWS General Reference.

", "exception":true }, "PutRecordInput":{ @@ -975,7 +1050,7 @@ }, "PartitionKey":{ "shape":"PartitionKey", - "documentation":"

Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.

" + "documentation":"

Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.

" }, "ExplicitHashKey":{ "shape":"HashKey", @@ -1005,7 +1080,7 @@ }, "EncryptionType":{ "shape":"EncryptionType", - "documentation":"

The encryption type to use on the record. This parameter can be one of the following values:

" + "documentation":"

The encryption type to use on the record. This parameter can be one of the following values:

" } }, "documentation":"

Represents the output for PutRecord.

" @@ -1042,7 +1117,7 @@ }, "EncryptionType":{ "shape":"EncryptionType", - "documentation":"

The encryption type used on the records. This parameter can be one of the following values:

" + "documentation":"

The encryption type used on the records. This parameter can be one of the following values:

" } }, "documentation":"

PutRecords results.

" @@ -1064,7 +1139,7 @@ }, "PartitionKey":{ "shape":"PartitionKey", - "documentation":"

Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.

" + "documentation":"

Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.

" } }, "documentation":"

Represents the output for PutRecords.

" @@ -1121,7 +1196,7 @@ }, "Data":{ "shape":"Data", - "documentation":"

The data blob. The data in the blob is both opaque and immutable to Kinesis Streams, which does not inspect, interpret, or change the data in the blob in any way. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).

" + "documentation":"

The data blob. The data in the blob is both opaque and immutable to Kinesis Data Streams, which does not inspect, interpret, or change the data in the blob in any way. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).

" }, "PartitionKey":{ "shape":"PartitionKey", @@ -1129,10 +1204,10 @@ }, "EncryptionType":{ "shape":"EncryptionType", - "documentation":"

The encryption type used on the record. This parameter can be one of the following values:

" + "documentation":"

The encryption type used on the record. This parameter can be one of the following values:

" } }, - "documentation":"

The unit of data of the Kinesis stream, which is composed of a sequence number, a partition key, and a data blob.

" + "documentation":"

The unit of data of the Kinesis data stream, which is composed of a sequence number, a partition key, and a data blob.

" }, "RecordList":{ "type":"list", @@ -1178,6 +1253,11 @@ "documentation":"

The requested resource could not be found. The stream might not be specified correctly.

", "exception":true }, + "RetentionPeriodHours":{ + "type":"integer", + "max":168, + "min":1 + }, "ScalingType":{ "type":"string", "enum":["UNIFORM_SCALING"] @@ -1230,7 +1310,7 @@ "documentation":"

The range of possible sequence numbers for the shard.

" } }, - "documentation":"

A uniquely identified group of data records in a Kinesis stream.

" + "documentation":"

A uniquely identified group of data records in a Kinesis data stream.

" }, "ShardCountObject":{ "type":"integer", @@ -1303,7 +1383,7 @@ }, "KeyId":{ "shape":"KeyId", - "documentation":"

The GUID for the customer-managed KMS key to use for encryption. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".You can also use a master key owned by Kinesis Streams by specifying the alias aws/kinesis.

" + "documentation":"

The GUID for the customer-managed AWS KMS key to use for encryption. This value can be a globally unique identifier, a fully specified Amazon Resource Name (ARN) to either an alias or a key, or an alias name prefixed by \"alias/\". You can also use a master key owned by Kinesis Data Streams by specifying the alias aws/kinesis.

" } } }, @@ -1325,7 +1405,7 @@ }, "KeyId":{ "shape":"KeyId", - "documentation":"

The GUID for the customer-managed KMS key to use for encryption. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".You can also use a master key owned by Kinesis Streams by specifying the alias aws/kinesis.

" + "documentation":"

The GUID for the customer-managed AWS KMS key to use for encryption. This value can be a globally unique identifier, a fully specified Amazon Resource Name (ARN) to either an alias or a key, or an alias name prefixed by \"alias/\". You can also use a master key owned by Kinesis Data Streams by specifying the alias aws/kinesis.

" } } }, @@ -1353,7 +1433,7 @@ }, "StreamStatus":{ "shape":"StreamStatus", - "documentation":"

The current status of the stream being described. The stream status is one of the following states:

" + "documentation":"

The current status of the stream being described. The stream status is one of the following states:

" }, "Shards":{ "shape":"ShardList", @@ -1364,7 +1444,7 @@ "documentation":"

If set to true, more shards in the stream are available to describe.

" }, "RetentionPeriodHours":{ - "shape":"PositiveIntegerObject", + "shape":"RetentionPeriodHours", "documentation":"

The current retention period, in hours.

" }, "StreamCreationTimestamp":{ @@ -1377,11 +1457,11 @@ }, "EncryptionType":{ "shape":"EncryptionType", - "documentation":"

The server-side encryption type used on the stream. This parameter can be one of the following values:

" + "documentation":"

The server-side encryption type used on the stream. This parameter can be one of the following values:

" }, "KeyId":{ "shape":"KeyId", - "documentation":"

The GUID for the customer-managed KMS key to use for encryption. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".You can also use a master key owned by Kinesis Streams by specifying the alias aws/kinesis.

" + "documentation":"

The GUID for the customer-managed AWS KMS key to use for encryption. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\". You can also use a master key owned by Kinesis Data Streams by specifying the alias aws/kinesis.

" } }, "documentation":"

Represents the output for DescribeStream.

" @@ -1408,7 +1488,7 @@ }, "StreamStatus":{ "shape":"StreamStatus", - "documentation":"

The current status of the stream being described. The stream status is one of the following states:

" + "documentation":"

The current status of the stream being described. The stream status is one of the following states:

" }, "RetentionPeriodHours":{ "shape":"PositiveIntegerObject", @@ -1428,7 +1508,7 @@ }, "KeyId":{ "shape":"KeyId", - "documentation":"

The GUID for the customer-managed KMS key to use for encryption. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".You can also use a master key owned by Kinesis Streams by specifying the alias aws/kinesis.

" + "documentation":"

The GUID for the customer-managed AWS KMS key to use for encryption. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\". You can also use a master key owned by Kinesis Data Streams by specifying the alias aws/kinesis.

" }, "OpenShardCount":{ "shape":"ShardCountObject", @@ -1540,5 +1620,5 @@ } } }, - "documentation":"Amazon Kinesis Streams Service API Reference

Amazon Kinesis Streams is a managed service that scales elastically for real time processing of streaming big data.

" + "documentation":"Amazon Kinesis Data Streams Service API Reference

Amazon Kinesis Data Streams is a managed service that scales elastically for real-time processing of streaming big data.

" } diff --git a/botocore/data/lex-models/2017-04-19/service-2.json b/botocore/data/lex-models/2017-04-19/service-2.json index 86b6cbaf..c22a0a3b 100644 --- a/botocore/data/lex-models/2017-04-19/service-2.json +++ b/botocore/data/lex-models/2017-04-19/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"rest-json", "serviceFullName":"Amazon Lex Model Building Service", + "serviceId":"Lex Model Building Service", "signatureVersion":"v4", "signingName":"lex", "uid":"lex-models-2017-04-19" @@ -225,7 +226,7 @@ {"shape":"InternalFailureException"}, {"shape":"BadRequestException"} ], - "documentation":"

Deletes stored utterances.

Amazon Lex stores the utterances that users send to your bot unless the childDirected field in the bot is set to true. Utterances are stored for 15 days for use with the GetUtterancesView operation, and then stored indefinitely for use in improving the ability of your bot to respond to user input.

Use the DeleteStoredUtterances operation to manually delete stored utterances for a specific user.

This operation requires permissions for the lex:DeleteUtterances action.

" + "documentation":"

Deletes stored utterances.

Amazon Lex stores the utterances that users send to your bot. Utterances are stored for 15 days for use with the GetUtterancesView operation, and then stored indefinitely for use in improving the ability of your bot to respond to user input.

Use the DeleteStoredUtterances operation to manually delete stored utterances for a specific user.

This operation requires permissions for the lex:DeleteUtterances action.

" }, "GetBot":{ "name":"GetBot", @@ -526,7 +527,7 @@ {"shape":"InternalFailureException"}, {"shape":"BadRequestException"} ], - "documentation":"

Use the GetUtterancesView operation to get information about the utterances that your users have made to your bot. You can use this list to tune the utterances that your bot responds to.

For example, say that you have created a bot to order flowers. After your users have used your bot for a while, use the GetUtterancesView operation to see the requests that they have made and whether they have been successful. You might find that the utterance \"I want flowers\" is not being recognized. You could add this utterance to the OrderFlowers intent so that your bot recognizes that utterance.

After you publish a new version of a bot, you can get information about the old version and the new so that you can compare the performance across the two versions.

Data is available for the last 15 days. You can request information for up to 5 versions in each request. The response contains information about a maximum of 100 utterances for each version.

If the bot's childDirected field is set to true, utterances for the bot are not stored and cannot be retrieved with the GetUtterancesView operation. For more information, see PutBot.

This operation requires permissions for the lex:GetUtterancesView action.

" + "documentation":"

Use the GetUtterancesView operation to get information about the utterances that your users have made to your bot. You can use this list to tune the utterances that your bot responds to.

For example, say that you have created a bot to order flowers. After your users have used your bot for a while, use the GetUtterancesView operation to see the requests that they have made and whether they have been successful. You might find that the utterance \"I want flowers\" is not being recognized. You could add this utterance to the OrderFlowers intent so that your bot recognizes that utterance.

After you publish a new version of a bot, you can get information about the old version and the new so that you can compare the performance across the two versions.

Utterance statistics are generated once a day. Data is available for the last 15 days. You can request information for up to 5 versions in each request. The response contains information about a maximum of 100 utterances for each version.

This operation requires permissions for the lex:GetUtterancesView action.

" }, "PutBot":{ "name":"PutBot", @@ -544,7 +545,7 @@ {"shape":"BadRequestException"}, {"shape":"PreconditionFailedException"} ], - "documentation":"

Creates an Amazon Lex conversational bot or replaces an existing bot. When you create or update a bot you are only required to specify a name. You can use this to add intents later, or to remove intents from an existing bot. When you create a bot with a name only, the bot is created or updated but Amazon Lex returns the response FAILED. You can build the bot after you add one or more intents. For more information about Amazon Lex bots, see how-it-works.

If you specify the name of an existing bot, the fields in the request replace the existing values in the $LATEST version of the bot. Amazon Lex removes any fields that you don't provide values for in the request, except for the idleTTLInSeconds and privacySettings fields, which are set to their default values. If you don't specify values for required fields, Amazon Lex throws an exception.

This operation requires permissions for the lex:PutBot action. For more information, see auth-and-access-control.

" + "documentation":"

Creates an Amazon Lex conversational bot or replaces an existing bot. When you create or update a bot you are only required to specify a name, a locale, and whether the bot is directed toward children under age 13. You can use this to add intents later, or to remove intents from an existing bot. When you create a bot with the minimum information, the bot is created or updated but Amazon Lex returns the response FAILED. You can build the bot after you add one or more intents. For more information about Amazon Lex bots, see how-it-works.

If you specify the name of an existing bot, the fields in the request replace the existing values in the $LATEST version of the bot. Amazon Lex removes any fields that you don't provide values for in the request, except for the idleTTLInSeconds and privacySettings fields, which are set to their default values. If you don't specify values for required fields, Amazon Lex throws an exception.

This operation requires permissions for the lex:PutBot action. For more information, see auth-and-access-control.

" }, "PutBotAlias":{ "name":"PutBotAlias", @@ -833,7 +834,8 @@ "enum":[ "Facebook", "Slack", - "Twilio-Sms" + "Twilio-Sms", + "Kik" ] }, "CodeHook":{ @@ -872,7 +874,8 @@ "type":"string", "enum":[ "PlainText", - "SSML" + "SSML", + "CustomPayload" ] }, "Count":{"type":"integer"}, @@ -2233,6 +2236,12 @@ } } }, + "GroupNumber":{ + "type":"integer", + "box":true, + "max":5, + "min":1 + }, "Intent":{ "type":"structure", "required":[ @@ -2363,6 +2372,10 @@ "content":{ "shape":"ContentString", "documentation":"

The text of the message.

" + }, + "groupNumber":{ + "shape":"GroupNumber", + "documentation":"

Identifies the message group that the message belongs to. When a group is assigned to a message, Amazon Lex returns one message from each group in the response.

" } }, "documentation":"

The message object that provides the message text and its type.

" @@ -2370,7 +2383,7 @@ "MessageList":{ "type":"list", "member":{"shape":"Message"}, - "max":5, + "max":15, "min":1 }, "MessageVersion":{ diff --git a/botocore/data/lex-runtime/2016-11-28/service-2.json b/botocore/data/lex-runtime/2016-11-28/service-2.json index ef4884e7..1b3bc10e 100644 --- a/botocore/data/lex-runtime/2016-11-28/service-2.json +++ b/botocore/data/lex-runtime/2016-11-28/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"rest-json", "serviceFullName":"Amazon Lex Runtime Service", + "serviceId":"Lex Runtime Service", "signatureVersion":"v4", "signingName":"lex", "uid":"runtime.lex-2016-11-28" @@ -209,6 +210,15 @@ "error":{"httpStatusCode":508}, "exception":true }, + "MessageFormatType":{ + "type":"string", + "enum":[ + "PlainText", + "CustomPayload", + "SSML", + "Composite" + ] + }, "NotAcceptableException":{ "type":"structure", "members":{ @@ -319,10 +329,16 @@ }, "message":{ "shape":"Text", - "documentation":"

Message to convey to the user. It can come from the bot's configuration or a code hook (Lambda function). If the current intent is not configured with a code hook or if the code hook returned Delegate as the dialogAction.type in its response, then Amazon Lex decides the next course of action and selects an appropriate message from the bot configuration based on the current user interaction context. For example, if Amazon Lex is not able to understand the user input, it uses a clarification prompt message (For more information, see the Error Handling section in the Amazon Lex console). Another example: if the intent requires confirmation before fulfillment, then Amazon Lex uses the confirmation prompt message in the intent configuration. If the code hook returns a message, Amazon Lex passes it as-is in its response to the client.

", + "documentation":"

The message to convey to the user. The message can come from the bot's configuration or from a Lambda function.

If the intent is not configured with a Lambda function, or if the Lambda function returned Delegate as the dialogAction.type in its response, Amazon Lex decides on the next course of action and selects an appropriate message from the bot's configuration based on the current interaction context. For example, if Amazon Lex isn't able to understand user input, it uses a clarification prompt message.

When you create an intent you can assign messages to groups. When messages are assigned to groups Amazon Lex returns one message from each group in the response. The message field is an escaped JSON string containing the messages. For more information about the structure of the JSON string returned, see msg-prompts-formats.

If the Lambda function returns a message, Amazon Lex passes it to the client in its response.

", "location":"header", "locationName":"x-amz-lex-message" }, + "messageFormat":{ + "shape":"MessageFormatType", + "documentation":"

The format of the response message. One of the following values:

  • PlainText - The message contains plain UTF-8 text.

  • CustomPayload - The message is a custom format for the client.

  • SSML - The message contains text formatted for voice output.

  • Composite - The message contains an escaped JSON object containing one or more messages from the groups that messages were assigned to when the intent was created.

", + "location":"header", + "locationName":"x-amz-lex-message-format" + }, "dialogState":{ "shape":"DialogState", "documentation":"

Identifies the current state of the user interaction. Amazon Lex returns one of the following values as dialogState. The client can optionally use this information to customize the user interface.

  • ElicitIntent - Amazon Lex wants to elicit the user's intent. Consider the following examples:

    For example, a user might utter an intent (\"I want to order a pizza\"). If Amazon Lex cannot infer the user intent from this utterance, it will return this dialog state.

  • ConfirmIntent - Amazon Lex is expecting a \"yes\" or \"no\" response.

    For example, Amazon Lex wants user confirmation before fulfilling an intent. Instead of a simple \"yes\" or \"no\" response, a user might respond with additional information. For example, \"yes, but make it a thick crust pizza\" or \"no, I want to order a drink.\" Amazon Lex can process such additional information (in these examples, update the crust type slot or change the intent from OrderPizza to OrderDrink).

  • ElicitSlot - Amazon Lex is expecting the value of a slot for the current intent.

    For example, suppose that in the response Amazon Lex sends this message: \"What size pizza would you like?\". A user might reply with the slot value (e.g., \"medium\"). The user might also provide additional information in the response (e.g., \"medium thick crust pizza\"). Amazon Lex can process such additional information appropriately.

  • Fulfilled - Conveys that the Lambda function has successfully fulfilled the intent.

  • ReadyForFulfillment - Conveys that the client has to fulfill the request.

  • Failed - Conveys that the conversation with the user failed.

    This can happen for various reasons, including that the user does not provide an appropriate response to prompts from the service (you can configure how many times Amazon Lex can prompt a user for specific information), or if the Lambda function fails to fulfill the intent.

", @@ -406,7 +422,11 @@ }, "message":{ "shape":"Text", - "documentation":"

A message to convey to the user. It can come from the bot's configuration or a code hook (Lambda function). If the current intent is not configured with a code hook or the code hook returned Delegate as the dialogAction.type in its response, then Amazon Lex decides the next course of action and selects an appropriate message from the bot configuration based on the current user interaction context. For example, if Amazon Lex is not able to understand the user input, it uses a clarification prompt message (for more information, see the Error Handling section in the Amazon Lex console). Another example: if the intent requires confirmation before fulfillment, then Amazon Lex uses the confirmation prompt message in the intent configuration. If the code hook returns a message, Amazon Lex passes it as-is in its response to the client.

" + "documentation":"

The message to convey to the user. The message can come from the bot's configuration or from a Lambda function.

If the intent is not configured with a Lambda function, or if the Lambda function returned Delegate as the dialogAction.type in its response, Amazon Lex decides on the next course of action and selects an appropriate message from the bot's configuration based on the current interaction context. For example, if Amazon Lex isn't able to understand user input, it uses a clarification prompt message.

When you create an intent you can assign messages to groups. When messages are assigned to groups Amazon Lex returns one message from each group in the response. The message field is an escaped JSON string containing the messages. For more information about the structure of the JSON string returned, see msg-prompts-formats.

If the Lambda function returns a message, Amazon Lex passes it to the client in its response.

" + }, + "messageFormat":{ + "shape":"MessageFormatType", + "documentation":"

The format of the response message. One of the following values:

  • PlainText - The message contains plain UTF-8 text.

  • CustomPayload - The message is a custom format defined by the Lambda function.

  • SSML - The message contains text formatted for voice output.

  • Composite - The message contains an escaped JSON object containing one or more messages from the groups that messages were assigned to when the intent was created.

" }, "dialogState":{ "shape":"DialogState", diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index c4356d97..8f799e14 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -655,6 +655,52 @@ } ], "documentation": "Stops a running channel" + }, + "UpdateChannel": { + "name": "UpdateChannel", + "http": { + "method": "PUT", + "requestUri": "/prod/channels/{channelId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateChannelRequest" + }, + "output": { + "shape": "UpdateChannelResponse", + "documentation": "Channel is successfully updated." + }, + "errors": [ + { + "shape": "UnprocessableEntityException", + "documentation": "The channel configuration failed validation and could not be updated." + }, + { + "shape": "BadRequestException", + "documentation": "This request was invalid." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to update the channel." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad Gateway Error" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout Error" + }, + { + "shape": "ConflictException", + "documentation": "The channel is unable to update due to an issue with channel resources." + } + ], + "documentation": "Updates a channel." } }, "shapes": { @@ -1555,7 +1601,7 @@ "CaptionChannel": { "shape": "__integer", "locationName": "captionChannel", - "documentation": "Channel to insert closed captions. Each channel mapping must have a unique channel number (maximum of 4)" + "documentation": "The closed caption channel being described by this CaptionLanguageMapping. 
Each channel mapping must have a unique channel number (maximum of 4)" }, "LanguageCode": { "shape": "__string", @@ -1823,7 +1869,8 @@ "Reserved": { "shape": "__string", "locationName": "reserved", - "documentation": "Reserved for future use." + "documentation": "Deprecated field that's only usable by whitelisted customers.", + "deprecated": true }, "RoleArn": { "shape": "__string", @@ -1868,7 +1915,8 @@ "Reserved": { "shape": "__string", "locationName": "reserved", - "documentation": "Reserved for future use." + "documentation": "Deprecated field that's only usable by whitelisted customers.", + "deprecated": true }, "RoleArn": { "shape": "__string", @@ -6122,6 +6170,97 @@ }, "documentation": "Placeholder documentation for UnprocessableEntityException" }, + "UpdateChannel": { + "type": "structure", + "members": { + "Destinations": { + "shape": "ListOfOutputDestination", + "locationName": "destinations", + "documentation": "A list of output destinations for this channel." + }, + "EncoderSettings": { + "shape": "EncoderSettings", + "locationName": "encoderSettings", + "documentation": "The encoder settings for this channel." + }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification", + "documentation": "Specification of input for this channel (max. bitrate, resolution, codec, etc.)" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "The name of the channel." + }, + "RoleArn": { + "shape": "__string", + "locationName": "roleArn", + "documentation": "An optional Amazon Resource Name (ARN) of the role to assume when running the Channel. If you do not specify this on an update call but the role was previously set that role will be removed." 
+ } + }, + "documentation": "Placeholder documentation for UpdateChannel" + }, + "UpdateChannelRequest": { + "type": "structure", + "members": { + "ChannelId": { + "shape": "__string", + "location": "uri", + "locationName": "channelId", + "documentation": "channel ID" + }, + "Destinations": { + "shape": "ListOfOutputDestination", + "locationName": "destinations", + "documentation": "A list of output destinations for this channel." + }, + "EncoderSettings": { + "shape": "EncoderSettings", + "locationName": "encoderSettings", + "documentation": "The encoder settings for this channel." + }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification", + "documentation": "Specification of input for this channel (max. bitrate, resolution, codec, etc.)" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "The name of the channel." + }, + "RoleArn": { + "shape": "__string", + "locationName": "roleArn", + "documentation": "An optional Amazon Resource Name (ARN) of the role to assume when running the Channel. If you do not specify this on an update call but the role was previously set that role will be removed." + } + }, + "documentation": "A request to update a channel.", + "required": [ + "ChannelId" + ] + }, + "UpdateChannelResponse": { + "type": "structure", + "members": { + "Channel": { + "shape": "Channel", + "locationName": "channel" + } + }, + "documentation": "Placeholder documentation for UpdateChannelResponse" + }, + "UpdateChannelResultModel": { + "type": "structure", + "members": { + "Channel": { + "shape": "Channel", + "locationName": "channel" + } + }, + "documentation": "The updated channel's description." 
+ }, "ValidationError": { "type": "structure", "members": { diff --git a/botocore/data/mediastore/2017-09-01/service-2.json b/botocore/data/mediastore/2017-09-01/service-2.json index 86a3b6f8..4e2ce7ec 100644 --- a/botocore/data/mediastore/2017-09-01/service-2.json +++ b/botocore/data/mediastore/2017-09-01/service-2.json @@ -60,6 +60,22 @@ ], "documentation":"

Deletes the access policy that is associated with the specified container.

" }, + "DeleteCorsPolicy":{ + "name":"DeleteCorsPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCorsPolicyInput"}, + "output":{"shape":"DeleteCorsPolicyOutput"}, + "errors":[ + {"shape":"ContainerInUseException"}, + {"shape":"ContainerNotFoundException"}, + {"shape":"CorsPolicyNotFoundException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Deletes the cross-origin resource sharing (CORS) configuration information that is set for the container.

To use this operation, you must have permission to perform the MediaStore:DeleteCorsPolicy action. The container owner has this permission by default and can grant this permission to others.

" + }, "DescribeContainer":{ "name":"DescribeContainer", "http":{ @@ -72,7 +88,7 @@ {"shape":"ContainerNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Retrieves the properties of the requested container. This returns a single Container object based on ContainerName. To return all Container objects that are associated with a specified AWS account, use ListContainers.

" + "documentation":"

Retrieves the properties of the requested container. This request is commonly used to retrieve the endpoint of a container. An endpoint is a value assigned by the service when a new container is created. A container's endpoint does not change after it has been assigned. The DescribeContainer request returns a single Container object based on ContainerName. To return all Container objects that are associated with a specified AWS account, use ListContainers.

" }, "GetContainerPolicy":{ "name":"GetContainerPolicy", @@ -90,6 +106,22 @@ ], "documentation":"

Retrieves the access policy for the specified container. For information about the data that is included in an access policy, see the AWS Identity and Access Management User Guide.

" }, + "GetCorsPolicy":{ + "name":"GetCorsPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCorsPolicyInput"}, + "output":{"shape":"GetCorsPolicyOutput"}, + "errors":[ + {"shape":"ContainerInUseException"}, + {"shape":"ContainerNotFoundException"}, + {"shape":"CorsPolicyNotFoundException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Returns the cross-origin resource sharing (CORS) configuration information that is set for the container.

To use this operation, you must have permission to perform the MediaStore:GetCorsPolicy action. By default, the container owner has this permission and can grant it to others.

" + }, "ListContainers":{ "name":"ListContainers", "http":{ @@ -117,15 +149,44 @@ {"shape":"InternalServerError"} ], "documentation":"

Creates an access policy for the specified container to restrict the users and clients that can access it. For information about the data that is included in an access policy, see the AWS Identity and Access Management User Guide.

For this release of the REST API, you can create only one policy for a container. If you enter PutContainerPolicy twice, the second command modifies the existing policy.

" + }, + "PutCorsPolicy":{ + "name":"PutCorsPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutCorsPolicyInput"}, + "output":{"shape":"PutCorsPolicyOutput"}, + "errors":[ + {"shape":"ContainerNotFoundException"}, + {"shape":"ContainerInUseException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Sets the cross-origin resource sharing (CORS) configuration on a container so that the container can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your AWS Elemental MediaStore container at my.example.container.com by using the browser's XMLHttpRequest capability.

To enable CORS on a container, you attach a CORS policy to the container. In the CORS policy, you configure rules that identify origins and the HTTP methods that can be executed on your container. The policy can contain up to 398,000 characters. You can add up to 100 rules to a CORS policy. If more than one rule applies, the service uses the first applicable rule listed.

" } }, "shapes":{ + "AllowedHeaders":{ + "type":"list", + "member":{"shape":"Header"}, + "max":100, + "min":0 + }, + "AllowedMethods":{ + "type":"list", + "member":{"shape":"MethodName"} + }, + "AllowedOrigins":{ + "type":"list", + "member":{"shape":"Origin"} + }, "Container":{ "type":"structure", "members":{ "Endpoint":{ "shape":"Endpoint", - "documentation":"

The DNS endpoint of the container. Use from 1 to 255 characters. Use this endpoint to identify this container when sending requests to the data plane.

" + "documentation":"

The DNS endpoint of the container. Use the endpoint to identify the specific container when sending requests to the data plane. The service assigns this value when the container is created. Once the value has been assigned, it does not change.

" }, "CreationTime":{ "shape":"TimeStamp", @@ -199,6 +260,47 @@ "max":16, "min":1 }, + "CorsPolicy":{ + "type":"list", + "member":{"shape":"CorsRule"}, + "documentation":"

The CORS policy of the container.

", + "max":100, + "min":1 + }, + "CorsPolicyNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Could not perform an operation on a policy that does not exist.

", + "exception":true + }, + "CorsRule":{ + "type":"structure", + "members":{ + "AllowedOrigins":{ + "shape":"AllowedOrigins", + "documentation":"

One or more origins that you want the container to accept cross-domain requests from (for example, http://www.example.com).

Each CORS rule must have at least one AllowedOrigin element. The string value can include only one wildcard character (*), for example, http://*.example.com. Additionally, you can specify only one wildcard character to allow cross-origin access for all origins.

" + }, + "AllowedMethods":{ + "shape":"AllowedMethods", + "documentation":"

Identifies an HTTP method that the origin that is specified in the rule is allowed to execute.

Each CORS rule must contain at least one AllowedMethod and one AllowedOrigin element.

" + }, + "AllowedHeaders":{ + "shape":"AllowedHeaders", + "documentation":"

Specifies which headers are allowed in a preflight OPTIONS request through the Access-Control-Request-Headers header. Each header name that is specified in Access-Control-Request-Headers must have a corresponding entry in the rule. Only the headers that were requested are sent back.

This element can contain only one wildcard character (*).

" + }, + "MaxAgeSeconds":{ + "shape":"MaxAgeSeconds", + "documentation":"

The time in seconds that your browser caches the preflight response for the specified resource.

A CORS rule can have only one MaxAgeSeconds element.

" + }, + "ExposeHeaders":{ + "shape":"ExposeHeaders", + "documentation":"

One or more headers in the response that you want users to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object).

This element is optional for each rule.

" + } + }, + "documentation":"

A rule for a CORS policy. You can add up to 100 rules to a CORS policy. If more than one rule applies, the service uses the first applicable rule listed.

" + }, "CreateContainerInput":{ "type":"structure", "required":["ContainerName"], @@ -215,7 +317,7 @@ "members":{ "Container":{ "shape":"Container", - "documentation":"

ContainerARN: The Amazon Resource Name (ARN) of the newly created container. The ARN has the following format: arn:aws:<region>:<account that owns this container>:container/<name of container>. For example: arn:aws:mediastore:us-west-2:111122223333:container/movies

ContainerName: The container name as specified in the request.

CreationTime: Unix timestamp.

Status: The status of container creation or deletion. The status is one of the following: CREATING, ACTIVE, or DELETING. While the service is creating the container, the status is CREATING. When an endpoint is available, the status changes to ACTIVE.

The return value does not include the container's endpoint. To make downstream requests, you must obtain this value by using DescribeContainer or ListContainers.

" + "documentation":"

ContainerARN: The Amazon Resource Name (ARN) of the newly created container. The ARN has the following format: arn:aws:<region>:<account that owns this container>:container/<name of container>. For example: arn:aws:mediastore:us-west-2:111122223333:container/movies

ContainerName: The container name as specified in the request.

CreationTime: Unix time stamp.

Status: The status of container creation or deletion. The status is one of the following: CREATING, ACTIVE, or DELETING. While the service is creating the container, the status is CREATING. When an endpoint is available, the status changes to ACTIVE.

The return value does not include the container's endpoint. To make downstream requests, you must obtain this value by using DescribeContainer or ListContainers.

" } } }, @@ -249,6 +351,21 @@ "members":{ } }, + "DeleteCorsPolicyInput":{ + "type":"structure", + "required":["ContainerName"], + "members":{ + "ContainerName":{ + "shape":"ContainerName", + "documentation":"

The name of the container to remove the policy from.

" + } + } + }, + "DeleteCorsPolicyOutput":{ + "type":"structure", + "members":{ + } + }, "DescribeContainerInput":{ "type":"structure", "members":{ @@ -278,6 +395,12 @@ "min":1, "pattern":"[ \\w:\\.\\?-]+" }, + "ExposeHeaders":{ + "type":"list", + "member":{"shape":"Header"}, + "max":100, + "min":0 + }, "GetContainerPolicyInput":{ "type":"structure", "required":["ContainerName"], @@ -298,6 +421,29 @@ } } }, + "GetCorsPolicyInput":{ + "type":"structure", + "required":["ContainerName"], + "members":{ + "ContainerName":{ + "shape":"ContainerName", + "documentation":"

The name of the container that the policy is assigned to.

" + } + } + }, + "GetCorsPolicyOutput":{ + "type":"structure", + "required":["CorsPolicy"], + "members":{ + "CorsPolicy":{"shape":"CorsPolicy"} + } + }, + "Header":{ + "type":"string", + "max":8192, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, "InternalServerError":{ "type":"structure", "members":{ @@ -342,6 +488,24 @@ } } }, + "MaxAgeSeconds":{ + "type":"integer", + "max":2147483647, + "min":0 + }, + "MethodName":{ + "type":"string", + "enum":[ + "PUT", + "GET", + "DELETE", + "HEAD" + ] + }, + "Origin":{ + "type":"string", + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, "PaginationToken":{ "type":"string", "max":255, @@ -378,6 +542,28 @@ "members":{ } }, + "PutCorsPolicyInput":{ + "type":"structure", + "required":[ + "ContainerName", + "CorsPolicy" + ], + "members":{ + "ContainerName":{ + "shape":"ContainerName", + "documentation":"

The name of the container that you want to assign the CORS policy to.

" + }, + "CorsPolicy":{ + "shape":"CorsPolicy", + "documentation":"

The CORS policy to apply to the container.

" + } + } + }, + "PutCorsPolicyOutput":{ + "type":"structure", + "members":{ + } + }, "TimeStamp":{"type":"timestamp"} }, "documentation":"

An AWS Elemental MediaStore container is a namespace that holds folders and objects. You use a container endpoint to create, read, and delete objects.

" diff --git a/botocore/data/opsworks/2013-02-18/service-2.json b/botocore/data/opsworks/2013-02-18/service-2.json index 351143c5..92e2fc80 100644 --- a/botocore/data/opsworks/2013-02-18/service-2.json +++ b/botocore/data/opsworks/2013-02-18/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS OpsWorks", + "serviceId":"OpsWorks", "signatureVersion":"v4", "targetPrefix":"OpsWorks_20130218", "uid":"opsworks-2013-02-18" @@ -438,6 +439,15 @@ "output":{"shape":"DescribeMyUserProfileResult"}, "documentation":"

Describes a user's SSH information.

Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" }, + "DescribeOperatingSystems":{ + "name":"DescribeOperatingSystems", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"DescribeOperatingSystemsResponse"}, + "documentation":"

Describes the operating systems that are supported by AWS OpsWorks Stacks.

" + }, "DescribePermissions":{ "name":"DescribePermissions", "http":{ @@ -1289,7 +1299,7 @@ }, "DefaultOs":{ "shape":"String", - "documentation":"

The stack's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the parent stack's operating system. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.

" + "documentation":"

The stack's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the parent stack's operating system. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.

" }, "HostnameTheme":{ "shape":"String", @@ -1734,7 +1744,7 @@ }, "Os":{ "shape":"String", - "documentation":"

The instance's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom.

For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information on the supported operating systems, see Operating SystemsFor more information on how to use custom AMIs with AWS OpsWorks Stacks, see Using Custom AMIs.

" + "documentation":"

The instance's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom.

For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information on the supported operating systems, see Operating Systems. For more information on how to use custom AMIs with AWS OpsWorks Stacks, see Using Custom AMIs.

" }, "AmiId":{ "shape":"String", @@ -1924,7 +1934,7 @@ }, "DefaultOs":{ "shape":"String", - "documentation":"

The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs.

The default option is the current Amazon Linux version. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

" + "documentation":"

The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs.

The default option is the current Amazon Linux version. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

" }, "HostnameTheme":{ "shape":"String", @@ -2025,7 +2035,7 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

The data source's type, AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, or RdsDbInstance.

" + "documentation":"

The data source's type, AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, RdsDbInstance, or None.

" }, "Arn":{ "shape":"String", @@ -2321,15 +2331,15 @@ "members":{ "StackId":{ "shape":"String", - "documentation":"

The stack ID. If you include this parameter, DescribeDeployments returns a description of the commands associated with the specified stack.

" + "documentation":"

The stack ID. If you include this parameter, the command returns a description of the commands associated with the specified stack.

" }, "AppId":{ "shape":"String", - "documentation":"

The app ID. If you include this parameter, DescribeDeployments returns a description of the commands associated with the specified app.

" + "documentation":"

The app ID. If you include this parameter, the command returns a description of the commands associated with the specified app.

" }, "DeploymentIds":{ "shape":"Strings", - "documentation":"

An array of deployment IDs to be described. If you include this parameter, DescribeDeployments returns a description of the specified deployments. Otherwise, it returns a description of every deployment.

" + "documentation":"

An array of deployment IDs to be described. If you include this parameter, the command returns a description of the specified deployments. Otherwise, it returns a description of every deployment.

" } } }, @@ -2508,6 +2518,13 @@ }, "documentation":"

Contains the response to a DescribeMyUserProfile request.

" }, + "DescribeOperatingSystemsResponse":{ + "type":"structure", + "members":{ + "OperatingSystems":{"shape":"OperatingSystems"} + }, + "documentation":"

The response to a DescribeOperatingSystems request.

" + }, "DescribePermissionsRequest":{ "type":"structure", "members":{ @@ -2790,7 +2807,7 @@ }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes.

" + "documentation":"

The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for Throughput Optimized hard disk drives (HDD), sc1 for Cold HDD, and standard for Magnetic volumes.

If you specify the io1 volume type, you must also specify a value for the Iops attribute. The maximum ratio of provisioned IOPS to requested volume size (in GiB) is 50:1. AWS uses the default volume size (in GiB) specified in the AMI attributes to set IOPS to 50 x (volume size).

" }, "DeleteOnTermination":{ "shape":"Boolean", @@ -3220,6 +3237,7 @@ "shape":"Integer", "documentation":"

The number of instances with start_failed status.

" }, + "StopFailed":{"shape":"Integer"}, "Stopped":{ "shape":"Integer", "documentation":"

The number of instances with stopped status.

" @@ -3471,6 +3489,62 @@ "min":1 }, "NextToken":{"type":"string"}, + "OperatingSystem":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the operating system, such as Amazon Linux 2017.09.

" + }, + "Id":{ + "shape":"String", + "documentation":"

The ID of a supported operating system, such as Amazon Linux 2017.09.

" + }, + "Type":{ + "shape":"String", + "documentation":"

The type of a supported operating system, either Linux or Windows.

" + }, + "ConfigurationManagers":{ + "shape":"OperatingSystemConfigurationManagers", + "documentation":"

Supported configuration manager name and versions for an AWS OpsWorks Stacks operating system.

" + }, + "ReportedName":{ + "shape":"String", + "documentation":"

A short name for the operating system manufacturer.

" + }, + "ReportedVersion":{ + "shape":"String", + "documentation":"

The version of the operating system, including the release and edition, if applicable.

" + }, + "Supported":{ + "shape":"Boolean", + "documentation":"

Indicates that an operating system is not supported for new instances.

" + } + }, + "documentation":"

Describes supported operating systems in AWS OpsWorks Stacks.

" + }, + "OperatingSystemConfigurationManager":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the configuration manager, which is Chef.

" + }, + "Version":{ + "shape":"String", + "documentation":"

The versions of the configuration manager that are supported by an operating system.

" + } + }, + "documentation":"

A block that contains information about the configuration manager (Chef) and the versions of the configuration manager that are supported for an operating system.

" + }, + "OperatingSystemConfigurationManagers":{ + "type":"list", + "member":{"shape":"OperatingSystemConfigurationManager"} + }, + "OperatingSystems":{ + "type":"list", + "member":{"shape":"OperatingSystem"} + }, "Parameters":{ "type":"map", "key":{"shape":"String"}, @@ -4207,7 +4281,8 @@ "InstanceId":{ "shape":"String", "documentation":"

The instance ID.

" - } + }, + "Force":{"shape":"Boolean"} } }, "StopStackRequest":{ @@ -4421,7 +4496,7 @@ }, "Os":{ "shape":"String", - "documentation":"

The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information on the supported operating systems, see Operating Systems. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.

" + "documentation":"

The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information on the supported operating systems, see Operating Systems. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.

" }, "AmiId":{ "shape":"String", @@ -4576,7 +4651,7 @@ }, "DefaultOs":{ "shape":"String", - "documentation":"

The stack's operating system, which must be set to one of the following:

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the stack's current operating system. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

" + "documentation":"

The stack's operating system, which must be set to one of the following:

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the stack's current operating system. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

" }, "HostnameTheme":{ "shape":"String", @@ -4773,7 +4848,8 @@ "Iops":{ "shape":"Integer", "documentation":"

For PIOPS volumes, the IOPS per disk.

" - } + }, + "Encrypted":{"shape":"Boolean"} }, "documentation":"

Describes an instance's Amazon EBS volume.

" }, @@ -4803,11 +4879,15 @@ }, "VolumeType":{ "shape":"String", - "documentation":"

The volume type:

  • standard - Magnetic

  • io1 - Provisioned IOPS (SSD)

  • gp2 - General Purpose (SSD)

" + "documentation":"

The volume type. For more information, see Amazon EBS Volume Types.

  • standard - Magnetic

  • io1 - Provisioned IOPS (SSD)

  • gp2 - General Purpose (SSD)

  • st1 - Throughput Optimized hard disk drive (HDD)

  • sc1 - Cold HDD

" }, "Iops":{ "shape":"Integer", "documentation":"

For PIOPS volumes, the IOPS per disk.

" + }, + "Encrypted":{ + "shape":"Boolean", + "documentation":"

Specifies whether an Amazon EBS volume is encrypted. For more information, see Amazon EBS Encryption.

" } }, "documentation":"

Describes an Amazon EBS volume configuration.

" @@ -4863,5 +4943,5 @@ "documentation":"

Describes a time-based instance's auto scaling schedule. The schedule consists of a set of key-value pairs.

  • The key is the time period (a UTC hour) and must be an integer from 0 - 23.

  • The value indicates whether the instance should be online or offline for the specified period, and must be set to \"on\" or \"off\".

The default setting for all time periods is off, so you use the following parameters primarily to specify the online periods. You don't have to explicitly specify offline periods unless you want to change an online period to an offline period.

The following example specifies that the instance should be online for four hours, from UTC 1200 - 1600. It will be off for the remainder of the day.

{ \"12\":\"on\", \"13\":\"on\", \"14\":\"on\", \"15\":\"on\" }

" } }, - "documentation":"AWS OpsWorks

Welcome to the AWS OpsWorks Stacks API Reference. This guide provides descriptions, syntax, and usage examples for AWS OpsWorks Stacks actions and data types, including common parameters and error codes.

AWS OpsWorks Stacks is an application management service that provides an integrated experience for overseeing the complete application lifecycle. For information about this product, go to the AWS OpsWorks details page.

SDKs and CLI

The most common way to use the AWS OpsWorks Stacks API is by using the AWS Command Line Interface (CLI) or by using one of the AWS SDKs to implement applications in your preferred language. For more information, see:

Endpoints

AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Stacks can only be accessed or managed within the endpoint in which they are created.

  • opsworks.us-east-1.amazonaws.com

  • opsworks.us-east-2.amazonaws.com

  • opsworks.us-west-1.amazonaws.com

  • opsworks.us-west-2.amazonaws.com

  • opsworks.eu-west-1.amazonaws.com

  • opsworks.eu-west-2.amazonaws.com

  • opsworks.eu-central-1.amazonaws.com

  • opsworks.ap-northeast-1.amazonaws.com

  • opsworks.ap-northeast-2.amazonaws.com

  • opsworks.ap-south-1.amazonaws.com

  • opsworks.ap-southeast-1.amazonaws.com

  • opsworks.ap-southeast-2.amazonaws.com

  • opsworks.sa-east-1.amazonaws.com

Chef Versions

When you call CreateStack, CloneStack, or UpdateStack we recommend you use the ConfigurationManager parameter to specify the Chef version. The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information, see Chef Versions.

You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend migrating your existing Linux stacks to Chef 12 as soon as possible.

" + "documentation":"AWS OpsWorks

Welcome to the AWS OpsWorks Stacks API Reference. This guide provides descriptions, syntax, and usage examples for AWS OpsWorks Stacks actions and data types, including common parameters and error codes.

AWS OpsWorks Stacks is an application management service that provides an integrated experience for overseeing the complete application lifecycle. For information about this product, go to the AWS OpsWorks details page.

SDKs and CLI

The most common way to use the AWS OpsWorks Stacks API is by using the AWS Command Line Interface (CLI) or by using one of the AWS SDKs to implement applications in your preferred language. For more information, see:

Endpoints

AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Stacks can only be accessed or managed within the endpoint in which they are created.

  • opsworks.us-east-1.amazonaws.com

  • opsworks.us-east-2.amazonaws.com

  • opsworks.us-west-1.amazonaws.com

  • opsworks.us-west-2.amazonaws.com

  • opsworks.ca-central-1.amazonaws.com (API only; not available in the AWS console)

  • opsworks.eu-west-1.amazonaws.com

  • opsworks.eu-west-2.amazonaws.com

  • opsworks.eu-west-3.amazonaws.com

  • opsworks.eu-central-1.amazonaws.com

  • opsworks.ap-northeast-1.amazonaws.com

  • opsworks.ap-northeast-2.amazonaws.com

  • opsworks.ap-south-1.amazonaws.com

  • opsworks.ap-southeast-1.amazonaws.com

  • opsworks.ap-southeast-2.amazonaws.com

  • opsworks.sa-east-1.amazonaws.com

Chef Versions

When you call CreateStack, CloneStack, or UpdateStack we recommend you use the ConfigurationManager parameter to specify the Chef version. The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information, see Chef Versions.

You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend migrating your existing Linux stacks to Chef 12 as soon as possible.

" } diff --git a/botocore/data/rds/2014-10-31/examples-1.json b/botocore/data/rds/2014-10-31/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/rds/2014-10-31/waiters-2.json b/botocore/data/rds/2014-10-31/waiters-2.json index 6bd80883..c698be52 100644 --- a/botocore/data/rds/2014-10-31/waiters-2.json +++ b/botocore/data/rds/2014-10-31/waiters-2.json @@ -41,18 +41,6 @@ "matcher": "pathAny", "state": "failure", "argument": "DBInstances[].DBInstanceStatus" - }, - { - "expected": "incompatible-parameters", - "matcher": "pathAny", - "state": "failure", - "argument": "DBInstances[].DBInstanceStatus" - }, - { - "expected": "incompatible-restore", - "matcher": "pathAny", - "state": "failure", - "argument": "DBInstances[].DBInstanceStatus" } ] }, @@ -61,17 +49,17 @@ "operation": "DescribeDBInstances", "maxAttempts": 60, "acceptors": [ - { - "expected": "DBInstanceNotFound", - "matcher": "error", - "state": "success" - }, { "expected": "deleted", "matcher": "pathAll", "state": "success", "argument": "DBInstances[].DBInstanceStatus" }, + { + "expected": "DBInstanceNotFound", + "matcher": "error", + "state": "success" + }, { "expected": "creating", "matcher": "pathAny", @@ -98,6 +86,91 @@ } ] }, + "DBSnapshotAvailable": { + "delay": 30, + "operation": "DescribeDBSnapshots", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "DBSnapshots[].Status" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "DBSnapshots[].Status" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBSnapshots[].Status" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "DBSnapshots[].Status" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBSnapshots[].Status" + }, + { + "expected": "incompatible-parameters", + 
"matcher": "pathAny", + "state": "failure", + "argument": "DBSnapshots[].Status" + } + ] + }, + "DBSnapshotDeleted": { + "delay": 30, + "operation": "DescribeDBSnapshots", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "DBSnapshots[].Status" + }, + { + "expected": "DBSnapshotNotFound", + "matcher": "error", + "state": "success" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "DBSnapshots[].Status" + }, + { + "expected": "modifying", + "matcher": "pathAny", + "state": "failure", + "argument": "DBSnapshots[].Status" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBSnapshots[].Status" + }, + { + "expected": "resetting-master-credentials", + "matcher": "pathAny", + "state": "failure", + "argument": "DBSnapshots[].Status" + } + ] + }, "DBSnapshotCompleted": { "delay": 15, "operation": "DescribeDBSnapshots", diff --git a/botocore/data/redshift/2012-12-01/waiters-2.json b/botocore/data/redshift/2012-12-01/waiters-2.json index d91b0eb3..164e9b0d 100644 --- a/botocore/data/redshift/2012-12-01/waiters-2.json +++ b/botocore/data/redshift/2012-12-01/waiters-2.json @@ -42,13 +42,32 @@ "argument": "Clusters[].ClusterStatus" }, { - "expected": "rebooting", + "expected": "modifying", "matcher": "pathAny", "state": "failure", "argument": "Clusters[].ClusterStatus" } ] }, + "ClusterRestored": { + "operation": "DescribeClusters", + "maxAttempts": 30, + "delay": 60, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Clusters[].RestoreStatus.Status", + "expected": "completed" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Clusters[].ClusterStatus", + "expected": "deleting" + } + ] + }, "SnapshotAvailable": { "delay": 15, "operation": "DescribeClusterSnapshots", diff --git a/botocore/data/s3/2006-03-01/waiters-2.json 
b/botocore/data/s3/2006-03-01/waiters-2.json index 53bc2bc6..b508a8f5 100644 --- a/botocore/data/s3/2006-03-01/waiters-2.json +++ b/botocore/data/s3/2006-03-01/waiters-2.json @@ -11,6 +11,16 @@ "matcher": "status", "state": "success" }, + { + "expected": 301, + "matcher": "status", + "state": "success" + }, + { + "expected": 403, + "matcher": "status", + "state": "success" + }, { "expected": 404, "matcher": "status", diff --git a/botocore/data/servicecatalog/2015-12-10/service-2.json b/botocore/data/servicecatalog/2015-12-10/service-2.json index e4be74c5..bfae6871 100644 --- a/botocore/data/servicecatalog/2015-12-10/service-2.json +++ b/botocore/data/servicecatalog/2015-12-10/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Service Catalog", + "serviceId":"Service Catalog", "signatureVersion":"v4", "targetPrefix":"AWS242ServiceCatalogService", "uid":"servicecatalog-2015-12-10" @@ -149,6 +150,21 @@ ], "documentation":"

Creates a product.

" }, + "CreateProvisionedProductPlan":{ + "name":"CreateProvisionedProductPlan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateProvisionedProductPlanInput"}, + "output":{"shape":"CreateProvisionedProductPlanOutput"}, + "errors":[ + {"shape":"InvalidParametersException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidStateException"} + ], + "documentation":"

Creates a plan. A plan includes the list of resources that will be created (when provisioning a new product) or modified (when updating a provisioned product) when the plan is executed.

You can create one plan per provisioned product. To create a plan for an existing provisioned product, its status must be AVAILABLE or TAINTED.

To view the resource changes in the change set, use DescribeProvisionedProductPlan. To create or modify the provisioned product, use ExecuteProvisionedProductPlan.

" + }, "CreateProvisioningArtifact":{ "name":"CreateProvisioningArtifact", "http":{ @@ -238,6 +254,20 @@ ], "documentation":"

Deletes the specified product.

You cannot delete a product if it was shared with you or is associated with a portfolio.

" }, + "DeleteProvisionedProductPlan":{ + "name":"DeleteProvisionedProductPlan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteProvisionedProductPlanInput"}, + "output":{"shape":"DeleteProvisionedProductPlanOutput"}, + "errors":[ + {"shape":"InvalidParametersException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the specified plan.

" + }, "DeleteProvisioningArtifact":{ "name":"DeleteProvisioningArtifact", "http":{ @@ -346,6 +376,20 @@ ], "documentation":"

Gets information about the specified provisioned product.

" }, + "DescribeProvisionedProductPlan":{ + "name":"DescribeProvisionedProductPlan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeProvisionedProductPlanInput"}, + "output":{"shape":"DescribeProvisionedProductPlanOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParametersException"} + ], + "documentation":"

Gets information about the resource changes for the specified plan.

" + }, "DescribeProvisioningArtifact":{ "name":"DescribeProvisioningArtifact", "http":{ @@ -371,7 +415,7 @@ {"shape":"InvalidParametersException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets information about the configuration required to provision the specified product using the specified provisioning artifact.

If the output contains a TagOption key with an empty list of values, there is a TagOption conflict for that key. The end user cannot take action to fix the conflict, and launch is not blocked. In subsequent calls to ProvisionProduct, do not include conflicted TagOption keys as tags, or this will cause the error \"Parameter validation failed: Missing required parameter in Tags[N]:Value\" and tag the provisioned product with the value sc-tagoption-conflict-portfolioId-productId.

" + "documentation":"

Gets information about the configuration required to provision the specified product using the specified provisioning artifact.

If the output contains a TagOption key with an empty list of values, there is a TagOption conflict for that key. The end user cannot take action to fix the conflict, and launch is not blocked. In subsequent calls to ProvisionProduct, do not include conflicted TagOption keys as tags, or this causes the error \"Parameter validation failed: Missing required parameter in Tags[N]:Value\". Tag the provisioned product with the value sc-tagoption-conflict-portfolioId-productId.

" }, "DescribeRecord":{ "name":"DescribeRecord", @@ -443,6 +487,21 @@ ], "documentation":"

Disassociates the specified TagOption from the specified resource.

" }, + "ExecuteProvisionedProductPlan":{ + "name":"ExecuteProvisionedProductPlan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExecuteProvisionedProductPlanInput"}, + "output":{"shape":"ExecuteProvisionedProductPlanOutput"}, + "errors":[ + {"shape":"InvalidParametersException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidStateException"} + ], + "documentation":"

Provisions or modifies a product based on the resource changes for the specified plan.

" + }, "ListAcceptedPortfolioShares":{ "name":"ListAcceptedPortfolioShares", "http":{ @@ -538,6 +597,20 @@ ], "documentation":"

Lists all principal ARNs associated with the specified portfolio.

" }, + "ListProvisionedProductPlans":{ + "name":"ListProvisionedProductPlans", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListProvisionedProductPlansInput"}, + "output":{"shape":"ListProvisionedProductPlansOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParametersException"} + ], + "documentation":"

Lists the plans for the specified provisioned product or all plans the user has access to.

" + }, "ListProvisioningArtifacts":{ "name":"ListProvisioningArtifacts", "http":{ @@ -607,7 +680,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"DuplicateResourceException"} ], - "documentation":"

Provisions the specified product.

A provisioned product is a resourced instance of a product. For example, provisioning a product based on a CloudFormation template launches a CloudFormation stack and its underlying resources. You can check the status of this request using DescribeRecord.

If the request contains a tag key with an empty list of values, there is a tag conflict for that key. Do not include conflicted keys as tags, or this will cause the error \"Parameter validation failed: Missing required parameter in Tags[N]:Value\".

" + "documentation":"

Provisions the specified product.

A provisioned product is a resourced instance of a product. For example, provisioning a product based on a CloudFormation template launches a CloudFormation stack and its underlying resources. You can check the status of this request using DescribeRecord.

If the request contains a tag key with an empty list of values, there is a tag conflict for that key. Do not include conflicted keys as tags, or this causes the error \"Parameter validation failed: Missing required parameter in Tags[N]:Value\".

" }, "RejectPortfolioShare":{ "name":"RejectPortfolioShare", @@ -633,7 +706,7 @@ "errors":[ {"shape":"InvalidParametersException"} ], - "documentation":"

Lists the provisioned products that are available (not terminated).

" + "documentation":"

Lists the provisioned products that are available (not terminated).

To use additional filtering, see SearchProvisionedProducts.

" }, "SearchProducts":{ "name":"SearchProducts", @@ -662,6 +735,19 @@ ], "documentation":"

Gets information about the products for the specified portfolio or all products.

" }, + "SearchProvisionedProducts":{ + "name":"SearchProvisionedProducts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SearchProvisionedProductsInput"}, + "output":{"shape":"SearchProvisionedProductsOutput"}, + "errors":[ + {"shape":"InvalidParametersException"} + ], + "documentation":"

Gets information about the provisioned products that meet the specified criteria.

" + }, "TerminateProvisionedProduct":{ "name":"TerminateProvisionedProduct", "http":{ @@ -912,6 +998,30 @@ } }, "AttributeValue":{"type":"string"}, + "CausingEntity":{"type":"string"}, + "ChangeAction":{ + "type":"string", + "enum":[ + "ADD", + "MODIFY", + "REMOVE" + ] + }, + "CloudWatchDashboard":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"CloudWatchDashboardName", + "documentation":"

The name of the CloudWatch dashboard.

" + } + }, + "documentation":"

Information about a CloudWatch dashboard.

" + }, + "CloudWatchDashboardName":{"type":"string"}, + "CloudWatchDashboards":{ + "type":"list", + "member":{"shape":"CloudWatchDashboard"} + }, "ConstraintDescription":{ "type":"string", "max":2000 @@ -1113,7 +1223,7 @@ }, "Tags":{ "shape":"AddTags", - "documentation":"

The tags to associate with the portfolio.

" + "documentation":"

One or more tags.

" }, "IdempotencyToken":{ "shape":"IdempotencyToken", @@ -1209,7 +1319,7 @@ }, "Tags":{ "shape":"AddTags", - "documentation":"

The tags to associate with the product.

" + "documentation":"

One or more tags.

" }, "ProvisioningArtifactParameters":{ "shape":"ProvisioningArtifactProperties", @@ -1239,6 +1349,89 @@ } } }, + "CreateProvisionedProductPlanInput":{ + "type":"structure", + "required":[ + "PlanName", + "PlanType", + "ProductId", + "ProvisionedProductName", + "ProvisioningArtifactId", + "IdempotencyToken" + ], + "members":{ + "AcceptLanguage":{ + "shape":"AcceptLanguage", + "documentation":"

The language code.

  • en - English (default)

  • jp - Japanese

  • zh - Chinese

" + }, + "PlanName":{ + "shape":"ProvisionedProductPlanName", + "documentation":"

The name of the plan.

" + }, + "PlanType":{ + "shape":"ProvisionedProductPlanType", + "documentation":"

The plan type.

" + }, + "NotificationArns":{ + "shape":"NotificationArns", + "documentation":"

Passed to CloudFormation. The SNS topic ARNs to which to publish stack-related events.

" + }, + "PathId":{ + "shape":"Id", + "documentation":"

The path identifier of the product. This value is optional if the product has a default path, and required if the product has more than one path. To list the paths for a product, use ListLaunchPaths.

" + }, + "ProductId":{ + "shape":"Id", + "documentation":"

The product identifier.

" + }, + "ProvisionedProductName":{ + "shape":"ProvisionedProductName", + "documentation":"

A user-friendly name for the provisioned product. This value must be unique for the AWS account and cannot be updated after the product is provisioned.

" + }, + "ProvisioningArtifactId":{ + "shape":"Id", + "documentation":"

The identifier of the provisioning artifact.

" + }, + "ProvisioningParameters":{ + "shape":"UpdateProvisioningParameters", + "documentation":"

Parameters specified by the administrator that are required for provisioning the product.

" + }, + "IdempotencyToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"Tags", + "documentation":"

One or more tags.

" + } + } + }, + "CreateProvisionedProductPlanOutput":{ + "type":"structure", + "members":{ + "PlanName":{ + "shape":"ProvisionedProductPlanName", + "documentation":"

The name of the plan.

" + }, + "PlanId":{ + "shape":"Id", + "documentation":"

The plan identifier.

" + }, + "ProvisionProductId":{ + "shape":"Id", + "documentation":"

The product identifier.

" + }, + "ProvisionedProductName":{ + "shape":"ProvisionedProductName", + "documentation":"

The user-friendly name of the provisioned product.

" + }, + "ProvisioningArtifactId":{ + "shape":"Id", + "documentation":"

The identifier of the provisioning artifact.

" + } + } + }, "CreateProvisioningArtifactInput":{ "type":"structure", "required":[ @@ -1395,6 +1588,29 @@ "members":{ } }, + "DeleteProvisionedProductPlanInput":{ + "type":"structure", + "required":["PlanId"], + "members":{ + "AcceptLanguage":{ + "shape":"AcceptLanguage", + "documentation":"

The language code.

  • en - English (default)

  • jp - Japanese

  • zh - Chinese

" + }, + "PlanId":{ + "shape":"Id", + "documentation":"

The plan identifier.

" + }, + "IgnoreErrors":{ + "shape":"IgnoreErrors", + "documentation":"

If set to true, AWS Service Catalog stops managing the specified provisioned product even if it cannot delete the underlying resources.

" + } + } + }, + "DeleteProvisionedProductPlanOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteProvisioningArtifactInput":{ "type":"structure", "required":[ @@ -1623,6 +1839,49 @@ "ProvisionedProductDetail":{ "shape":"ProvisionedProductDetail", "documentation":"

Information about the provisioned product.

" + }, + "CloudWatchDashboards":{ + "shape":"CloudWatchDashboards", + "documentation":"

Any CloudWatch dashboards that were created when provisioning the product.

" + } + } + }, + "DescribeProvisionedProductPlanInput":{ + "type":"structure", + "required":["PlanId"], + "members":{ + "AcceptLanguage":{ + "shape":"AcceptLanguage", + "documentation":"

The language code.

  • en - English (default)

  • jp - Japanese

  • zh - Chinese

" + }, + "PlanId":{ + "shape":"Id", + "documentation":"

The plan identifier.

" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"

The maximum number of items to return with this call.

" + }, + "PageToken":{ + "shape":"PageToken", + "documentation":"

The page token for the next set of results. To retrieve the first set of results, use null.

" + } + } + }, + "DescribeProvisionedProductPlanOutput":{ + "type":"structure", + "members":{ + "ProvisionedProductPlanDetails":{ + "shape":"ProvisionedProductPlanDetails", + "documentation":"

Information about the plan.

" + }, + "ResourceChanges":{ + "shape":"ResourceChanges", + "documentation":"

Information about the resource changes that will occur when the plan is executed.

" + }, + "NextPageToken":{ + "shape":"PageToken", + "documentation":"

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

" } } }, @@ -1856,6 +2115,44 @@ }, "ErrorCode":{"type":"string"}, "ErrorDescription":{"type":"string"}, + "EvaluationType":{ + "type":"string", + "enum":[ + "STATIC", + "DYNAMIC" + ] + }, + "ExecuteProvisionedProductPlanInput":{ + "type":"structure", + "required":[ + "PlanId", + "IdempotencyToken" + ], + "members":{ + "AcceptLanguage":{ + "shape":"AcceptLanguage", + "documentation":"

The language code.

  • en - English (default)

  • jp - Japanese

  • zh - Chinese

" + }, + "PlanId":{ + "shape":"Id", + "documentation":"

The plan identifier.

" + }, + "IdempotencyToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.

", + "idempotencyToken":true + } + } + }, + "ExecuteProvisionedProductPlanOutput":{ + "type":"structure", + "members":{ + "RecordDetail":{ + "shape":"RecordDetail", + "documentation":"

Information about the result of provisioning the product.

" + } + } + }, "HasDefaultPath":{"type":"boolean"}, "Id":{ "type":"string", @@ -2150,6 +2447,44 @@ } } }, + "ListProvisionedProductPlansInput":{ + "type":"structure", + "members":{ + "AcceptLanguage":{ + "shape":"AcceptLanguage", + "documentation":"

The language code.

  • en - English (default)

  • jp - Japanese

  • zh - Chinese

" + }, + "ProvisionProductId":{ + "shape":"Id", + "documentation":"

The product identifier.

" + }, + "PageSize":{ + "shape":"PageSize", + "documentation":"

The maximum number of items to return with this call.

" + }, + "PageToken":{ + "shape":"PageToken", + "documentation":"

The page token for the next set of results. To retrieve the first set of results, use null.

" + }, + "AccessLevelFilter":{ + "shape":"AccessLevelFilter", + "documentation":"

The access level to use to obtain results. The default is User.

" + } + } + }, + "ListProvisionedProductPlansOutput":{ + "type":"structure", + "members":{ + "ProvisionedProductPlans":{ + "shape":"ProvisionedProductPlans", + "documentation":"

Information about the plans.

" + }, + "NextPageToken":{ + "shape":"PageToken", + "documentation":"

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

" + } + } + }, "ListProvisioningArtifactsInput":{ "type":"structure", "required":["ProductId"], @@ -2312,6 +2647,7 @@ } } }, + "LogicalResourceId":{"type":"string"}, "NoEcho":{"type":"boolean"}, "NotificationArn":{ "type":"string", @@ -2355,6 +2691,13 @@ "type":"string", "max":4096 }, + "PhysicalId":{"type":"string"}, + "PhysicalResourceId":{"type":"string"}, + "PlanResourceType":{ + "type":"string", + "max":256, + "min":1 + }, "PortfolioDescription":{ "type":"string", "max":2000 @@ -2380,7 +2723,7 @@ }, "CreatedTime":{ "shape":"CreationTime", - "documentation":"

The UTC timestamp of the creation time.

" + "documentation":"

The UTC time stamp of the creation time.

" }, "ProviderName":{ "shape":"ProviderName", @@ -2484,7 +2827,7 @@ }, "CreatedTime":{ "shape":"CreatedTime", - "documentation":"

The UTC timestamp of the creation time.

" + "documentation":"

The UTC time stamp of the creation time.

" } }, "documentation":"

Information about a product view.

" @@ -2578,9 +2921,10 @@ }, "documentation":"

Summary information about a product view.

" }, + "PropertyName":{"type":"string"}, "ProviderName":{ "type":"string", - "max":20, + "max":50, "min":1 }, "ProvisionProductInput":{ @@ -2618,7 +2962,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

The tags to use as provisioning options.

" + "documentation":"

One or more tags.

" }, "NotificationArns":{ "shape":"NotificationArns", @@ -2636,10 +2980,80 @@ "members":{ "RecordDetail":{ "shape":"RecordDetail", - "documentation":"

Information about the result of ProvisionProduct.

" + "documentation":"

Information about the result of provisioning the product.

" } } }, + "ProvisionedProductAttribute":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ProvisionedProductNameOrArn", + "documentation":"

The user-friendly name of the provisioned product.

" + }, + "Arn":{ + "shape":"ProvisionedProductNameOrArn", + "documentation":"

The ARN of the provisioned product.

" + }, + "Type":{ + "shape":"ProvisionedProductType", + "documentation":"

The type of provisioned product. The supported value is CFN_STACK.

" + }, + "Id":{ + "shape":"Id", + "documentation":"

The identifier of the provisioned product.

" + }, + "Status":{ + "shape":"ProvisionedProductStatus", + "documentation":"

The current status of the provisioned product.

  • AVAILABLE - Stable state, ready to perform any operation. The most recent operation succeeded and completed.

  • UNDER_CHANGE - Transitive state, operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.

  • TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.

  • ERROR - An unexpected error occurred, the provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.

" + }, + "StatusMessage":{ + "shape":"ProvisionedProductStatusMessage", + "documentation":"

The current status message of the provisioned product.

" + }, + "CreatedTime":{ + "shape":"CreatedTime", + "documentation":"

The UTC time stamp of the creation time.

" + }, + "IdempotencyToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.

" + }, + "LastRecordId":{ + "shape":"Id", + "documentation":"

The record identifier of the last request performed on this provisioned product.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

One or more tags.

" + }, + "PhysicalId":{ + "shape":"PhysicalId", + "documentation":"

The assigned identifier for the resource, such as an EC2 instance ID or an S3 bucket name.

" + }, + "ProductId":{ + "shape":"Id", + "documentation":"

The product identifier.

" + }, + "ProvisioningArtifactId":{ + "shape":"Id", + "documentation":"

The identifier of the provisioning artifact.

" + }, + "UserArn":{ + "shape":"UserArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM user.

" + }, + "UserArnSession":{ + "shape":"UserArnSession", + "documentation":"

The ARN of the IAM user in the session. This ARN might contain a session ID.

" + } + }, + "documentation":"

Information about a provisioned product.

" + }, + "ProvisionedProductAttributes":{ + "type":"list", + "member":{"shape":"ProvisionedProductAttribute"} + }, "ProvisionedProductDetail":{ "type":"structure", "members":{ @@ -2669,7 +3083,7 @@ }, "CreatedTime":{ "shape":"CreatedTime", - "documentation":"

The UTC timestamp of the creation time.

" + "documentation":"

The UTC time stamp of the creation time.

" }, "IdempotencyToken":{ "shape":"IdempotencyToken", @@ -2686,6 +3100,11 @@ "type":"list", "member":{"shape":"ProvisionedProductDetail"} }, + "ProvisionedProductFilters":{ + "type":"map", + "key":{"shape":"ProvisionedProductViewFilterBy"}, + "value":{"shape":"ProvisionedProductViewFilterValues"} + }, "ProvisionedProductId":{"type":"string"}, "ProvisionedProductName":{ "type":"string", @@ -2699,17 +3118,143 @@ "min":1, "pattern":"[a-zA-Z0-9][a-zA-Z0-9._-]{0,127}|arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" }, + "ProvisionedProductPlanDetails":{ + "type":"structure", + "members":{ + "CreatedTime":{ + "shape":"CreatedTime", + "documentation":"

The UTC time stamp of the creation time.

" + }, + "PathId":{ + "shape":"Id", + "documentation":"

The path identifier of the product. This value is optional if the product has a default path, and required if the product has more than one path. To list the paths for a product, use ListLaunchPaths.

" + }, + "ProductId":{ + "shape":"Id", + "documentation":"

The product identifier.

" + }, + "PlanName":{ + "shape":"ProvisionedProductPlanName", + "documentation":"

The name of the plan.

" + }, + "PlanId":{ + "shape":"Id", + "documentation":"

The plan identifier.

" + }, + "ProvisionProductId":{ + "shape":"Id", + "documentation":"

The product identifier.

" + }, + "ProvisionProductName":{ + "shape":"ProvisionedProductName", + "documentation":"

The user-friendly name of the provisioned product.

" + }, + "PlanType":{ + "shape":"ProvisionedProductPlanType", + "documentation":"

The plan type.

" + }, + "ProvisioningArtifactId":{ + "shape":"Id", + "documentation":"

The identifier of the provisioning artifact.

" + }, + "Status":{ + "shape":"ProvisionedProductPlanStatus", + "documentation":"

The status.

" + }, + "UpdatedTime":{ + "shape":"UpdatedTime", + "documentation":"

The time when the plan was last updated.

" + }, + "NotificationArns":{ + "shape":"NotificationArns", + "documentation":"

Passed to CloudFormation. The SNS topic ARNs to which to publish stack-related events.

" + }, + "ProvisioningParameters":{ + "shape":"UpdateProvisioningParameters", + "documentation":"

Parameters specified by the administrator that are required for provisioning the product.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

One or more tags.

" + }, + "StatusMessage":{ + "shape":"StatusMessage", + "documentation":"

The status message.

" + } + }, + "documentation":"

Information about a plan.

" + }, + "ProvisionedProductPlanName":{"type":"string"}, + "ProvisionedProductPlanStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_SUCCESS", + "CREATE_FAILED", + "EXECUTE_IN_PROGRESS", + "EXECUTE_SUCCESS", + "EXECUTE_FAILED" + ] + }, + "ProvisionedProductPlanSummary":{ + "type":"structure", + "members":{ + "PlanName":{ + "shape":"ProvisionedProductPlanName", + "documentation":"

The name of the plan.

" + }, + "PlanId":{ + "shape":"Id", + "documentation":"

The plan identifier.

" + }, + "ProvisionProductId":{ + "shape":"Id", + "documentation":"

The product identifier.

" + }, + "ProvisionProductName":{ + "shape":"ProvisionedProductName", + "documentation":"

The user-friendly name of the provisioned product.

" + }, + "PlanType":{ + "shape":"ProvisionedProductPlanType", + "documentation":"

The plan type.

" + }, + "ProvisioningArtifactId":{ + "shape":"Id", + "documentation":"

The identifier of the provisioning artifact.

" + } + }, + "documentation":"

Summary information about a plan.

" + }, + "ProvisionedProductPlanType":{ + "type":"string", + "enum":["CLOUDFORMATION"] + }, + "ProvisionedProductPlans":{ + "type":"list", + "member":{"shape":"ProvisionedProductPlanSummary"} + }, "ProvisionedProductStatus":{ "type":"string", "enum":[ "AVAILABLE", "UNDER_CHANGE", "TAINTED", - "ERROR" + "ERROR", + "PLAN_IN_PROGRESS" ] }, "ProvisionedProductStatusMessage":{"type":"string"}, "ProvisionedProductType":{"type":"string"}, + "ProvisionedProductViewFilterBy":{ + "type":"string", + "enum":["SearchQuery"] + }, + "ProvisionedProductViewFilterValue":{"type":"string"}, + "ProvisionedProductViewFilterValues":{ + "type":"list", + "member":{"shape":"ProvisionedProductViewFilterValue"} + }, "ProvisioningArtifact":{ "type":"structure", "members":{ @@ -2727,7 +3272,7 @@ }, "CreatedTime":{ "shape":"ProvisioningArtifactCreatedTime", - "documentation":"

The UTC timestamp of the creation time.

" + "documentation":"

The UTC time stamp of the creation time.

" } }, "documentation":"

Information about a provisioning artifact. A provisioning artifact is also known as a product version.

" @@ -2756,7 +3301,7 @@ }, "CreatedTime":{ "shape":"CreationTime", - "documentation":"

The UTC timestamp of the creation time.

" + "documentation":"

The UTC time stamp of the creation time.

" }, "Active":{ "shape":"ProvisioningArtifactActive", @@ -2862,7 +3407,7 @@ }, "CreatedTime":{ "shape":"ProvisioningArtifactCreatedTime", - "documentation":"

The UTC timestamp of the creation time.

" + "documentation":"

The UTC time stamp of the creation time.

" }, "ProvisioningArtifactMetadata":{ "shape":"ProvisioningArtifactInfo", @@ -2918,7 +3463,7 @@ }, "CreatedTime":{ "shape":"CreatedTime", - "documentation":"

The UTC timestamp of the creation time.

" + "documentation":"

The UTC time stamp of the creation time.

" }, "UpdatedTime":{ "shape":"UpdatedTime", @@ -2930,7 +3475,7 @@ }, "RecordType":{ "shape":"RecordType", - "documentation":"

The record type for this record.

  • PROVISION_PRODUCT

  • UPDATE_PROVISIONED_PRODUCT

  • TERMINATE_PROVISIONED_PRODUCT

" + "documentation":"

The record type.

  • PROVISION_PRODUCT

  • UPDATE_PROVISIONED_PRODUCT

  • TERMINATE_PROVISIONED_PRODUCT

" }, "ProvisionedProductId":{ "shape":"Id", @@ -2950,11 +3495,11 @@ }, "RecordErrors":{ "shape":"RecordErrors", - "documentation":"

The errors that occurred while processing the request.

" + "documentation":"

The errors that occurred.

" }, "RecordTags":{ "shape":"RecordTags", - "documentation":"

The tags associated with this record.

" + "documentation":"

One or more tags.

" } }, "documentation":"

Information about a request operation.

" @@ -3025,7 +3570,7 @@ "documentation":"

The value for this tag.

" } }, - "documentation":"

A tag associated with the record, stored as a key-value pair.

" + "documentation":"

Information about a tag, which is a key-value pair.

" }, "RecordTagKey":{ "type":"string", @@ -3064,11 +3609,98 @@ "members":{ } }, + "Replacement":{ + "type":"string", + "enum":[ + "TRUE", + "FALSE", + "CONDITIONAL" + ] + }, + "RequiresRecreation":{ + "type":"string", + "enum":[ + "NEVER", + "CONDITIONALLY", + "ALWAYS" + ] + }, "ResourceARN":{ "type":"string", "max":150, "min":1 }, + "ResourceAttribute":{ + "type":"string", + "enum":[ + "PROPERTIES", + "METADATA", + "CREATIONPOLICY", + "UPDATEPOLICY", + "DELETIONPOLICY", + "TAGS" + ] + }, + "ResourceChange":{ + "type":"structure", + "members":{ + "Action":{ + "shape":"ChangeAction", + "documentation":"

The change action.

" + }, + "LogicalResourceId":{ + "shape":"LogicalResourceId", + "documentation":"

The ID of the resource, as defined in the CloudFormation template.

" + }, + "PhysicalResourceId":{ + "shape":"PhysicalResourceId", + "documentation":"

The ID of the resource, if it was already created.

" + }, + "ResourceType":{ + "shape":"PlanResourceType", + "documentation":"

The type of resource.

" + }, + "Replacement":{ + "shape":"Replacement", + "documentation":"

If the change type is Modify, indicates whether the existing resource is deleted and replaced with a new one.

" + }, + "Scope":{ + "shape":"Scope", + "documentation":"

The change scope.

" + }, + "Details":{ + "shape":"ResourceChangeDetails", + "documentation":"

Information about the resource changes.

" + } + }, + "documentation":"

Information about a resource change that will occur when a plan is executed.

" + }, + "ResourceChangeDetail":{ + "type":"structure", + "members":{ + "Target":{ + "shape":"ResourceTargetDefinition", + "documentation":"

Information about the resource attribute that will be modified.

" + }, + "Evaluation":{ + "shape":"EvaluationType", + "documentation":"

For static evaluations, the value of the resource attribute will change and the new value is known. For dynamic evaluations, the value might change, and any new value will be determined when the plan is updated.

" + }, + "CausingEntity":{ + "shape":"CausingEntity", + "documentation":"

The ID of the entity that caused the change.

" + } + }, + "documentation":"

Information about a change to a resource attribute.

" + }, + "ResourceChangeDetails":{ + "type":"list", + "member":{"shape":"ResourceChangeDetail"} + }, + "ResourceChanges":{ + "type":"list", + "member":{"shape":"ResourceChange"} + }, "ResourceDetail":{ "type":"structure", "members":{ @@ -3109,7 +3741,7 @@ "type":"structure", "members":{ }, - "documentation":"

A resource that is currently in use. Ensure the resource is not in use and retry the operation.

", + "documentation":"

A resource that is currently in use. Ensure that the resource is not in use and retry the operation.

", "exception":true }, "ResourceNotFoundException":{ @@ -3119,6 +3751,24 @@ "documentation":"

The specified resource was not found.

", "exception":true }, + "ResourceTargetDefinition":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"ResourceAttribute", + "documentation":"

The attribute that will change.

" + }, + "Name":{ + "shape":"PropertyName", + "documentation":"

If the attribute is Properties, the value is the name of the property. Otherwise, the value is null.

" + }, + "RequiresRecreation":{ + "shape":"RequiresRecreation", + "documentation":"

If the attribute is Properties, indicates whether a change to this property causes the resource to be recreated.

" + } + }, + "documentation":"

Information about a change to a resource attribute.

" + }, "ResourceType":{"type":"string"}, "ScanProvisionedProductsInput":{ "type":"structure", @@ -3154,6 +3804,10 @@ } } }, + "Scope":{ + "type":"list", + "member":{"shape":"ResourceAttribute"} + }, "SearchFilterKey":{"type":"string"}, "SearchFilterValue":{"type":"string"}, "SearchProductsAsAdminInput":{ @@ -3252,6 +3906,62 @@ } } }, + "SearchProvisionedProductsInput":{ + "type":"structure", + "members":{ + "AcceptLanguage":{ + "shape":"AcceptLanguage", + "documentation":"

The language code.

  • en - English (default)

  • jp - Japanese

  • zh - Chinese

" + }, + "AccessLevelFilter":{ + "shape":"AccessLevelFilter", + "documentation":"

The access level to use to obtain results. The default is User.

" + }, + "Filters":{ + "shape":"ProvisionedProductFilters", + "documentation":"

The search filters.

When the key is SearchQuery, the searchable fields are arn, createdTime, id, lastRecordId, idempotencyToken, name, physicalId, productId, provisioningArtifact, type, status, tags, userArn, and userArnSession.

Example: \"SearchQuery\":[\"status:AVAILABLE\"]

" + }, + "SortBy":{ + "shape":"SortField", + "documentation":"

The sort field. If no value is specified, the results are not sorted. The valid values are arn, id, name, and lastRecordId.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order. If no value is specified, the results are not sorted.

" + }, + "PageSize":{ + "shape":"SearchProvisionedProductsPageSize", + "documentation":"

The maximum number of items to return with this call.

" + }, + "PageToken":{ + "shape":"PageToken", + "documentation":"

The page token for the next set of results. To retrieve the first set of results, use null.

" + } + } + }, + "SearchProvisionedProductsOutput":{ + "type":"structure", + "members":{ + "ProvisionedProducts":{ + "shape":"ProvisionedProductAttributes", + "documentation":"

Information about the provisioned products.

" + }, + "TotalResultsCount":{ + "shape":"TotalResultsCount", + "documentation":"

The number of provisioned products found.

" + }, + "NextPageToken":{ + "shape":"PageToken", + "documentation":"

The page token to use to retrieve the next set of results. If there are no additional results, this value is null.

" + } + } + }, + "SearchProvisionedProductsPageSize":{ + "type":"integer", + "max":100, + "min":0 + }, + "SortField":{"type":"string"}, "SortOrder":{ "type":"string", "enum":[ @@ -3277,6 +3987,10 @@ ] }, "StatusDetail":{"type":"string"}, + "StatusMessage":{ + "type":"string", + "pattern":"[\\u0009\\u000a\\u000d\\u0020-\\uD7FF\\uE000-\\uFFFD]*" + }, "SupportDescription":{"type":"string"}, "SupportEmail":{"type":"string"}, "SupportUrl":{"type":"string"}, @@ -3296,7 +4010,7 @@ "documentation":"

The value for this key.

" } }, - "documentation":"

Information about a tag. A tag is a key-value pair. Tags are entirely discretionary and are propagated to the resources created when provisioning a product.

" + "documentation":"

Information about a tag. A tag is a key-value pair. Tags are propagated to the resources created when provisioning a product.

" }, "TagKey":{ "type":"string", @@ -3428,6 +4142,7 @@ } } }, + "TotalResultsCount":{"type":"integer"}, "UpdateConstraintInput":{ "type":"structure", "required":["Id"], @@ -3607,7 +4322,7 @@ }, "UpdateToken":{ "shape":"IdempotencyToken", - "documentation":"

The idempotency token that uniquely identifies the provisioning update rquest.

", + "documentation":"

The idempotency token that uniquely identifies the provisioning update request.

", "idempotencyToken":true } } @@ -3740,6 +4455,8 @@ "member":{"shape":"UsageInstruction"} }, "UsePreviousValue":{"type":"boolean"}, + "UserArn":{"type":"string"}, + "UserArnSession":{"type":"string"}, "Verbose":{"type":"boolean"} }, "documentation":"AWS Service Catalog

AWS Service Catalog enables organizations to create and manage catalogs of IT services that are approved for use on AWS. To get the most out of this documentation, you should be familiar with the terminology discussed in AWS Service Catalog Concepts.

" diff --git a/botocore/data/servicediscovery/2017-03-14/service-2.json b/botocore/data/servicediscovery/2017-03-14/service-2.json index 604459d7..4ab34b88 100644 --- a/botocore/data/servicediscovery/2017-03-14/service-2.json +++ b/botocore/data/servicediscovery/2017-03-14/service-2.json @@ -59,7 +59,7 @@ {"shape":"NamespaceNotFound"}, {"shape":"ServiceAlreadyExists"} ], - "documentation":"

Creates a service, which defines a template for the following entities:

  • One to five resource record sets

  • Optionally, a health check

After you create the service, you can submit a RegisterInstance request, and Amazon Route 53 uses the values in the template to create the specified entities.

" + "documentation":"

Creates a service, which defines the configuration for the following entities:

  • Up to three records (A, AAAA, and SRV) or one CNAME record

  • Optionally, a health check

After you create the service, you can submit a RegisterInstance request, and Amazon Route 53 uses the values in the configuration to create the specified entities.

" }, "DeleteNamespace":{ "name":"DeleteNamespace", @@ -107,7 +107,7 @@ {"shape":"ResourceInUse"}, {"shape":"ServiceNotFound"} ], - "documentation":"

Deletes the resource record sets and the health check, if any, that Amazon Route 53 created for the specified instance.

" + "documentation":"

Deletes the records and the health check, if any, that Amazon Route 53 created for the specified instance.

" }, "GetInstance":{ "name":"GetInstance", @@ -137,7 +137,7 @@ {"shape":"InvalidInput"}, {"shape":"ServiceNotFound"} ], - "documentation":"

Gets the current health status (Healthy, Unhealthy, or Unknown) of one or more instances that are associated with a specified service.

" + "documentation":"

Gets the current health status (Healthy, Unhealthy, or Unknown) of one or more instances that are associated with a specified service.

There is a brief delay between when you register an instance and when the health status for the instance is available.

" }, "GetNamespace":{ "name":"GetNamespace", @@ -164,7 +164,7 @@ "errors":[ {"shape":"OperationNotFound"} ], - "documentation":"

Gets information about any operation that returns an operation ID in the response, such as a CreateService request. To get a list of operations that match specified criteria, see ListOperations.

" + "documentation":"

Gets information about any operation that returns an operation ID in the response, such as a CreateService request.

To get a list of operations that match specified criteria, see ListOperations.

" }, "GetService":{ "name":"GetService", @@ -192,7 +192,7 @@ {"shape":"ServiceNotFound"}, {"shape":"InvalidInput"} ], - "documentation":"

Gets summary information about the instances that you created by using a specified service.

" + "documentation":"

Lists summary information about the instances that you registered by using a specified service.

" }, "ListNamespaces":{ "name":"ListNamespaces", @@ -205,7 +205,7 @@ "errors":[ {"shape":"InvalidInput"} ], - "documentation":"

Gets information about the namespaces that were created by the current AWS account.

" + "documentation":"

Lists summary information about the namespaces that were created by the current AWS account.

" }, "ListOperations":{ "name":"ListOperations", @@ -228,7 +228,10 @@ }, "input":{"shape":"ListServicesRequest"}, "output":{"shape":"ListServicesResponse"}, - "documentation":"

Gets settings for all the services that are associated with one or more specified namespaces.

" + "errors":[ + {"shape":"InvalidInput"} + ], + "documentation":"

Lists summary information for all the services that are associated with one or more specified namespaces.

" }, "RegisterInstance":{ "name":"RegisterInstance", @@ -245,7 +248,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ServiceNotFound"} ], - "documentation":"

Creates one or more resource record sets and optionally a health check based on the settings in a specified service. When you submit a RegisterInstance request, Amazon Route 53 does the following:

  • Creates a resource record set for each resource record set template in the service

  • Creates a health check based on the settings in the health check template in the service, if any

  • Associates the health check, if any, with each of the resource record sets

One RegisterInstance request must complete before you can submit another request and specify the same service and instance ID.

For more information, see CreateService.

When Amazon Route 53 receives a DNS query for the specified DNS name, it returns the applicable value:

  • If the health check is healthy: returns all the resource record sets

  • If the health check is unhealthy: returns the IP address of the last healthy instance

  • If you didn't specify a health check template: returns all the resource record sets

" + "documentation":"

Creates or updates one or more records and optionally a health check based on the settings in a specified service. When you submit a RegisterInstance request, Amazon Route 53 does the following:

  • For each DNS record that you define in the service specified by ServiceId, creates or updates a record in the hosted zone that is associated with the corresponding namespace

  • Creates or updates a health check based on the settings in the health check configuration, if any, for the service

  • Associates the health check, if any, with each of the records

One RegisterInstance request must complete before you can submit another request and specify the same service ID and instance ID.

For more information, see CreateService.

When Route 53 receives a DNS query for the specified DNS name, it returns the applicable value:

  • If the health check is healthy: returns all the records

  • If the health check is unhealthy: returns the IP address of the last healthy instance

  • If you didn't specify a health check configuration: returns all the records

" }, "UpdateService":{ "name":"UpdateService", @@ -260,7 +263,7 @@ {"shape":"InvalidInput"}, {"shape":"ServiceNotFound"} ], - "documentation":"

Updates the TTL setting for a specified service. You must specify all the resource record set templates (and, optionally, a health check template) that you want to appear in the updated service. Any current resource record set templates (or health check template) that don't appear in an UpdateService request are deleted.

When you update the TTL setting for a service, Amazon Route 53 also updates the corresponding settings in all the resource record sets and health checks that were created by using the specified service.

" + "documentation":"

Submits a request to perform the following operations:

  • Add or delete DnsRecords configurations

  • Update the TTL setting for existing DnsRecords configurations

  • Add, update, or delete HealthCheckConfig for a specified service

You must specify all DnsRecords configurations (and, optionally, HealthCheckConfig) that you want to appear in the updated service. Any current configurations that don't appear in an UpdateService request are deleted.

When you update the TTL setting for a service, Amazon Route 53 also updates the corresponding settings in all the records and health checks that were created by using the specified service.

" } }, "shapes":{ @@ -295,7 +298,7 @@ }, "CreatorRequestId":{ "shape":"ResourceId", - "documentation":"

An optional parameter that you can use to resolve concurrent creation requests. CreatorRequestId helps to determine if a specific client owns the namespace.

", + "documentation":"

A unique string that identifies the request and that allows failed CreatePrivateDnsNamespace requests to be retried without the risk of executing the operation twice. CreatorRequestId can be any unique string, for example, a date/time stamp.

", "idempotencyToken":true }, "Description":{ @@ -327,7 +330,7 @@ }, "CreatorRequestId":{ "shape":"ResourceId", - "documentation":"

An optional parameter that you can use to resolve concurrent creation requests. CreatorRequestId helps to determine if a specific client owns the namespace.

", + "documentation":"

A unique string that identifies the request and that allows failed CreatePublicDnsNamespace requests to be retried without the risk of executing the operation twice. CreatorRequestId can be any unique string, for example, a date/time stamp.

", "idempotencyToken":true }, "Description":{ @@ -358,7 +361,7 @@ }, "CreatorRequestId":{ "shape":"ResourceId", - "documentation":"

An optional parameter that you can use to resolve concurrent creation requests. CreatorRequestId helps to determine if a specific client owns the namespace.

", + "documentation":"

A unique string that identifies the request and that allows failed CreateService requests to be retried without the risk of executing the operation twice. CreatorRequestId can be any unique string, for example, a date/time stamp.

", "idempotencyToken":true }, "Description":{ @@ -367,11 +370,11 @@ }, "DnsConfig":{ "shape":"DnsConfig", - "documentation":"

A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance.

" + "documentation":"

A complex type that contains information about the records that you want Route 53 to create when you register an instance.

" }, "HealthCheckConfig":{ "shape":"HealthCheckConfig", - "documentation":"

Public DNS namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, Amazon Route 53 associates the health check with all the resource record sets that you specify in DnsConfig.

The health check uses 30 seconds as the request interval. This is the number of seconds between the time that each Amazon Route 53 health checker gets a response from your endpoint and the time that it sends the next health check request. A health checker in each data center around the world sends your endpoint a health check request every 30 seconds. On average, your endpoint receives a health check request about every two seconds. Health checkers in different data centers don't coordinate with one another, so you'll sometimes see several requests per second followed by a few seconds with no health checks at all.

For information about the charges for health checks, see Amazon Route 53 Pricing.

" + "documentation":"

Public DNS namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, Route 53 associates the health check with all the records that you specify in DnsConfig.

For information about the charges for health checks, see Route 53 Pricing.

" } } }, @@ -455,12 +458,16 @@ "shape":"ResourceId", "documentation":"

The ID of the namespace to use for DNS configuration.

" }, + "RoutingPolicy":{ + "shape":"RoutingPolicy", + "documentation":"

The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify this service.

If you want to use this service to register instances that create alias records, specify WEIGHTED for the routing policy.

You can specify the following values:

MULTIVALUE

If you define a health check for the service and the health check is healthy, Route 53 returns the applicable value for up to eight instances.

For example, suppose the service includes configurations for one A record and a health check, and you use the service to register 10 instances. Route 53 responds to DNS queries with IP addresses for up to eight healthy instances. If fewer than eight instances are healthy, Route 53 responds to every DNS query with the IP addresses for all of the healthy instances.

If you don't define a health check for the service, Route 53 assumes that all instances are healthy and returns the values for up to eight instances.

For more information about the multivalue routing policy, see Multivalue Answer Routing in the Route 53 Developer Guide.

WEIGHTED

Route 53 returns the applicable value from one randomly selected instance from among the instances that you registered using the same service. Currently, all records have the same weight, so you can't route more or less traffic to any instances.

For example, suppose the service includes configurations for one A record and a health check, and you use the service to register 10 instances. Route 53 responds to DNS queries with the IP address for one randomly selected instance from among the healthy instances. If no instances are healthy, Route 53 responds to DNS queries as if all of the instances were healthy.

If you don't define a health check for the service, Route 53 assumes that all instances are healthy and returns the applicable value for one randomly selected instance.

For more information about the weighted routing policy, see Weighted Routing in the Route 53 Developer Guide.

" + }, "DnsRecords":{ "shape":"DnsRecordList", - "documentation":"

An array that contains one DnsRecord object for each resource record set that you want Amazon Route 53 to create when you register an instance.

" + "documentation":"

An array that contains one DnsRecord object for each record that you want Route 53 to create when you register an instance.

" } }, - "documentation":"

A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance.

" + "documentation":"

A complex type that contains information about the records that you want Amazon Route 53 to create when you register an instance.

" }, "DnsConfigChange":{ "type":"structure", @@ -468,20 +475,20 @@ "members":{ "DnsRecords":{ "shape":"DnsRecordList", - "documentation":"

An array that contains one DnsRecord object for each resource record set that you want Amazon Route 53 to create when you register an instance.

" + "documentation":"

An array that contains one DnsRecord object for each record that you want Route 53 to create when you register an instance.

" } }, - "documentation":"

A complex type that contains information about changes to the resource record sets that Amazon Route 53 creates when you register an instance.

" + "documentation":"

A complex type that contains information about changes to the records that Route 53 creates when you register an instance.

" }, "DnsProperties":{ "type":"structure", "members":{ "HostedZoneId":{ "shape":"ResourceId", - "documentation":"

The ID for the hosted zone that Amazon Route 53 creates when you create a namespace.

" + "documentation":"

The ID for the hosted zone that Route 53 creates when you create a namespace.

" } }, - "documentation":"

A complex type that contains the ID for the hosted zone that Amazon Route 53 creates when you create a namespace.

" + "documentation":"

A complex type that contains the ID for the hosted zone that Route 53 creates when you create a namespace.

" }, "DnsRecord":{ "type":"structure", @@ -492,14 +499,14 @@ "members":{ "Type":{ "shape":"RecordType", - "documentation":"

The type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. The following values are supported:

  • A: Amazon Route 53 returns the IP address of the resource in IPv4 format, such as 192.0.2.44.

  • AAAA: Amazon Route 53 returns the IP address of the resource in IPv6 format, such as 2001:0db8:85a3:0000:0000:abcd:0001:2345.

  • SRV: Amazon Route 53 returns the value for an SRV record. The value for an SRV record uses the following template, which can't be changed:

    priority weight port resource-record-set-name

    The values of priority and weight are both set to 1. The value of port comes from the value that you specify for Port when you submit a RegisterInstance request.

" + "documentation":"

The type of the resource, which indicates the type of value that Route 53 returns in response to DNS queries.

Note the following:

  • A, AAAA, and SRV records: You can specify settings for a maximum of one A, one AAAA, and one SRV record. You can specify them in any combination.

  • CNAME records: If you specify CNAME for Type, you can't define any other records. This is a limitation of DNS—you can't create a CNAME record and any other type of record that has the same name as a CNAME record.

  • Alias records: If you want Route 53 to create an alias record when you register an instance, specify A or AAAA for Type.

  • All records: You specify settings other than TTL and Type when you register an instance.

The following values are supported:

A

Route 53 returns the IP address of the resource in IPv4 format, such as 192.0.2.44.

AAAA

Route 53 returns the IP address of the resource in IPv6 format, such as 2001:0db8:85a3:0000:0000:abcd:0001:2345.

CNAME

Route 53 returns the domain name of the resource, such as www.example.com. Note the following:

  • You specify the domain name that you want to route traffic to when you register an instance. For more information, see RegisterInstanceRequest$Attributes.

  • You must specify WEIGHTED for the value of RoutingPolicy.

  • You can't specify both CNAME for Type and settings for HealthCheckConfig. If you do, the request will fail with an InvalidInput error.

SRV

Route 53 returns the value for an SRV record. The value for an SRV record uses the following values:

priority weight port service-hostname

Note the following about the values:

  • The values of priority and weight are both set to 1 and can't be changed.

  • The value of port comes from the value that you specify for the AWS_INSTANCE_PORT attribute when you submit a RegisterInstance request.

  • The value of service-hostname is a concatenation of the following values:

    • The value that you specify for InstanceId when you register an instance.

    • The name of the service.

    • The name of the namespace.

    For example, if the value of InstanceId is test, the name of the service is backend, and the name of the namespace is example.com, the value of service-hostname is:

    test.backend.example.com

If you specify settings for an SRV record and if you specify values for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both in the RegisterInstance request, Route 53 automatically creates A and/or AAAA records that have the same name as the value of service-hostname in the SRV record. You can ignore these records.

" }, "TTL":{ "shape":"RecordTTL", - "documentation":"

The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set.

" + "documentation":"

The amount of time, in seconds, that you want DNS resolvers to cache the settings for this record.

Alias records don't include a TTL because Route 53 uses the TTL for the AWS resource that an alias record routes traffic to. If you include the AWS_ALIAS_DNS_NAME attribute when you submit a RegisterInstance request, the TTL value is ignored. Always specify a TTL for the service; you can use a service to register instances that create either alias or non-alias records.

" } }, - "documentation":"

A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance.

" + "documentation":"

A complex type that contains information about the records that you want Route 53 to create when you register an instance.

" }, "DnsRecordList":{ "type":"list", @@ -510,7 +517,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

This request tried to create an object that already exists.

", + "documentation":"

The operation is already in progress.

", "exception":true }, "ErrorMessage":{"type":"string"}, @@ -572,11 +579,11 @@ }, "Instances":{ "shape":"InstanceIdList", - "documentation":"

An array that contains the IDs of all the instances that you want to get the health status for. To get the IDs for the instances that you've created by using a specified service, submit a ListInstances request.

If you omit Instances, Amazon Route 53 returns the health status for all the instances that are associated with the specified service.

" + "documentation":"

An array that contains the IDs of all the instances that you want to get the health status for.

If you omit Instances, Amazon Route 53 returns the health status for all the instances that are associated with the specified service.

To get the IDs for the instances that you've registered by using a specified service, submit a ListInstances request.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of instances that you want Amazon Route 53 to return in the response to a GetInstancesHealthStatus request. If you don't specify a value for MaxResults, Amazon Route 53 returns up to 100 instances.

" + "documentation":"

The maximum number of instances that you want Route 53 to return in the response to a GetInstancesHealthStatus request. If you don't specify a value for MaxResults, Route 53 returns up to 100 instances.

" }, "NextToken":{ "shape":"NextToken", @@ -656,21 +663,22 @@ }, "HealthCheckConfig":{ "type":"structure", + "required":["Type"], "members":{ "Type":{ "shape":"HealthCheckType", - "documentation":"

The type of health check that you want to create, which indicates how Amazon Route 53 determines whether an endpoint is healthy.

You can't change the value of Type after you create a health check.

You can create the following types of health checks:

  • HTTP: Amazon Route 53 tries to establish a TCP connection. If successful, Amazon Route 53 submits an HTTP request and waits for an HTTP status code of 200 or greater and less than 400.

  • HTTPS: Amazon Route 53 tries to establish a TCP connection. If successful, Amazon Route 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater and less than 400.

    If you specify HTTPS for the value of Type, the endpoint must support TLS v1.0 or later.

  • TCP: Amazon Route 53 tries to establish a TCP connection.

For more information, see How Amazon Route 53 Determines Whether an Endpoint Is Healthy in the Amazon Route 53 Developer Guide.

" + "documentation":"

The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy.

You can't change the value of Type after you create a health check.

You can create the following types of health checks:

  • HTTP: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and waits for an HTTP status code of 200 or greater and less than 400.

  • HTTPS: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater and less than 400.

    If you specify HTTPS for the value of Type, the endpoint must support TLS v1.0 or later.

  • TCP: Route 53 tries to establish a TCP connection.

For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide.

" }, "ResourcePath":{ "shape":"ResourcePath", - "documentation":"

The path that you want Amazon Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, such as the file /docs/route53-health-check.html. Amazon Route 53 automatically adds the DNS name for the service and a leading forward slash (/) character.

" + "documentation":"

The path that you want Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, such as the file /docs/route53-health-check.html. Route 53 automatically adds the DNS name for the service and a leading forward slash (/) character.

" }, "FailureThreshold":{ "shape":"FailureThreshold", - "documentation":"

The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Amazon Route 53 Determines Whether an Endpoint Is Healthy in the Amazon Route 53 Developer Guide.

" + "documentation":"

The number of consecutive health checks that an endpoint must pass or fail for Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide.

" } }, - "documentation":"

Public DNS namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, Amazon Route 53 associates the health check with all the resource record sets that you specify in DnsConfig.

The health check uses 30 seconds as the request interval. This is the number of seconds between the time that each Amazon Route 53 health checker gets a response from your endpoint and the time that it sends the next health check request. A health checker in each data center around the world sends your endpoint a health check request every 30 seconds. On average, your endpoint receives a health check request about every two seconds. Health checkers in different data centers don't coordinate with one another, so you'll sometimes see several requests per second followed by a few seconds with no health checks at all.

For information about the charges for health checks, see Amazon Route 53 Pricing.

" + "documentation":"

Public DNS namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, Amazon Route 53 associates the health check with all the records that you specify in DnsConfig.

A and AAAA records

If DnsConfig includes configurations for both A and AAAA records, Route 53 creates a health check that uses the IPv4 address to check the health of the resource. If the endpoint that is specified by the IPv4 address is unhealthy, Route 53 considers both the A and AAAA records to be unhealthy.

CNAME records

You can't specify settings for HealthCheckConfig when the DNSConfig includes CNAME for the value of Type. If you do, the CreateService request will fail with an InvalidInput error.

Request interval

The health check uses 30 seconds as the request interval. This is the number of seconds between the time that each Route 53 health checker gets a response from your endpoint and the time that it sends the next health check request. A health checker in each data center around the world sends your endpoint a health check request every 30 seconds. On average, your endpoint receives a health check request about every two seconds. Health checkers in different data centers don't coordinate with one another, so you'll sometimes see several requests per second followed by a few seconds with no health checks at all.

Health checking regions

Health checkers perform checks from all Route 53 health-checking regions. For a list of the current regions, see Regions.

Alias records

When you register an instance, if you include the AWS_ALIAS_DNS_NAME attribute, Route 53 creates an alias record. Note the following:

  • Route 53 automatically sets EvaluateTargetHealth to true for alias records. When EvaluateTargetHealth is true, the alias record inherits the health of the referenced AWS resource, such as an ELB load balancer. For more information, see EvaluateTargetHealth.

  • If you include HealthCheckConfig and then use the service to register an instance that creates an alias record, Route 53 doesn't create the health check.

For information about the charges for health checks, see Route 53 Pricing.

" }, "HealthCheckType":{ "type":"string", @@ -694,15 +702,15 @@ "members":{ "Id":{ "shape":"ResourceId", - "documentation":"

An identifier that you want to associate with the instance. Note the following:

  • You can use this value to update an existing instance.

  • To associate a new instance, you must specify a value that is unique among instances that you associate by using the same service.

" + "documentation":"

An identifier that you want to associate with the instance. Note the following:

  • If the service that is specified by ServiceId includes settings for an SRV record, the value of InstanceId is automatically included as part of the value for the SRV record. For more information, see DnsRecord$Type.

  • You can use this value to update an existing instance.

  • To register a new instance, you must specify a value that is unique among instances that you register by using the same service.

  • If you specify an existing InstanceId and ServiceId, Route 53 updates the existing records. If there's also an existing health check, Route 53 deletes the old health check and creates a new one.

    The health check isn't deleted immediately, so it will still appear for a while if you submit a ListHealthChecks request, for example.

" }, "CreatorRequestId":{ "shape":"ResourceId", - "documentation":"

An optional parameter that you can use to resolve concurrent creation requests. CreatorRequestId helps to determine if a specific client owns the namespace.

" + "documentation":"

A unique string that identifies the request and that allows failed RegisterInstance requests to be retried without the risk of executing the operation twice. You must use a unique CreatorRequestId string every time you submit a RegisterInstance request if you're registering additional instances for the same namespace and service. CreatorRequestId can be any unique string, for example, a date/time stamp.

" }, "Attributes":{ "shape":"Attributes", - "documentation":"

A string map that contains attribute keys and values. Supported attribute keys include the following:

  • AWS_INSTANCE_PORT: The port on the endpoint that you want Amazon Route 53 to perform health checks on. This value is also used for the port value in an SRV record if the service that you specify includes an SRV record. For more information, see CreateService.

  • AWS_INSTANCE_IP: If the service that you specify contains a resource record set template for an A or AAAA record, the IP address that you want Amazon Route 53 to use for the value of the A record.

  • AWS_INSTANCE_WEIGHT: The weight value in an SRV record if the service that you specify includes an SRV record. You can also specify a default weight that is applied to all instances in the Service configuration. For more information, see CreateService.

  • AWS_INSTANCE_PRIORITY: The priority value in an SRV record if the service that you specify includes an SRV record.

" + "documentation":"

A string map that contains the following information for the service that you specify in ServiceId:

  • The attributes that apply to the records that are defined in the service.

  • For each attribute, the applicable value.

Supported attribute keys include the following:

  • AWS_ALIAS_DNS_NAME: If you want Route 53 to create an alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that is associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget.

    Note the following:

    • The configuration for the service that is specified by ServiceId must include settings for an A record, an AAAA record, or both.

    • In the service that is specified by ServiceId, the value of RoutingPolicy must be WEIGHTED.

    • If the service that is specified by ServiceId includes HealthCheckConfig settings, Route 53 will create the health check, but it won't associate the health check with the alias record.

    • Auto naming currently doesn't support creating alias records that route traffic to AWS resources other than ELB load balancers.

    • If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE attributes.

  • AWS_INSTANCE_CNAME: If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries, for example, example.com.

    This value is required if the service specified by ServiceId includes settings for a CNAME record.

  • AWS_INSTANCE_IPV4: If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries, for example, 192.0.2.44.

    This value is required if the service specified by ServiceId includes settings for an A record. Either AWS_INSTANCE_IPV4 or AWS_INSTANCE_IPV6 is required if the service includes settings for an SRV record.

  • AWS_INSTANCE_IPV6: If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

    This value is required if the service specified by ServiceId includes settings for an AAAA record. Either AWS_INSTANCE_IPV4 or AWS_INSTANCE_IPV6 is required if the service includes settings for an SRV record.

  • AWS_INSTANCE_PORT: If the service includes an SRV record, the value that you want Route 53 to return for the port. In addition, if the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to. For more information, see CreateService.

    This value is required if you specified settings for an SRV record when you created the service.

" } }, "documentation":"

A complex type that contains information about an instance that Amazon Route 53 creates when you submit a RegisterInstance request.

" @@ -722,7 +730,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

No instance exists with the specified ID.

", + "documentation":"

No instance exists with the specified ID, or the instance was recently registered, and information about the instance hasn't propagated yet.

", "exception":true }, "InstanceSummary":{ @@ -734,10 +742,10 @@ }, "Attributes":{ "shape":"Attributes", - "documentation":"

A string map that contain attribute keys and values for an instance. Supported attribute keys include the following:

  • AWS_INSTANCE_PORT: The port on the endpoint that you want Amazon Route 53 to perform health checks on. This value is also used for the port value in an SRV record if the service that you specify includes an SRV record. For more information, see CreateService.

  • AWS_INSTANCE_IP: If the service that you specify contains a resource record set template for an A or AAAA record, the IP address that you want Amazon Route 53 to use for the value of the A record.

" + "documentation":"

A string map that contains the following information:

  • The attributes that are associated with the instance.

  • For each attribute, the applicable value.

Supported attribute keys include the following:

  • AWS_ALIAS_DNS_NAME: For an alias record that routes traffic to an Elastic Load Balancing load balancer, the DNS name that is associated with the load balancer.

  • AWS_INSTANCE_CNAME: For a CNAME record, the domain name that Route 53 returns in response to DNS queries, for example, example.com.

  • AWS_INSTANCE_IPV4: For an A record, the IPv4 address that Route 53 returns in response to DNS queries, for example, 192.0.2.44.

  • AWS_INSTANCE_IPV6: For an AAAA record, the IPv6 address that Route 53 returns in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

  • AWS_INSTANCE_PORT: For an SRV record, the value that Route 53 returns for the port. In addition, if the service includes HealthCheckConfig, the port on the endpoint that Route 53 sends requests to.

" } }, - "documentation":"

A complex type that contains information about the instances that you created by using a specified service.

" + "documentation":"

A complex type that contains information about the instances that you registered by using a specified service.

" }, "InstanceSummaryList":{ "type":"list", @@ -765,7 +773,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of instances that you want Amazon Route 53 to return in the response to a ListInstances request. If you don't specify a value for MaxResults, Amazon Route 53 returns up to 100 instances.

" + "documentation":"

The maximum number of instances that you want Amazon Route 53 to return in the response to a ListInstances request. If you don't specify a value for MaxResults, Route 53 returns up to 100 instances.

" } } }, @@ -787,15 +795,15 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

For the first ListNamespaces request, omit this value.

If more than MaxResults namespaces match the specified criteria, you can submit another ListNamespaces request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

" + "documentation":"

For the first ListNamespaces request, omit this value.

If the response contains NextToken, submit another ListNamespaces request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

Route 53 gets MaxResults namespaces and then filters them based on the specified criteria. It's possible that no namespaces in the first MaxResults namespaces matched the specified criteria but that subsequent groups of MaxResults namespaces do contain namespaces that match the criteria.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of namespaces that you want Amazon Route 53 to return in the response to a ListNamespaces request. If you don't specify a value for MaxResults, Amazon Route 53 returns up to 100 namespaces.

" + "documentation":"

The maximum number of namespaces that you want Amazon Route 53 to return in the response to a ListNamespaces request. If you don't specify a value for MaxResults, Route 53 returns up to 100 namespaces.

" }, "Filters":{ "shape":"NamespaceFilters", - "documentation":"

A complex type that contains specifications for the namespaces that you want to list.

If you specify more than one filter, an operation must match all filters to be returned by ListNamespaces.

" + "documentation":"

A complex type that contains specifications for the namespaces that you want to list.

If you specify more than one filter, a namespace must match all filters to be returned by ListNamespaces.

" } } }, @@ -808,7 +816,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

If more than MaxResults namespaces match the specified criteria, you can submit another ListNamespaces request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

" + "documentation":"

If the response contains NextToken, submit another ListNamespaces request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

Route 53 gets MaxResults namespaces and then filters them based on the specified criteria. It's possible that no namespaces in the first MaxResults namespaces matched the specified criteria but that subsequent groups of MaxResults namespaces do contain namespaces that match the criteria.

" } } }, @@ -817,11 +825,11 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

For the first ListOperations request, omit this value.

If more than MaxResults operations match the specified criteria, you can submit another ListOperations request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

" + "documentation":"

For the first ListOperations request, omit this value.

If the response contains NextToken, submit another ListOperations request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

Route 53 gets MaxResults operations and then filters them based on the specified criteria. It's possible that no operations in the first MaxResults operations matched the specified criteria but that subsequent groups of MaxResults operations do contain operations that match the criteria.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of items that you want Amazon Route 53 to return in the response to a ListOperations request. If you don't specify a value for MaxResults, Amazon Route 53 returns up to 100 operations.

" + "documentation":"

The maximum number of items that you want Amazon Route 53 to return in the response to a ListOperations request. If you don't specify a value for MaxResults, Route 53 returns up to 100 operations.

" }, "Filters":{ "shape":"OperationFilters", @@ -838,7 +846,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

If more than MaxResults operations match the specified criteria, you can submit another ListOperations request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

" + "documentation":"

If the response contains NextToken, submit another ListOperations request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

Route 53 gets MaxResults operations and then filters them based on the specified criteria. It's possible that no operations in the first MaxResults operations matched the specified criteria but that subsequent groups of MaxResults operations do contain operations that match the criteria.

" } } }, @@ -847,11 +855,11 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

For the first ListServices request, omit this value.

If more than MaxResults services match the specified criteria, you can submit another ListServices request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

" + "documentation":"

For the first ListServices request, omit this value.

If the response contains NextToken, submit another ListServices request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

Route 53 gets MaxResults services and then filters them based on the specified criteria. It's possible that no services in the first MaxResults services matched the specified criteria but that subsequent groups of MaxResults services do contain services that match the criteria.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of services that you want Amazon Route 53 to return in the response to a ListServices request. If you don't specify a value for MaxResults, Amazon Route 53 returns up to 100 services.

" + "documentation":"

The maximum number of services that you want Amazon Route 53 to return in the response to a ListServices request. If you don't specify a value for MaxResults, Route 53 returns up to 100 services.

" }, "Filters":{ "shape":"ServiceFilters", @@ -868,7 +876,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

If more than MaxResults operations match the specified criteria, the value of NextToken is the first service in the next group of services that were created by the current AWS account. To get the next group, specify the value of NextToken from the previous response in the next request.

" + "documentation":"

If the response contains NextToken, submit another ListServices request to get the next group of results. Specify the value of NextToken from the previous response in the next request.

Route 53 gets MaxResults services and then filters them based on the specified criteria. It's possible that no services in the first MaxResults services matched the specified criteria but that subsequent groups of MaxResults services do contain services that match the criteria.

" } } }, @@ -887,7 +895,7 @@ }, "Arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that Amazon Route 53 assigns to the namespace when you create it.

" + "documentation":"

The Amazon Resource Name (ARN) that Route 53 assigns to the namespace when you create it.

" }, "Name":{ "shape":"NamespaceName", @@ -911,11 +919,11 @@ }, "CreateDate":{ "shape":"Timestamp", - "documentation":"

The date that the namespace was created, in Unix date/time format and Coordinated Universal Time (UTC).

" + "documentation":"

The date that the namespace was created, in Unix date/time format and Coordinated Universal Time (UTC). The value of CreateDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" }, "CreatorRequestId":{ "shape":"ResourceId", - "documentation":"

An optional parameter that you can use to resolve concurrent creation requests. CreatorRequestId helps to determine if a specific client owns the namespace.

" + "documentation":"

A unique string that identifies the request and that allows failed requests to be retried without the risk of executing an operation twice.

" } }, "documentation":"

A complex type that contains information about a specified namespace.

" @@ -953,7 +961,7 @@ }, "Condition":{ "shape":"FilterCondition", - "documentation":"

The operator that you want to use to determine whether ListNamespaces returns a namespace. Valid values for condition include:

  • EQ: When you specify EQ for the condition, you can choose to list only public namespaces or private namespaces, but not both. EQ is the default condition and can be omitted.

  • IN: When you specify IN for the condition, you can choose to list public namespaces, private namespaces, or both.

" + "documentation":"

The operator that you want to use to determine whether ListNamespaces returns a namespace. Valid values for condition include:

  • EQ: When you specify EQ for the condition, you can choose to list only public namespaces or private namespaces, but not both. EQ is the default condition and can be omitted.

  • IN: When you specify IN for the condition, you can choose to list public namespaces, private namespaces, or both.

  • BETWEEN: Not applicable

" } }, "documentation":"

A complex type that identifies the namespaces that you want to list. You can choose to list public or private namespaces.

" @@ -983,7 +991,7 @@ "members":{ "DnsProperties":{ "shape":"DnsProperties", - "documentation":"

A complex type that contains the ID for the hosted zone that Amazon Route 53 creates when you create a namespace.

" + "documentation":"

A complex type that contains the ID for the hosted zone that Route 53 creates when you create a namespace.

" } }, "documentation":"

A complex type that contains information that is specific to the namespace type.

" @@ -1001,11 +1009,11 @@ }, "Arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that Amazon Route 53 assigns to the namespace when you create it.

" + "documentation":"

The Amazon Resource Name (ARN) that Route 53 assigns to the namespace when you create it.

" }, "Name":{ "shape":"NamespaceName", - "documentation":"

The name of the namespace. When you create a namespace, Amazon Route 53 automatically creates a hosted zone that has the same name as the namespace.

" + "documentation":"

The name of the namespace. When you create a namespace, Route 53 automatically creates a hosted zone that has the same name as the namespace.

" }, "Type":{ "shape":"NamespaceType", @@ -1038,7 +1046,7 @@ }, "Status":{ "shape":"OperationStatus", - "documentation":"

The status of the operation. Values include the following:

  • SUBMITTED: This is the initial state immediately after you submit a request.

  • PENDING: Amazon Route 53 is performing the operation.

  • SUCCESS: The operation succeeded.

  • FAIL: The operation failed. For the failure reason, see ErrorMessage.

" + "documentation":"

The status of the operation. Values include the following:

  • SUBMITTED: This is the initial state immediately after you submit a request.

  • PENDING: Route 53 is performing the operation.

  • SUCCESS: The operation succeeded.

  • FAIL: The operation failed. For the failure reason, see ErrorMessage.

" }, "ErrorMessage":{ "shape":"Message", @@ -1046,15 +1054,15 @@ }, "ErrorCode":{ "shape":"Code", - "documentation":"

The code associated with ErrorMessage.

" + "documentation":"

The code associated with ErrorMessage. Values for ErrorCode include the following:

  • ACCESS_DENIED

  • CANNOT_CREATE_HOSTED_ZONE

  • EXPIRED_TOKEN

  • HOSTED_ZONE_NOT_FOUND

  • INTERNAL_FAILURE

  • INVALID_CHANGE_BATCH

  • THROTTLED_REQUEST

" }, "CreateDate":{ "shape":"Timestamp", - "documentation":"

The date and time that the request was submitted, in Unix date/time format and Coordinated Universal Time (UTC).

" + "documentation":"

The date and time that the request was submitted, in Unix date/time format and Coordinated Universal Time (UTC). The value of CreateDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" }, "UpdateDate":{ "shape":"Timestamp", - "documentation":"

The date and time that the value of Status changed to the current value, in Unix date/time format and Coordinated Universal Time (UTC).

" + "documentation":"

The date and time that the value of Status changed to the current value, in Unix date/time format and Coordinated Universal Time (UTC). The value of UpdateDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" }, "Targets":{ "shape":"OperationTargetsMap", @@ -1080,7 +1088,7 @@ }, "Condition":{ "shape":"FilterCondition", - "documentation":"

The operator that you want to use to determine whether an operation matches the specified value. Valid values for condition include:

  • EQ: When you specify EQ for the condition, you can specify only one value. EQ is supported for NAMESPACE_ID, SERVICE_ID, STATUS, and TYPE. EQ is the default condition and can be omitted.

  • IN: When you specify IN for the condition, you can specify a list of one or more values. IN is supported for STATUS and TYPE. An operation must match one of the specified values to be returned in the response.

  • BETWEEN: Specify two values, a start date and an end date. The start date must be the first value. BETWEEN is supported for U.

" + "documentation":"

The operator that you want to use to determine whether an operation matches the specified value. Valid values for condition include:

  • EQ: When you specify EQ for the condition, you can specify only one value. EQ is supported for NAMESPACE_ID, SERVICE_ID, STATUS, and TYPE. EQ is the default condition and can be omitted.

  • IN: When you specify IN for the condition, you can specify a list of one or more values. IN is supported for STATUS and TYPE. An operation must match one of the specified values to be returned in the response.

  • BETWEEN: Specify a start date and an end date in Unix date/time format and Coordinated Universal Time (UTC). The start date must be the first value. BETWEEN is supported for UPDATE_DATE.

" } }, "documentation":"

A complex type that lets you select the operations that you want to list.

" @@ -1129,7 +1137,7 @@ }, "Status":{ "shape":"OperationStatus", - "documentation":"

The status of the operation. Values include the following:

  • SUBMITTED: This is the initial state immediately after you submit a request.

  • PENDING: Amazon Route 53 is performing the operation.

  • SUCCESS: The operation succeeded.

  • FAIL: The operation failed. For the failure reason, see ErrorMessage.

" + "documentation":"

The status of the operation. Values include the following:

  • SUBMITTED: This is the initial state immediately after you submit a request.

  • PENDING: Route 53 is performing the operation.

  • SUCCESS: The operation succeeded.

  • FAIL: The operation failed. For the failure reason, see ErrorMessage.

" } }, "documentation":"

A complex type that contains information about an operation that matches the criteria that you specified in a ListOperations request.

" @@ -1171,7 +1179,8 @@ "enum":[ "SRV", "A", - "AAAA" + "AAAA", + "CNAME" ] }, "RegisterInstanceRequest":{ @@ -1184,20 +1193,20 @@ "members":{ "ServiceId":{ "shape":"ResourceId", - "documentation":"

The ID of the service that you want to use for settings for the resource record sets and health check that Amazon Route 53 will create.

" + "documentation":"

The ID of the service that you want to use for settings for the records and health check that Route 53 will create.

" }, "InstanceId":{ "shape":"ResourceId", - "documentation":"

An identifier that you want to associate with the instance. Note the following:

  • You can use this value to update an existing instance.

  • To register a new instance, you must specify a value that is unique among instances that you register by using the same service.

" + "documentation":"

An identifier that you want to associate with the instance. Note the following:

  • If the service that is specified by ServiceId includes settings for an SRV record, the value of InstanceId is automatically included as part of the value for the SRV record. For more information, see DnsRecord$Type.

  • You can use this value to update an existing instance.

  • To register a new instance, you must specify a value that is unique among instances that you register by using the same service.

  • If you specify an existing InstanceId and ServiceId, Route 53 updates the existing records. If there's also an existing health check, Route 53 deletes the old health check and creates a new one.

    The health check isn't deleted immediately, so it will still appear for a while if you submit a ListHealthChecks request, for example.

" }, "CreatorRequestId":{ "shape":"ResourceId", - "documentation":"

An optional parameter that you can use to resolve concurrent creation requests. CreatorRequestId helps to determine if a specific client owns the namespace.

", + "documentation":"

A unique string that identifies the request and that allows failed RegisterInstance requests to be retried without the risk of executing the operation twice. You must use a unique CreatorRequestId string every time you submit a RegisterInstance request if you're registering additional instances for the same namespace and service. CreatorRequestId can be any unique string, for example, a date/time stamp.

", "idempotencyToken":true }, "Attributes":{ "shape":"Attributes", - "documentation":"

A string map that contain attribute keys and values. Supported attribute keys include the following:

  • AWS_INSTANCE_PORT: The port on the endpoint that you want Amazon Route 53 to perform health checks on. This value is also used for the port value in an SRV record if the service that you specify includes an SRV record. For more information, see CreateService.

  • AWS_INSTANCE_IPV4: If the service that you specify contains a resource record set template for an A record, the IPv4 address that you want Amazon Route 53 to use for the value of the A record.

  • AWS_INSTANCE_IPV6: If the service that you specify contains a resource record set template for an AAAA record, the IPv6 address that you want Amazon Route 53 to use for the value of the AAAA record.

" + "documentation":"

A string map that contains the following information for the service that you specify in ServiceId:

  • The attributes that apply to the records that are defined in the service.

  • For each attribute, the applicable value.

Supported attribute keys include the following:

AWS_ALIAS_DNS_NAME

If you want Route 53 to create an alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that is associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget.

Note the following:

  • The configuration for the service that is specified by ServiceId must include settings for an A record, an AAAA record, or both.

  • In the service that is specified by ServiceId, the value of RoutingPolicy must be WEIGHTED.

  • If the service that is specified by ServiceId includes HealthCheckConfig settings, Route 53 will create the health check, but it won't associate the health check with the alias record.

  • Auto naming currently doesn't support creating alias records that route traffic to AWS resources other than ELB load balancers.

  • If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE attributes.

AWS_INSTANCE_CNAME

If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries, for example, example.com.

This value is required if the service specified by ServiceId includes settings for a CNAME record.

AWS_INSTANCE_IPV4

If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries, for example, 192.0.2.44.

This value is required if the service specified by ServiceId includes settings for an A record. Either AWS_INSTANCE_IPV4 or AWS_INSTANCE_IPV6 is required if the service includes settings for an SRV record.

AWS_INSTANCE_IPV6

If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

This value is required if the service specified by ServiceId includes settings for an AAAA record. Either AWS_INSTANCE_IPV4 or AWS_INSTANCE_IPV6 is required if the service includes settings for an SRV record.

AWS_INSTANCE_PORT

If the service includes an SRV record, the value that you want Route 53 to return for the port.

If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to.

This value is required if you specified settings for an SRV record when you created the service.

" } } }, @@ -1239,16 +1248,23 @@ "type":"string", "max":255 }, + "RoutingPolicy":{ + "type":"string", + "enum":[ + "MULTIVALUE", + "WEIGHTED" + ] + }, "Service":{ "type":"structure", "members":{ "Id":{ "shape":"ResourceId", - "documentation":"

The ID that Amazon Route 53 assigned to the service when you created it.

" + "documentation":"

The ID that Route 53 assigned to the service when you created it.

" }, "Arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that Amazon Route 53 assigns to the service when you create it.

" + "documentation":"

The Amazon Resource Name (ARN) that Route 53 assigns to the service when you create it.

" }, "Name":{ "shape":"ServiceName", @@ -1264,19 +1280,19 @@ }, "DnsConfig":{ "shape":"DnsConfig", - "documentation":"

A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance.

" + "documentation":"

A complex type that contains information about the records that you want Route 53 to create when you register an instance.

" }, "HealthCheckConfig":{ "shape":"HealthCheckConfig", - "documentation":"

Public DNS namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, Amazon Route 53 associates the health check with all the resource record sets that you specify in DnsConfig.

The health check uses 30 seconds as the request interval. This is the number of seconds between the time that each Amazon Route 53 health checker gets a response from your endpoint and the time that it sends the next health check request. A health checker in each data center around the world sends your endpoint a health check request every 30 seconds. On average, your endpoint receives a health check request about every two seconds. Health checkers in different data centers don't coordinate with one another, so you'll sometimes see several requests per second followed by a few seconds with no health checks at all.

For information about the charges for health checks, see Amazon Route 53 Pricing.

" + "documentation":"

Public DNS namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, Route 53 associates the health check with all the records that you specify in DnsConfig.

For information about the charges for health checks, see Route 53 Pricing.

" }, "CreateDate":{ "shape":"Timestamp", - "documentation":"

The date and time that the service was created, in Unix format and Coordinated Universal Time (UTC).

" + "documentation":"

The date and time that the service was created, in Unix format and Coordinated Universal Time (UTC). The value of CreateDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" }, "CreatorRequestId":{ "shape":"ResourceId", - "documentation":"

An optional parameter that you can use to resolve concurrent creation requests. CreatorRequestId helps to determine if a specific client owns the namespace.

" + "documentation":"

A unique string that identifies the request and that allows failed requests to be retried without the risk of executing the operation twice. CreatorRequestId can be any unique string, for example, a date/time stamp.

" } }, "documentation":"

A complex type that contains information about the specified service.

" @@ -1307,7 +1323,7 @@ }, "DnsConfig":{ "shape":"DnsConfigChange", - "documentation":"

A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance.

" + "documentation":"

A complex type that contains information about the records that you want Route 53 to create when you register an instance.

" }, "HealthCheckConfig":{"shape":"HealthCheckConfig"} }, @@ -1330,7 +1346,7 @@ }, "Condition":{ "shape":"FilterCondition", - "documentation":"

The operator that you want to use to determine whether a service is returned by ListServices. Valid values for Condition include the following:

  • EQ: When you specify EQ, specify one namespace ID for Values. EQ is the default condition and can be omitted.

  • IN: When you specify IN, specify a list of the IDs for the namespaces that you want ListServices to return a list of services for.

" + "documentation":"

The operator that you want to use to determine whether a service is returned by ListServices. Valid values for Condition include the following:

  • EQ: When you specify EQ, specify one namespace ID for Values. EQ is the default condition and can be omitted.

  • IN: When you specify IN, specify a list of the IDs for the namespaces that you want ListServices to return a list of services for.

  • BETWEEN: Not applicable.

" } }, "documentation":"

A complex type that lets you specify the namespaces that you want to list services for.

" @@ -1364,11 +1380,11 @@ "members":{ "Id":{ "shape":"ResourceId", - "documentation":"

The ID that Amazon Route 53 assigned to the service when you created it.

" + "documentation":"

The ID that Route 53 assigned to the service when you created it.

" }, "Arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that Amazon Route 53 assigns to the service when you create it.

" + "documentation":"

The Amazon Resource Name (ARN) that Route 53 assigns to the service when you create it.

" }, "Name":{ "shape":"ServiceName", @@ -1413,5 +1429,5 @@ } } }, - "documentation":"

Amazon Route 53 autonaming lets you configure public or private namespaces that your microservice applications run in. When instances of the service become available, you can call the autonaming API to register the instance, and Amazon Route 53 automatically creates up to five DNS records and an optional health check. Clients that submit DNS queries for the service receive an answer that contains up to eight healthy records.

" + "documentation":"

Amazon Route 53 auto naming lets you configure public or private namespaces that your microservice applications run in. When instances of the service become available, you can call the auto naming API to register the instance, and Route 53 automatically creates up to five DNS records and an optional health check. Clients that submit DNS queries for the service receive an answer that contains up to eight healthy records.

" } diff --git a/botocore/data/snowball/2016-06-30/examples-1.json b/botocore/data/snowball/2016-06-30/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/sns/2010-03-31/examples-1.json b/botocore/data/sns/2010-03-31/examples-1.json old mode 100755 new mode 100644 diff --git a/botocore/data/sns/2010-03-31/service-2.json b/botocore/data/sns/2010-03-31/service-2.json old mode 100755 new mode 100644 diff --git a/botocore/data/ssm/2014-11-06/paginators-1.json b/botocore/data/ssm/2014-11-06/paginators-1.json index 483ca416..94790e81 100644 --- a/botocore/data/ssm/2014-11-06/paginators-1.json +++ b/botocore/data/ssm/2014-11-06/paginators-1.json @@ -43,18 +43,12 @@ "result_key": "Parameters" }, "GetParametersByPath": { - "input_token": "NextToken", - "output_token": "NextToken", - "limit_key": "MaxResults", - "result_key": "Parameters" - }, - "GetParameterHistory": { "result_key": "Parameters", "output_token": "NextToken", "input_token": "NextToken", "limit_key": "MaxResults" }, - "GetParametersByPath": { + "GetParameterHistory": { "result_key": "Parameters", "output_token": "NextToken", "input_token": "NextToken", diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index a403a28e..2700c01d 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -3084,7 +3084,7 @@ }, "Name":{ "shape":"DocumentName", - "documentation":"

A name for the Systems Manager document.

" + "documentation":"

A name for the Systems Manager document.

Do not use the following to begin the names of documents you create. They are reserved by AWS for use as document prefixes:

  • aws

  • amazon

  • amzn

" }, "DocumentType":{ "shape":"DocumentType", @@ -3187,6 +3187,11 @@ "shape":"PatchComplianceLevel", "documentation":"

Defines the compliance level for approved patches. This means that if an approved patch is reported as missing, this is the severity of the compliance violation. Valid compliance severity levels include the following: CRITICAL, HIGH, MEDIUM, LOW, INFORMATIONAL, UNSPECIFIED. The default value is UNSPECIFIED.

" }, + "ApprovedPatchesEnableNonSecurity":{ + "shape":"Boolean", + "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.

", + "box":true + }, "RejectedPatches":{ "shape":"PatchIdList", "documentation":"

A list of explicitly rejected patches for the baseline.

" @@ -3195,6 +3200,10 @@ "shape":"BaselineDescription", "documentation":"

A description of the patch baseline.

" }, + "Sources":{ + "shape":"PatchSourceList", + "documentation":"

Information about the patches to use to update the instances, including target operating systems and source repositories. Applies to Linux instances only.

" + }, "ClientToken":{ "shape":"ClientToken", "documentation":"

User-provided idempotency token.

", @@ -5640,6 +5649,11 @@ "shape":"PatchComplianceLevel", "documentation":"

Returns the specified compliance severity level for approved patches in the patch baseline.

" }, + "ApprovedPatchesEnableNonSecurity":{ + "shape":"Boolean", + "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.

", + "box":true + }, "RejectedPatches":{ "shape":"PatchIdList", "documentation":"

A list of explicitly rejected patches for the baseline.

" @@ -5659,6 +5673,10 @@ "Description":{ "shape":"BaselineDescription", "documentation":"

A description of the patch baseline.

" + }, + "Sources":{ + "shape":"PatchSourceList", + "documentation":"

Information about the patches to use to update the instances, including target operating systems and source repositories. Applies to Linux instances only.

" } } }, @@ -7948,7 +7966,8 @@ "WINDOWS", "AMAZON_LINUX", "UBUNTU", - "REDHAT_ENTERPRISE_LINUX" + "REDHAT_ENTERPRISE_LINUX", + "SUSE" ] }, "OwnerInformation":{ @@ -8445,7 +8464,7 @@ "documentation":"

The value for the filter key.

See PatchFilter for lists of valid values for each key based on operating system type.

" } }, - "documentation":"

Defines a patch filter.

A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. The key MSRC_SEVERITY, however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.

Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.

Windows Operating Systems

The supported keys for Windows operating systems are PRODUCT, CLASSIFICATION, and MSRC_SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Windows7

  • Windows8

  • Windows8.1

  • Windows8Embedded

  • Windows10

  • Windows10LTSB

  • WindowsServer2008

  • WindowsServer2008R2

  • WindowsServer2012

  • WindowsServer2012R2

  • WindowsServer2016

Supported key: CLASSIFICATION

Supported values:

  • CriticalUpdates

  • DefinitionUpdates

  • Drivers

  • FeaturePacks

  • SecurityUpdates

  • ServicePacks

  • Tools

  • UpdateRollups

  • Updates

  • Upgrades

Supported key: MSRC_SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

  • Unspecified

Ubuntu Operating Systems

The supported keys for Ubuntu operating systems are PRODUCT, PRIORITY, and SECTION. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Ubuntu14.04

  • Ubuntu16.04

Supported key: PRIORITY

Supported values:

  • Required

  • Important

  • Standard

  • Optional

  • Extra

Supported key: SECTION

Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.

Amazon Linux Operating Systems

The supported keys for Amazon Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • AmazonLinux2012.03

  • AmazonLinux2012.09

  • AmazonLinux2013.03

  • AmazonLinux2013.09

  • AmazonLinux2014.03

  • AmazonLinux2014.09

  • AmazonLinux2015.03

  • AmazonLinux2015.09

  • AmazonLinux2016.03

  • AmazonLinux2016.09

  • AmazonLinux2017.03

  • AmazonLinux2017.09

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

RedHat Enterprise Linux (RHEL) Operating Systems

The supported keys for RedHat Enterprise Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • RedhatEnterpriseLinux6.5

  • RedhatEnterpriseLinux6.6

  • RedhatEnterpriseLinux6.7

  • RedhatEnterpriseLinux6.8

  • RedhatEnterpriseLinux6.9

  • RedhatEnterpriseLinux7.0

  • RedhatEnterpriseLinux7.1

  • RedhatEnterpriseLinux7.2

  • RedhatEnterpriseLinux7.3

  • RedhatEnterpriseLinux7.4

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

" + "documentation":"

Defines a patch filter.

A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. The key MSRC_SEVERITY, however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.

Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.

Windows Operating Systems

The supported keys for Windows operating systems are PRODUCT, CLASSIFICATION, and MSRC_SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Windows7

  • Windows8

  • Windows8.1

  • Windows8Embedded

  • Windows10

  • Windows10LTSB

  • WindowsServer2008

  • WindowsServer2008R2

  • WindowsServer2012

  • WindowsServer2012R2

  • WindowsServer2016

Supported key: CLASSIFICATION

Supported values:

  • CriticalUpdates

  • DefinitionUpdates

  • Drivers

  • FeaturePacks

  • SecurityUpdates

  • ServicePacks

  • Tools

  • UpdateRollups

  • Updates

  • Upgrades

Supported key: MSRC_SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

  • Unspecified

Ubuntu Operating Systems

The supported keys for Ubuntu operating systems are PRODUCT, PRIORITY, and SECTION. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Ubuntu14.04

  • Ubuntu16.04

Supported key: PRIORITY

Supported values:

  • Required

  • Important

  • Standard

  • Optional

  • Extra

Supported key: SECTION

Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.

Amazon Linux Operating Systems

The supported keys for Amazon Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • AmazonLinux2012.03

  • AmazonLinux2012.09

  • AmazonLinux2013.03

  • AmazonLinux2013.09

  • AmazonLinux2014.03

  • AmazonLinux2014.09

  • AmazonLinux2015.03

  • AmazonLinux2015.09

  • AmazonLinux2016.03

  • AmazonLinux2016.09

  • AmazonLinux2017.03

  • AmazonLinux2017.09

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

RedHat Enterprise Linux (RHEL) Operating Systems

The supported keys for RedHat Enterprise Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • RedhatEnterpriseLinux6.5

  • RedhatEnterpriseLinux6.6

  • RedhatEnterpriseLinux6.7

  • RedhatEnterpriseLinux6.8

  • RedhatEnterpriseLinux6.9

  • RedhatEnterpriseLinux7.0

  • RedhatEnterpriseLinux7.1

  • RedhatEnterpriseLinux7.2

  • RedhatEnterpriseLinux7.3

  • RedhatEnterpriseLinux7.4

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

SUSE Linux Enterprise Server (SUSE) Operating Systems

The supported keys for SUSE operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Suse12.0

  • Suse12.1

  • Suse12.2

  • Suse12.3

  • Suse12.4

  • Suse12.5

  • Suse12.6

  • Suse12.7

  • Suse12.8

  • Suse12.9

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Recommended

  • Optional

  • Feature

  • Document

  • Yast

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

" }, "PatchFilterGroup":{ "type":"structure", @@ -8600,6 +8619,11 @@ "shape":"ApproveAfterDays", "documentation":"

The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline.

", "box":true + }, + "EnableNonSecurity":{ + "shape":"Boolean", + "documentation":"

For instances identified by the approval rule filters, enables a patch baseline to apply non-security updates available in the specified repository. The default value is 'false'. Applies to Linux instances only.

", + "box":true } }, "documentation":"

Defines an approval rule for a patch baseline.

" @@ -8622,6 +8646,56 @@ "min":0 }, "PatchSeverity":{"type":"string"}, + "PatchSource":{ + "type":"structure", + "required":[ + "Name", + "Products", + "Configuration" + ], + "members":{ + "Name":{ + "shape":"PatchSourceName", + "documentation":"

The name specified to identify the patch source.

" + }, + "Products":{ + "shape":"PatchSourceProductList", + "documentation":"

The specific operating system versions a patch repository applies to, such as \"Ubuntu16.04\", \"AmazonLinux2016.09\", \"RedhatEnterpriseLinux7.2\" or \"Suse12.7\". For lists of supported product values, see PatchFilter.

" + }, + "Configuration":{ + "shape":"PatchSourceConfiguration", + "documentation":"

The value of the yum repo configuration. For example:

cachedir=/var/cache/yum/$basearch

$releasever

keepcache=0

debuglevel=2

" + } + }, + "documentation":"

Information about the patches to use to update the instances, including target operating systems and source repository. Applies to Linux instances only.

" + }, + "PatchSourceConfiguration":{ + "type":"string", + "max":512, + "min":1, + "sensitive":true + }, + "PatchSourceList":{ + "type":"list", + "member":{"shape":"PatchSource"}, + "max":20, + "min":0 + }, + "PatchSourceName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_\\-.]{3,50}$" + }, + "PatchSourceProduct":{ + "type":"string", + "max":128, + "min":1 + }, + "PatchSourceProductList":{ + "type":"list", + "member":{"shape":"PatchSourceProduct"}, + "max":20, + "min":1 + }, "PatchStatus":{ "type":"structure", "members":{ @@ -8735,7 +8809,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For example: /Dev/DBServer/MySQL/db-string13

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for the fully qualified parameter name is 1011 characters.

" + "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For example: /Dev/DBServer/MySQL/db-string13

For information about parameter name requirements and restrictions, see About Creating Systems Manager Parameters in the AWS Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for the fully qualified parameter name is 1011 characters.

" }, "Description":{ "shape":"ParameterDescription", @@ -10294,6 +10368,11 @@ "shape":"PatchComplianceLevel", "documentation":"

Assigns a new compliance severity level to an existing patch baseline.

" }, + "ApprovedPatchesEnableNonSecurity":{ + "shape":"Boolean", + "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.

", + "box":true + }, "RejectedPatches":{ "shape":"PatchIdList", "documentation":"

A list of explicitly rejected patches for the baseline.

" @@ -10301,6 +10380,15 @@ "Description":{ "shape":"BaselineDescription", "documentation":"

A description of the patch baseline.

" + }, + "Sources":{ + "shape":"PatchSourceList", + "documentation":"

Information about the patches to use to update the instances, including target operating systems and source repositories. Applies to Linux instances only.

" + }, + "Replace":{ + "shape":"Boolean", + "documentation":"

If True, then all fields that are required by the CreatePatchBaseline action are also required for this API request. Optional fields that are not specified are set to null.

", + "box":true } } }, @@ -10335,6 +10423,11 @@ "shape":"PatchComplianceLevel", "documentation":"

The compliance severity level assigned to the patch baseline after the update completed.

" }, + "ApprovedPatchesEnableNonSecurity":{ + "shape":"Boolean", + "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.

", + "box":true + }, "RejectedPatches":{ "shape":"PatchIdList", "documentation":"

A list of explicitly rejected patches for the baseline.

" @@ -10350,6 +10443,10 @@ "Description":{ "shape":"BaselineDescription", "documentation":"

A description of the Patch Baseline.

" + }, + "Sources":{ + "shape":"PatchSourceList", + "documentation":"

Information about the patches to use to update the instances, including target operating systems and source repositories. Applies to Linux instances only.

" } } }, diff --git a/botocore/data/support/2013-04-15/examples-1.json b/botocore/data/support/2013-04-15/examples-1.json old mode 100755 new mode 100644 diff --git a/docs/source/conf.py b/docs/source/conf.py index 5034be62..00739faf 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ copyright = u'2013, Mitch Garnaat' # The short X.Y version. version = '1.8.' # The full version, including alpha/beta/rc tags. -release = '1.8.36' +release = '1.8.40' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/requirements.txt b/requirements.txt index 772ad412..c365b843 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,3 +6,4 @@ wheel==0.24.0 docutils>=0.10 behave==1.2.5 -e git://github.com/boto/jmespath.git@develop#egg=jmespath +jsonschema==2.5.1 diff --git a/tests/functional/test_waiter_config.py b/tests/functional/test_waiter_config.py index 2aa35de8..4c5812b0 100644 --- a/tests/functional/test_waiter_config.py +++ b/tests/functional/test_waiter_config.py @@ -11,16 +11,94 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import jmespath +from jsonschema import Draft4Validator import botocore.session +from botocore.exceptions import UnknownServiceError from botocore.utils import ArgumentGenerator +WAITER_SCHEMA = { + "type": "object", + "properties": { + "version": {"type": "number"}, + "waiters": { + "type": "object", + "additionalProperties": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["api"] + }, + "operation": {"type": "string"}, + "description": {"type": "string"}, + "delay": { + "type": "number", + "minimum": 0, + }, + "maxAttempts": { + "type": "integer", + "minimum": 1 + }, + "acceptors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": ["success", "retry", "failure"] + }, + "matcher": { + "type": "string", + "enum": [ + "path", "pathAll", "pathAny", + "status", "error" + ] + }, + "argument": {"type": "string"}, + "expected": { + "oneOf": [ + {"type": "string"}, + {"type": "number"}, + {"type": "boolean"} + ] + } + }, + "required": [ + "state", "matcher", "expected" + ], + "additionalProperties": False + } + } + }, + "required": ["operation", "delay", "maxAttempts", "acceptors"], + "additionalProperties": False + } + } + }, + "additionalProperties": False +} + + def test_lint_waiter_configs(): session = botocore.session.get_session() + validator = Draft4Validator(WAITER_SCHEMA) for service_name in session.get_available_services(): client = session.create_client(service_name, 'us-east-1') service_model = client.meta.service_model + try: + # We use the loader directly here because we need the entire + # json document, not just the portions exposed (either + # internally or externally) by the WaiterModel class. 
+ loader = session.get_component('data_loader') + waiter_model = loader.load_service_model( + service_name, 'waiters-2') + except UnknownServiceError: + # The service doesn't have waiters + continue + yield _validate_schema, validator, waiter_model for waiter_name in client.waiter_names: yield _lint_single_waiter, client, waiter_name, service_model @@ -52,6 +130,17 @@ def _lint_single_waiter(client, waiter_name, service_model): for acceptor in acceptors: _validate_acceptor(acceptor, op_model, waiter.name) + if not waiter.name.isalnum(): + raise AssertionError( + "Waiter name %s is not alphanumeric." % waiter_name + ) + + +def _validate_schema(validator, waiter_json): + errors = list(e.message for e in validator.iter_errors(waiter_json)) + if errors: + raise AssertionError('\n'.join(errors)) + def _validate_acceptor(acceptor, op_model, waiter_name): if acceptor.matcher.startswith('path'):