From d2c90f6e4a8ebf2bb49f3a6e8f545a4ee878311f Mon Sep 17 00:00:00 2001 From: TANIGUCHI Takaki Date: Thu, 3 Oct 2019 19:21:31 +0900 Subject: [PATCH] New upstream version 1.12.241+repack --- PKG-INFO | 2 +- botocore.egg-info/PKG-INFO | 2 +- botocore.egg-info/SOURCES.txt | 10 + botocore.egg-info/requires.txt | 2 +- botocore/__init__.py | 2 +- .../2017-11-09/service-2.json | 25 +- .../data/amplify/2017-07-25/service-2.json | 279 ++- .../data/apigateway/2015-07-09/service-2.json | 7 +- .../2018-11-29/service-2.json | 134 +- .../2016-02-06/service-2.json | 28 +- .../data/appmesh/2019-01-25/service-2.json | 236 ++ .../data/appstream/2016-12-01/service-2.json | 101 +- .../data/athena/2017-05-18/service-2.json | 32 +- .../data/codecommit/2015-04-13/service-2.json | 99 +- .../codepipeline/2015-07-09/service-2.json | 222 +- .../2018-10-30/service-2.json | 663 ++++- .../data/config/2014-11-12/service-2.json | 759 +++++- botocore/data/cur/2017-01-06/service-2.json | 33 +- .../data/datasync/2018-11-09/service-2.json | 205 +- botocore/data/dms/2016-01-01/service-2.json | 69 +- botocore/data/docdb/2014-10-31/service-2.json | 110 +- botocore/data/ec2/2016-11-15/service-2.json | 881 ++++++- botocore/data/ecs/2014-11-13/service-2.json | 261 +- botocore/data/eks/2017-11-01/service-2.json | 185 ++ .../elasticache/2015-02-02/service-2.json | 48 +- botocore/data/elbv2/2015-12-01/service-2.json | 18 +- botocore/data/emr/2009-03-31/service-2.json | 149 +- botocore/data/endpoints.json | 642 ++++- .../forecast/2018-06-26/paginators-1.json | 40 + .../data/forecast/2018-06-26/service-2.json | 2149 +++++++++++++++++ .../2018-06-26/paginators-1.json | 3 + .../forecastquery/2018-06-26/service-2.json | 190 ++ .../data/gamelift/2015-10-01/service-2.json | 29 +- .../2018-08-08/service-2.json | 54 +- .../data/glue/2017-03-31/paginators-1.json | 6 - botocore/data/glue/2017-03-31/service-2.json | 69 +- .../data/greengrass/2017-06-07/service-2.json | 14 +- botocore/data/iam/2010-05-08/service-2.json | 4 +- .../2015-08-14/service-2.json | 14 +- .../data/lambda/2015-03-31/service-2.json | 10 +- .../data/lightsail/2016-11-28/service-2.json | 502 +++- .../2015-07-01/service-2.json | 4 +- .../mediaconnect/2018-11-14/service-2.json | 46 +- .../mediaconvert/2017-08-29/service-2.json | 185 +- .../data/medialive/2017-10-14/service-2.json | 421 +++- .../2018-11-07/service-2.json | 5 + botocore/data/mq/2017-11-27/service-2.json | 70 +- .../organizations/2016-11-28/service-2.json | 32 +- .../2018-05-22/service-2.json | 7 +- .../personalize/2018-05-22/service-2.json | 17 +- .../qldb-session/2019-07-11/paginators-1.json | 3 + .../qldb-session/2019-07-11/service-2.json | 381 +++ .../data/qldb/2019-01-02/paginators-1.json | 3 + botocore/data/qldb/2019-01-02/service-2.json | 1036 ++++++++ botocore/data/ram/2018-01-04/service-2.json | 127 +- .../data/rds-data/2018-08-01/service-2.json | 1570 ++++++------ botocore/data/rds/2014-10-31/service-2.json | 122 +- botocore/data/rds/2014-10-31/waiters-2.json | 97 +- .../redshift/2012-12-01/paginators-1.json | 6 + .../data/redshift/2012-12-01/service-2.json | 151 ++ .../rekognition/2016-06-27/service-2.json | 56 +- .../2017-01-26/service-2.json | 2 +- .../data/robomaker/2018-06-29/service-2.json | 237 +- .../data/sagemaker/2017-07-24/service-2.json | 275 ++- .../securityhub/2018-10-26/service-2.json | 10 +- botocore/data/ses/2010-12-01/service-2.json | 14 +- botocore/data/sqs/2012-11-05/service-2.json | 77 +- botocore/data/ssm/2014-11-06/service-2.json | 29 +- .../stepfunctions/2016-11-23/service-2.json | 
155 +- .../storagegateway/2013-06-30/service-2.json | 66 +- .../data/transcribe/2017-10-26/service-2.json | 28 +- .../data/transfer/2018-11-05/service-2.json | 111 +- .../waf-regional/2016-11-28/service-2.json | 12 +- botocore/data/waf/2015-08-24/service-2.json | 12 +- .../2019-05-01/paginators-1.json | 3 + .../2019-05-01/service-2.json | 74 + .../data/workspaces/2015-04-08/service-2.json | 85 +- docs/source/conf.py | 2 +- requirements.txt | 2 +- setup.cfg | 2 +- setup.py | 2 +- tests/functional/test_waiter_config.py | 2 +- tests/integration/test_ec2.py | 6 +- 83 files changed, 12006 insertions(+), 1827 deletions(-) create mode 100644 botocore/data/forecast/2018-06-26/paginators-1.json create mode 100644 botocore/data/forecast/2018-06-26/service-2.json create mode 100644 botocore/data/forecastquery/2018-06-26/paginators-1.json create mode 100644 botocore/data/forecastquery/2018-06-26/service-2.json create mode 100644 botocore/data/qldb-session/2019-07-11/paginators-1.json create mode 100644 botocore/data/qldb-session/2019-07-11/service-2.json create mode 100644 botocore/data/qldb/2019-01-02/paginators-1.json create mode 100644 botocore/data/qldb/2019-01-02/service-2.json create mode 100644 botocore/data/workmailmessageflow/2019-05-01/paginators-1.json create mode 100644 botocore/data/workmailmessageflow/2019-05-01/service-2.json diff --git a/PKG-INFO b/PKG-INFO index ae839a35..2d404c2b 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.12.208 +Version: 1.12.241 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index ae839a35..2d404c2b 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.12.208 +Version: 1.12.241 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt index 8b9c8364..2645da58 100644 --- a/botocore.egg-info/SOURCES.txt +++ b/botocore.egg-info/SOURCES.txt @@ -332,6 +332,10 @@ botocore/data/firehose/2015-08-04/paginators-1.json botocore/data/firehose/2015-08-04/service-2.json botocore/data/fms/2018-01-01/paginators-1.json botocore/data/fms/2018-01-01/service-2.json +botocore/data/forecast/2018-06-26/paginators-1.json +botocore/data/forecast/2018-06-26/service-2.json +botocore/data/forecastquery/2018-06-26/paginators-1.json +botocore/data/forecastquery/2018-06-26/service-2.json botocore/data/fsx/2018-03-01/paginators-1.json botocore/data/fsx/2018-03-01/service-2.json botocore/data/gamelift/2015-10-01/examples-1.json @@ -494,6 +498,10 @@ botocore/data/polly/2016-06-10/paginators-1.json botocore/data/polly/2016-06-10/service-2.json botocore/data/pricing/2017-10-15/paginators-1.json botocore/data/pricing/2017-10-15/service-2.json +botocore/data/qldb-session/2019-07-11/paginators-1.json +botocore/data/qldb-session/2019-07-11/service-2.json +botocore/data/qldb/2019-01-02/paginators-1.json +botocore/data/qldb/2019-01-02/service-2.json botocore/data/quicksight/2018-04-01/paginators-1.json botocore/data/quicksight/2018-04-01/service-2.json botocore/data/ram/2018-01-04/paginators-1.json @@ -616,6 +624,8 @@ botocore/data/worklink/2018-09-25/paginators-1.json botocore/data/worklink/2018-09-25/service-2.json botocore/data/workmail/2017-10-01/paginators-1.json botocore/data/workmail/2017-10-01/service-2.json +botocore/data/workmailmessageflow/2019-05-01/paginators-1.json +botocore/data/workmailmessageflow/2019-05-01/service-2.json botocore/data/workspaces/2015-04-08/examples-1.json botocore/data/workspaces/2015-04-08/paginators-1.json botocore/data/workspaces/2015-04-08/service-2.json diff --git a/botocore.egg-info/requires.txt b/botocore.egg-info/requires.txt index 08d58a56..ace3e70a 100644 --- a/botocore.egg-info/requires.txt +++ b/botocore.egg-info/requires.txt @@ -1,5 +1,5 @@ jmespath<1.0.0,>=0.7.1 -docutils<0.15,>=0.10 +docutils<0.16,>=0.10 python-dateutil<3.0.0,>=2.1 urllib3<1.26,>=1.20 diff --git a/botocore/__init__.py b/botocore/__init__.py index 4acc9aa3..212e7ef8 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.12.208' +__version__ = '1.12.241' class NullHandler(logging.Handler): diff --git a/botocore/data/alexaforbusiness/2017-11-09/service-2.json b/botocore/data/alexaforbusiness/2017-11-09/service-2.json index cc203ac3..375690ff 100644 --- a/botocore/data/alexaforbusiness/2017-11-09/service-2.json +++ b/botocore/data/alexaforbusiness/2017-11-09/service-2.json @@ -339,7 +339,7 @@ {"shape":"DeviceNotRegisteredException"}, {"shape":"LimitExceededException"} ], - "documentation":"

When this action is called for a specified shared device, it allows authorized users to delete the device's entire previous history of voice input data and associated response data. This action can be called once every 24 hours for a specific shared device.

When this action is called for a specified shared device, it allows authorized users to delete the device's entire previous history of voice input data. This action can be called once every 24 hours for a specific shared device.

" + "documentation":"

When this action is called for a specified shared device, it allows authorized users to delete the device's entire previous history of voice input data and associated response data. This action can be called once every 24 hours for a specific shared device.
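A hedged boto3 sketch of the call described above (an illustrative aside, not part of the upstream patch); the device ARN is a placeholder:

import boto3

a4b = boto3.client("alexaforbusiness")

# Deletes the shared device's stored voice-input history; per the doc above,
# this can be called at most once every 24 hours per device.
a4b.delete_device_usage_data(
    DeviceArn="arn:aws:a4b:us-east-1:123456789012:device/example",  # placeholder
    DeviceUsageType="VOICE",
)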

" }, "DeleteGatewayGroup":{ "name":"DeleteGatewayGroup", @@ -2141,6 +2141,10 @@ "shape":"WakeWord", "documentation":"

A wake word for Alexa, Echo, Amazon, or a computer.

" }, + "Locale":{ + "shape":"DeviceLocale", + "documentation":"

The locale of the room profile.

" + }, "ClientRequestToken":{ "shape":"ClientRequestToken", "documentation":"

The user-specified token that is used during the creation of a profile.

", @@ -2683,6 +2687,11 @@ ] }, "DeviceEventValue":{"type":"string"}, + "DeviceLocale":{ + "type":"string", + "max":256, + "min":1 + }, "DeviceName":{ "type":"string", "max":100, @@ -2892,7 +2901,7 @@ "type":"string", "max":128, "min":1, - "pattern":"([0-9a-zA-Z]([+-.\\w]*[0-9a-zA-Z])*@([0-9a-zA-Z][-\\w]*[0-9a-zA-Z]\\.)+[a-zA-Z]{2,9})" + "pattern":"([0-9a-zA-Z]([+-.\\w]*[0-9a-zA-Z])*@([0-9a-zA-Z]([-\\w]*[0-9a-zA-Z]+)*\\.)+[a-zA-Z]{2,9})" }, "EnablementType":{ "type":"string", @@ -4042,6 +4051,10 @@ "shape":"WakeWord", "documentation":"

The wake word of a room profile.

" }, + "Locale":{ + "shape":"DeviceLocale", + "documentation":"

The locale of a room profile.

" + }, "SetupModeDisabled":{ "shape":"Boolean", "documentation":"

The setup mode of a room profile.

" @@ -4095,6 +4108,10 @@ "WakeWord":{ "shape":"WakeWord", "documentation":"

The wake word of a room profile.

" + }, + "Locale":{ + "shape":"DeviceLocale", + "documentation":"

The locale of a room profile.

" } }, "documentation":"

The data of a room profile.

" @@ -5621,6 +5638,10 @@ "shape":"WakeWord", "documentation":"

The updated wake word for the room profile.

" }, + "Locale":{ + "shape":"DeviceLocale", + "documentation":"

The updated locale for the room profile.

" + }, "SetupModeDisabled":{ "shape":"Boolean", "documentation":"

Whether the setup mode of the profile is enabled.

" diff --git a/botocore/data/amplify/2017-07-25/service-2.json b/botocore/data/amplify/2017-07-25/service-2.json index 3aeb2ea2..b342d6f8 100644 --- a/botocore/data/amplify/2017-07-25/service-2.json +++ b/botocore/data/amplify/2017-07-25/service-2.json @@ -185,6 +185,22 @@ ], "documentation":"

Deletes a webhook.

" }, + "GenerateAccessLogs":{ + "name":"GenerateAccessLogs", + "http":{ + "method":"POST", + "requestUri":"/apps/{appId}/accesslogs" + }, + "input":{"shape":"GenerateAccessLogsRequest"}, + "output":{"shape":"GenerateAccessLogsResult"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Retrieve website access logs for a specific time range via a pre-signed URL. Optionally, deliver the logs to a given S3 bucket.
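A hedged boto3 sketch of the new GenerateAccessLogs operation (illustrative aside; app id and domain are placeholders):

import datetime
import boto3

amplify = boto3.client("amplify")

# startTime/endTime bound the log window; both ends are inclusive per the
# request shape below.
resp = amplify.generate_access_logs(
    appId="d2example",                        # placeholder Amplify app id
    domainName="example.com",                 # placeholder domain
    startTime=datetime.datetime(2019, 9, 1),
    endTime=datetime.datetime(2019, 9, 2),
)
print(resp["logUrl"])  # pre-signed URL for the requested access logs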

" + }, "GetApp":{ "name":"GetApp", "http":{ @@ -201,6 +217,23 @@ ], "documentation":"

Retrieves an existing Amplify App by appId.

" }, + "GetArtifactUrl":{ + "name":"GetArtifactUrl", + "http":{ + "method":"GET", + "requestUri":"/artifacts/{artifactId}" + }, + "input":{"shape":"GetArtifactUrlRequest"}, + "output":{"shape":"GetArtifactUrlResult"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Retrieves artifact info that corresponds to an artifactId.

" + }, "GetBranch":{ "name":"GetBranch", "http":{ @@ -282,6 +315,22 @@ ], "documentation":"

Lists existing Amplify Apps.

" }, + "ListArtifacts":{ + "name":"ListArtifacts", + "http":{ + "method":"GET", + "requestUri":"/apps/{appId}/branches/{branchName}/jobs/{jobId}/artifacts" + }, + "input":{"shape":"ListArtifactsRequest"}, + "output":{"shape":"ListArtifactsResult"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalFailureException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Lists artifacts for an app, a branch, a job, and an artifact type.
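A hedged boto3 sketch combining the new ListArtifacts and GetArtifactUrl operations, including the nextToken pagination described in the shapes below (illustrative aside; all ids are placeholders):

import boto3

amplify = boto3.client("amplify")

token = None
while True:
    kwargs = {"appId": "d2example", "branchName": "master",
              "jobId": "1", "artifactType": "TEST"}
    if token:
        kwargs["nextToken"] = token  # resume from the previous page
    page = amplify.list_artifacts(**kwargs)
    for artifact in page["artifacts"]:
        # GetArtifactUrl turns an artifactId into a pre-signed URL.
        url = amplify.get_artifact_url(artifactId=artifact["artifactId"])
        print(artifact["artifactFileName"], url["artifactUrl"])
    token = page.get("nextToken")
    if not token:
        break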

" + }, "ListBranches":{ "name":"ListBranches", "http":{ @@ -635,6 +684,44 @@ "type":"list", "member":{"shape":"App"} }, + "Artifact":{ + "type":"structure", + "required":[ + "artifactFileName", + "artifactId" + ], + "members":{ + "artifactFileName":{ + "shape":"ArtifactFileName", + "documentation":"

File name for the artifact.

" + }, + "artifactId":{ + "shape":"ArtifactId", + "documentation":"

Unique Id for an artifact.

" + } + }, + "documentation":"

Structure for an artifact.

" + }, + "ArtifactFileName":{ + "type":"string", + "max":1000 + }, + "ArtifactId":{ + "type":"string", + "max":255 + }, + "ArtifactType":{ + "type":"string", + "enum":["TEST"] + }, + "ArtifactUrl":{ + "type":"string", + "max":1000 + }, + "Artifacts":{ + "type":"list", + "member":{"shape":"Artifact"} + }, "ArtifactsUrl":{ "type":"string", "max":1000 @@ -678,6 +765,10 @@ "buildSpec":{ "shape":"BuildSpec", "documentation":"

BuildSpec for the auto created branch.

" + }, + "enablePullRequestPreview":{ + "shape":"EnablePullRequestPreview", + "documentation":"

Enables Pull Request Preview for auto created branch.

" } }, "documentation":"

Structure with auto branch creation config.

" @@ -722,7 +813,8 @@ "activeJobId", "totalNumberOfJobs", "enableBasicAuth", - "ttl" + "ttl", + "enablePullRequestPreview" ], "members":{ "branchArn":{ @@ -808,6 +900,18 @@ "associatedResources":{ "shape":"AssociatedResources", "documentation":"

List of custom resources that are linked to this branch.

" + }, + "enablePullRequestPreview":{ + "shape":"EnablePullRequestPreview", + "documentation":"

Enables Pull Request Preview for this branch.

" + }, + "destinationBranch":{ + "shape":"BranchName", + "documentation":"

The destination branch if the branch is a pull request branch.

" + }, + "sourceBranch":{ + "shape":"BranchName", + "documentation":"

The source branch if the branch is a pull request branch.

" } }, "documentation":"

Branch for an Amplify App, which maps to a 3rd party repository branch.

" @@ -998,6 +1102,10 @@ "displayName":{ "shape":"DisplayName", "documentation":"

Display name for a branch; used as the default domain prefix.

" + }, + "enablePullRequestPreview":{ + "shape":"EnablePullRequestPreview", + "documentation":"

Enables Pull Request Preview for this branch.

" } }, "documentation":"

Request structure for a branch create request.

" @@ -1424,6 +1532,7 @@ "EnableBasicAuth":{"type":"boolean"}, "EnableBranchAutoBuild":{"type":"boolean"}, "EnableNotification":{"type":"boolean"}, + "EnablePullRequestPreview":{"type":"boolean"}, "EndTime":{"type":"timestamp"}, "EnvKey":{ "type":"string", @@ -1460,6 +1569,44 @@ "type":"string", "max":255 }, + "GenerateAccessLogsRequest":{ + "type":"structure", + "required":[ + "domainName", + "appId" + ], + "members":{ + "startTime":{ + "shape":"StartTime", + "documentation":"

The time at which the logs should start, inclusive.

" + }, + "endTime":{ + "shape":"EndTime", + "documentation":"

The time at which the logs should end, inclusive.

" + }, + "domainName":{ + "shape":"DomainName", + "documentation":"

Name of the domain.

" + }, + "appId":{ + "shape":"AppId", + "documentation":"

Unique Id for an Amplify App.

", + "location":"uri", + "locationName":"appId" + } + }, + "documentation":"

Request structure for the generate access logs request.

" + }, + "GenerateAccessLogsResult":{ + "type":"structure", + "members":{ + "logUrl":{ + "shape":"LogUrl", + "documentation":"

Pre-signed URL for the requested access logs.

" + } + }, + "documentation":"

Result structure for the generate access logs request.

" + }, "GetAppRequest":{ "type":"structure", "required":["appId"], @@ -1480,6 +1627,37 @@ "app":{"shape":"App"} } }, + "GetArtifactUrlRequest":{ + "type":"structure", + "required":["artifactId"], + "members":{ + "artifactId":{ + "shape":"ArtifactId", + "documentation":"

Unique Id for an artifact.

", + "location":"uri", + "locationName":"artifactId" + } + }, + "documentation":"

Request structure for the get artifact request.

" + }, + "GetArtifactUrlResult":{ + "type":"structure", + "required":[ + "artifactId", + "artifactUrl" + ], + "members":{ + "artifactId":{ + "shape":"ArtifactId", + "documentation":"

Unique Id for an artifact.

" + }, + "artifactUrl":{ + "shape":"ArtifactUrl", + "documentation":"

Pre-signed URL for the artifact.

" + } + }, + "documentation":"

Result structure for the get artifact request.

" + }, "GetBranchRequest":{ "type":"structure", "required":[ @@ -1763,6 +1941,66 @@ }, "documentation":"

Result structure for an Amplify App list request.

" }, + "ListArtifactsRequest":{ + "type":"structure", + "required":[ + "appId", + "branchName", + "jobId" + ], + "members":{ + "appId":{ + "shape":"AppId", + "documentation":"

Unique Id for an Amplify App.

", + "location":"uri", + "locationName":"appId" + }, + "branchName":{ + "shape":"BranchName", + "documentation":"

Name for a branch, part of an Amplify App.

", + "location":"uri", + "locationName":"branchName" + }, + "jobId":{ + "shape":"JobId", + "documentation":"

Unique Id for a Job.

", + "location":"uri", + "locationName":"jobId" + }, + "artifactType":{ + "shape":"ArtifactType", + "documentation":"

Type for an artifact.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Pagination token. Set to null to start listing artifacts from the beginning. If a non-null pagination token is returned in a result, pass its value here to list more artifacts.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

Maximum number of records to list in a single response.

", + "location":"querystring", + "locationName":"maxResults" + } + }, + "documentation":"

Request structure for the list artifacts request.

" + }, + "ListArtifactsResult":{ + "type":"structure", + "required":["artifacts"], + "members":{ + "artifacts":{ + "shape":"Artifacts", + "documentation":"

List of artifacts.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Pagination token. If a non-null pagination token is returned in a result, pass its value in another request to fetch more entries.

" + } + }, + "documentation":"

Result structure for the list artifacts request.

" + }, "ListBranchesRequest":{ "type":"structure", "required":["appId"], @@ -2063,7 +2301,8 @@ "PRODUCTION", "BETA", "DEVELOPMENT", - "EXPERIMENTAL" + "EXPERIMENTAL", + "PULL_REQUEST" ] }, "StartDeploymentRequest":{ @@ -2168,7 +2407,7 @@ "StartTime":{"type":"timestamp"}, "Status":{ "type":"string", - "max":3, + "max":7, "min":3 }, "StatusReason":{ @@ -2208,6 +2447,14 @@ "shape":"ArtifactsUrl", "documentation":"

URL to the artifact for the execution step.

" }, + "testArtifactsUrl":{ + "shape":"TestArtifactsUrl", + "documentation":"

URL to the test artifact for the execution step.

" + }, + "testConfigUrl":{ + "shape":"TestConfigUrl", + "documentation":"

URL to the test config for the execution step.

" + }, "screenshots":{ "shape":"Screenshots", "documentation":"

List of screenshot URLs for the execution step, if relevant.

" @@ -2380,6 +2627,14 @@ "max":2048, "min":1 }, + "TestArtifactsUrl":{ + "type":"string", + "max":1000 + }, + "TestConfigUrl":{ + "type":"string", + "max":1000 + }, "ThumbnailName":{ "type":"string", "max":256 @@ -2490,7 +2745,19 @@ }, "autoBranchCreationConfig":{ "shape":"AutoBranchCreationConfig", - "documentation":"

Automated branch creation config for the Amplify App.

" + "documentation":"

Automated branch creation branchConfig for the Amplify App.

" + }, + "repository":{ + "shape":"Repository", + "documentation":"

Repository for an Amplify App.

" + }, + "oauthToken":{ + "shape":"OauthToken", + "documentation":"

OAuth token for 3rd party source control system for an Amplify App, used to create webhook and read-only deploy key. OAuth token is not stored.

" + }, + "accessToken":{ + "shape":"AccessToken", + "documentation":"

Personal Access token for 3rd party source control system for an Amplify App, used to create webhook and read-only deploy key. Token is not stored.

" } }, "documentation":"

Request structure for update App request.

" @@ -2568,6 +2835,10 @@ "displayName":{ "shape":"DisplayName", "documentation":"

Display name for a branch; used as the default domain prefix.

" + }, + "enablePullRequestPreview":{ + "shape":"EnablePullRequestPreview", + "documentation":"

Enables Pull Request Preview for this branch.

" } }, "documentation":"

Request structure for update branch request.

" diff --git a/botocore/data/apigateway/2015-07-09/service-2.json b/botocore/data/apigateway/2015-07-09/service-2.json index 62753acb..0e92e902 100644 --- a/botocore/data/apigateway/2015-07-09/service-2.json +++ b/botocore/data/apigateway/2015-07-09/service-2.json @@ -407,7 +407,8 @@ "errors":[ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} ], "documentation":"

Deletes the DomainName resource.

" }, @@ -3602,6 +3603,10 @@ "types":{ "shape":"ListOfEndpointType", "documentation":"

A list of endpoint types of an API (RestApi) or its custom domain name (DomainName). For an edge-optimized API and its custom domain name, the endpoint type is \"EDGE\". For a regional API and its custom domain name, the endpoint type is REGIONAL. For a private API, the endpoint type is PRIVATE.

" + }, + "vpcEndpointIds":{ + "shape":"ListOfString", + "documentation":"

A list of VpcEndpointIds of an API (RestApi) against which to create Route53 ALIASes. It is only supported for PRIVATE endpoint type.
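A hedged boto3 sketch of creating a private REST API whose Route 53 ALIASes target a specific interface endpoint, per the member above (illustrative aside; name and vpce id are placeholders):

import boto3

apigw = boto3.client("apigateway")

# vpcEndpointIds is only honored together with the PRIVATE endpoint type.
apigw.create_rest_api(
    name="internal-api",  # placeholder
    endpointConfiguration={
        "types": ["PRIVATE"],
        "vpcEndpointIds": ["vpce-0123456789abcdef0"],  # placeholder vpce id
    },
)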

" } }, "documentation":"

The endpoint configuration to indicate the types of endpoints an API (RestApi) or its custom domain name (DomainName) has.

" diff --git a/botocore/data/apigatewaymanagementapi/2018-11-29/service-2.json b/botocore/data/apigatewaymanagementapi/2018-11-29/service-2.json index eea6c940..39fc9171 100644 --- a/botocore/data/apigatewaymanagementapi/2018-11-29/service-2.json +++ b/botocore/data/apigatewaymanagementapi/2018-11-29/service-2.json @@ -11,6 +11,53 @@ "signatureVersion" : "v4" }, "operations" : { + "DeleteConnection" : { + "name" : "DeleteConnection", + "http" : { + "method" : "DELETE", + "requestUri" : "/@connections/{connectionId}", + "responseCode" : 204 + }, + "input" : { + "shape" : "DeleteConnectionRequest" + }, + "errors" : [ { + "shape" : "GoneException", + "documentation" : "

The connection with the provided id no longer exists.

" + }, { + "shape" : "LimitExceededException", + "documentation" : "

The client is sending more than the allowed number of requests per unit of time or the WebSocket client side buffer is full.

" + }, { + "shape" : "ForbiddenException", + "documentation" : "

The caller is not authorized to invoke this operation.

" + } ], + "documentation" : "

Delete the connection with the provided id.

" + }, + "GetConnection" : { + "name" : "GetConnection", + "http" : { + "method" : "GET", + "requestUri" : "/@connections/{connectionId}", + "responseCode" : 200 + }, + "input" : { + "shape" : "GetConnectionRequest" + }, + "output" : { + "shape" : "GetConnectionResponse" + }, + "errors" : [ { + "shape" : "GoneException", + "documentation" : "

The connection with the provided id no longer exists.

" + }, { + "shape" : "LimitExceededException", + "documentation" : "

The client is sending more than the allowed number of requests per unit of time or the WebSocket client side buffer is full.

" + }, { + "shape" : "ForbiddenException", + "documentation" : "

The caller is not authorized to invoke this operation.

" + } ], + "documentation" : "

Get information about the connection with the provided id.

" + }, "PostToConnection" : { "name" : "PostToConnection", "http" : { @@ -26,7 +73,7 @@ "documentation" : "

The connection with the provided id no longer exists.

" }, { "shape" : "LimitExceededException", - "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time or the WebSocket client side buffer is full.

" }, { "shape" : "PayloadTooLargeException", "documentation" : "

The data has exceeded the maximum size allowed.

" @@ -43,32 +90,80 @@ "max" : 131072, "documentation" : "

The data to be sent to the client specified by its connection id.

" }, + "DeleteConnectionRequest" : { + "type" : "structure", + "members" : { + "ConnectionId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "connectionId" + } + }, + "required" : [ "ConnectionId" ] + }, "ForbiddenException" : { "type" : "structure", "members" : { }, + "documentation" : "

The caller is not authorized to invoke this operation.

", "exception" : true, "error" : { "httpStatusCode" : 403 + } + }, + "GetConnectionRequest" : { + "type" : "structure", + "members" : { + "ConnectionId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "connectionId" + } }, - "documentation" : "

The caller is not authorized to invoke this operation.

" + "required" : [ "ConnectionId" ] + }, + "GetConnectionResponse" : { + "type" : "structure", + "members" : { + "ConnectedAt" : { + "shape" : "__timestampIso8601", + "locationName" : "connectedAt", + "documentation" : "

The time in ISO 8601 format for when the connection was established.

" + }, + "Identity" : { + "shape" : "Identity", + "locationName" : "identity" + }, + "LastActiveAt" : { + "shape" : "__timestampIso8601", + "locationName" : "lastActiveAt", + "documentation" : "

The time in ISO 8601 format for when the connection was last active.

" + } + } }, "GoneException" : { "type" : "structure", "members" : { }, + "documentation" : "

The connection with the provided id no longer exists.

", "exception" : true, "error" : { "httpStatusCode" : 410 - }, - "documentation" : "

The connection with the provided id no longer exists.

" + } }, - "LimitExceededException" : { + "Identity" : { "type" : "structure", - "members" : { }, - "exception" : true, - "error" : { - "httpStatusCode" : 429 + "members" : { + "SourceIp" : { + "shape" : "__string", + "locationName" : "sourceIp", + "documentation" : "

The source IP address of the TCP connection making the request to API Gateway.

" + }, + "UserAgent" : { + "shape" : "__string", + "locationName" : "userAgent", + "documentation" : "

The User Agent of the API caller.

" + } }, - "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" + "required" : [ "SourceIp", "UserAgent" ] }, "PayloadTooLargeException" : { "type" : "structure", @@ -78,11 +173,11 @@ "locationName" : "message" } }, + "documentation" : "

The data has exceeded the maximum size allowed.

", "exception" : true, "error" : { "httpStatusCode" : 413 - }, - "documentation" : "

The data has exceeded the maximum size allowed.

" + } }, "PostToConnectionRequest" : { "type" : "structure", @@ -101,9 +196,22 @@ "required" : [ "ConnectionId", "Data" ], "payload" : "Data" }, + "LimitExceededException" : { + "type" : "structure", + "members" : { }, + "documentation" : "

The client is sending more than the allowed number of requests per unit of time or the WebSocket client side buffer is full.

", + "exception" : true, + "error" : { + "httpStatusCode" : 429 + } + }, "__string" : { "type" : "string" + }, + "__timestampIso8601" : { + "type" : "timestamp", + "timestampFormat" : "iso8601" } }, "documentation" : "

The Amazon API Gateway Management API allows you to directly manage runtime aspects of your deployed APIs. To use it, you must explicitly set the SDK's endpoint to point to the endpoint of your deployed API. The endpoint will be of the form https://{api-id}.execute-api.{region}.amazonaws.com/{stage}, or will be the endpoint corresponding to your API's custom domain and base path, if applicable.
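A hedged boto3 sketch of the management API, including the new GetConnection and DeleteConnection operations (illustrative aside; the endpoint URL and connection id are placeholders, and the endpoint must be your deployed API's own endpoint as described above):

import boto3

client = boto3.client(
    "apigatewaymanagementapi",
    endpoint_url="https://abc123.execute-api.us-east-1.amazonaws.com/prod",
)

conn_id = "L0SM9example="  # placeholder id delivered on the $connect route
info = client.get_connection(ConnectionId=conn_id)
print(info["ConnectedAt"], info["Identity"]["SourceIp"])

client.post_to_connection(ConnectionId=conn_id, Data=b"hello")
client.delete_connection(ConnectionId=conn_id)  # closes the WebSocket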

" -} +} \ No newline at end of file diff --git a/botocore/data/application-autoscaling/2016-02-06/service-2.json b/botocore/data/application-autoscaling/2016-02-06/service-2.json index d7cc66a3..7c11d6de 100644 --- a/botocore/data/application-autoscaling/2016-02-06/service-2.json +++ b/botocore/data/application-autoscaling/2016-02-06/service-2.json @@ -770,6 +770,10 @@ "RoleARN":{ "shape":"ResourceIdMaxLen1600", "documentation":"

Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see Service-Linked Roles for Application Auto Scaling.

For resources that are not supported using a service-linked role, this parameter is required, and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

" + }, + "SuspendedState":{ + "shape":"SuspendedState", + "documentation":"

An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the specified scaling activities.

Suspension Outcomes

For more information, see Suspend and Resume Application Auto Scaling in the Application Auto Scaling User Guide.
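A hedged boto3 sketch of the SuspendedState member described above, e.g. suspending scale-in while debugging an ECS service (illustrative aside; resource names are placeholders, and unset attributes default to false):

import boto3

aas = boto3.client("application-autoscaling")

aas.register_scalable_target(
    ServiceNamespace="ecs",
    ResourceId="service/default/web",  # placeholder ECS service
    ScalableDimension="ecs:service:DesiredCount",
    SuspendedState={
        "DynamicScalingInSuspended": True,    # stop removing capacity
        "DynamicScalingOutSuspended": False,  # still allow scale out
        "ScheduledScalingSuspended": False,   # keep scheduled actions
    },
)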

" } } }, @@ -853,7 +857,8 @@ "CreationTime":{ "shape":"TimestampType", "documentation":"

The Unix timestamp for when the scalable target was created.

" - } + }, + "SuspendedState":{"shape":"SuspendedState"} }, "documentation":"

Represents a scalable target.

" }, @@ -1010,6 +1015,7 @@ }, "documentation":"

Represents a scaling policy to use with Application Auto Scaling.

" }, + "ScalingSuspended":{"type":"boolean"}, "ScheduledAction":{ "type":"structure", "required":[ @@ -1136,6 +1142,24 @@ }, "documentation":"

Represents a step scaling policy configuration to use with Application Auto Scaling.

" }, + "SuspendedState":{ + "type":"structure", + "members":{ + "DynamicScalingInSuspended":{ + "shape":"ScalingSuspended", + "documentation":"

Whether scale in by a target tracking scaling policy or a step scaling policy is suspended. Set the value to true if you don't want Application Auto Scaling to remove capacity when a scaling policy is triggered. The default is false.

" + }, + "DynamicScalingOutSuspended":{ + "shape":"ScalingSuspended", + "documentation":"

Whether scale out by a target tracking scaling policy or a step scaling policy is suspended. Set the value to true if you don't want Application Auto Scaling to add capacity when a scaling policy is triggered. The default is false.

" + }, + "ScheduledScalingSuspended":{ + "shape":"ScalingSuspended", + "documentation":"

Whether scheduled scaling is suspended. Set the value to true if you don't want Application Auto Scaling to add or remove capacity by initiating scheduled actions. The default is false.

" + } + }, + "documentation":"

Specifies whether the scaling activities for a scalable target are in a suspended state.

" + }, "TargetTrackingScalingPolicyConfiguration":{ "type":"structure", "required":["TargetValue"], @@ -1181,5 +1205,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"

With Application Auto Scaling, you can configure automatic scaling for the following resources:

API Summary

The Application Auto Scaling service API includes two key sets of actions:

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

" + "documentation":"

With Application Auto Scaling, you can configure automatic scaling for the following resources:

API Summary

The Application Auto Scaling service API includes three key sets of actions:

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

" } diff --git a/botocore/data/appmesh/2019-01-25/service-2.json b/botocore/data/appmesh/2019-01-25/service-2.json index 4aef8a19..876c6a55 100644 --- a/botocore/data/appmesh/2019-01-25/service-2.json +++ b/botocore/data/appmesh/2019-01-25/service-2.json @@ -677,6 +677,9 @@ { "shape": "BadRequestException" }, + { + "shape": "ForbiddenException" + }, { "shape": "InternalServerErrorException" }, @@ -685,6 +688,9 @@ }, { "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" } ], "documentation": "

List the tags for an App Mesh resource.

" @@ -811,6 +817,9 @@ { "shape": "BadRequestException" }, + { + "shape": "ForbiddenException" + }, { "shape": "InternalServerErrorException" }, @@ -820,6 +829,9 @@ { "shape": "ServiceUnavailableException" }, + { + "shape": "TooManyRequestsException" + }, { "shape": "TooManyTagsException" } @@ -844,6 +856,9 @@ { "shape": "BadRequestException" }, + { + "shape": "ForbiddenException" + }, { "shape": "InternalServerErrorException" }, @@ -852,6 +867,9 @@ }, { "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" } ], "documentation": "

Deletes specified tags from a resource.

", @@ -1125,6 +1143,14 @@ }, "documentation": "" }, + "TcpRetryPolicyEvents": { + "type": "list", + "member": { + "shape": "TcpRetryPolicyEvent" + }, + "min": 1, + "max": 1 + }, "CreateVirtualServiceInput": { "type": "structure", "required": [ @@ -1787,6 +1813,36 @@ }, "documentation": "

An object representing a virtual service returned by a describe operation.

" }, + "Boolean": { + "type": "boolean", + "box": true + }, + "HttpRouteHeader": { + "type": "structure", + "required": [ + "name" + ], + "members": { + "invert": { + "shape": "Boolean", + "documentation": "

Specify True to match the opposite of the HeaderMatchMethod method and value. The default value is False.

" + }, + "match": { + "shape": "HeaderMatchMethod", + "documentation": "

The HeaderMatchMethod object.

" + }, + "name": { + "shape": "HeaderName", + "documentation": "

A name for the HTTP header in the client request that will be matched on.

" + } + }, + "documentation": "

An object representing the HTTP header in the request.

" + }, + "HttpRetryPolicyEvent": { + "type": "string", + "min": 1, + "max": 25 + }, "DescribeVirtualServiceOutput": { "type": "structure", "required": [ @@ -2049,6 +2105,12 @@ }, "documentation": "

An object representing the AWS Cloud Map attribute information for your virtual node.

" }, + "TcpRetryPolicyEvent": { + "type": "string", + "enum": [ + "connection-error" + ] + }, "VirtualServiceSpec": { "type": "structure", "members": { @@ -2069,6 +2131,24 @@ }, "documentation": "

An object representing the backends that a virtual node is expected to send outbound\n traffic to.

" }, + "MatchRange": { + "type": "structure", + "required": [ + "end", + "start" + ], + "members": { + "end": { + "shape": "Long", + "documentation": "

The end of the range.

" + }, + "start": { + "shape": "Long", + "documentation": "

The start of the range.

" + } + }, + "documentation": "

The range of values to match on. The first character of the range is included in the range, though the last character is not. For example, if the range specified were 1-100, only values 1-99 would be matched.

" + }, "ListVirtualRoutersLimit": { "type": "integer", "box": true, @@ -2211,6 +2291,19 @@ }, "documentation": "" }, + "DurationUnit": { + "type": "string", + "enum": [ + "ms", + "s" + ] + }, + "RoutePriority": { + "type": "integer", + "box": true, + "min": 0, + "max": 1000 + }, "ListVirtualServicesInput": { "type": "structure", "required": [ @@ -2364,6 +2457,14 @@ }, "documentation": "

An object representing the status of a virtual service.

" }, + "HttpRetryPolicyEvents": { + "type": "list", + "member": { + "shape": "HttpRetryPolicyEvent" + }, + "min": 1, + "max": 25 + }, "ListVirtualNodesLimit": { "type": "integer", "box": true, @@ -2421,6 +2522,11 @@ "Timestamp": { "type": "timestamp" }, + "HeaderMatch": { + "type": "string", + "min": 1, + "max": 255 + }, "VirtualNodeSpec": { "type": "structure", "members": { @@ -2468,6 +2574,34 @@ "min": 1, "max": 1 }, + "HttpMethod": { + "type": "string", + "enum": [ + "CONNECT", + "DELETE", + "GET", + "HEAD", + "OPTIONS", + "PATCH", + "POST", + "PUT", + "TRACE" + ] + }, + "Duration": { + "type": "structure", + "members": { + "unit": { + "shape": "DurationUnit", + "documentation": "

The unit of time between retry attempts.

" + }, + "value": { + "shape": "DurationValue", + "documentation": "

The duration of time between retry attempts.

" + } + }, + "documentation": "

An object representing the duration between retry attempts.

" + }, "ConflictException": { "type": "structure", "members": { @@ -2503,9 +2637,21 @@ "prefix" ], "members": { + "headers": { + "shape": "HttpRouteHeaders", + "documentation": "

The client request headers to match on.

" + }, + "method": { + "shape": "HttpMethod", + "documentation": "

The client request header method to match on.

" + }, "prefix": { "shape": "String", "documentation": "

Specifies the path to match requests with. This parameter must always start with\n /, which by itself matches all requests to the virtual service name. You\n can also match for path-based routing of requests. For example, if your virtual service\n name is my-service.local and you want the route to match requests to\n my-service.local/metrics, your prefix should be\n /metrics.

" + }, + "scheme": { + "shape": "HttpScheme", + "documentation": "

The client request header scheme to match on.

" } }, "documentation": "

An object representing the requirements for a route to match HTTP requests for a virtual\n router.

" @@ -2551,6 +2697,11 @@ }, "documentation": "

An object representing a service mesh returned by a list operation.

" }, + "MaxRetries": { + "type": "long", + "box": true, + "min": 0 + }, "MeshStatusCode": { "type": "string", "enum": [ @@ -2868,6 +3019,11 @@ "fault": true } }, + "HeaderName": { + "type": "string", + "min": 1, + "max": 50 + }, "TagList": { "type": "list", "member": { @@ -2876,6 +3032,32 @@ "min": 0, "max": 50 }, + "HttpRetryPolicy": { + "type": "structure", + "required": [ + "maxRetries", + "perRetryTimeout" + ], + "members": { + "httpRetryEvents": { + "shape": "HttpRetryPolicyEvents", + "documentation": "

Specify at least one of the following values.

\n " + }, + "maxRetries": { + "shape": "MaxRetries", + "documentation": "

The maximum number of retry attempts. If no value is specified, the default is 1.

" + }, + "perRetryTimeout": { + "shape": "Duration", + "documentation": "

An object that represents the retry duration.

" + }, + "tcpRetryEvents": { + "shape": "TcpRetryPolicyEvents", + "documentation": "

Specify a valid value.

" + } + }, + "documentation": "

An object that represents a retry policy.

" + }, "DescribeVirtualRouterInput": { "type": "structure", "required": [ @@ -2939,6 +3121,32 @@ "senderFault": true } }, + "HeaderMatchMethod": { + "type": "structure", + "members": { + "exact": { + "shape": "HeaderMatch", + "documentation": "

The header value sent by the client must match the specified value exactly.

" + }, + "prefix": { + "shape": "HeaderMatch", + "documentation": "

The header value sent by the client must begin with the specified characters.

" + }, + "range": { + "shape": "MatchRange", + "documentation": "

The object that specifies the range of numbers that the header value sent by the client must be included in.

" + }, + "regex": { + "shape": "HeaderMatch", + "documentation": "

The header value sent by the client must include the specified characters.

" + }, + "suffix": { + "shape": "HeaderMatch", + "documentation": "

The header value sent by the client must end with the specified characters.

" + } + }, + "documentation": "

An object representing the method and value to match the header value sent with a request. Specify one match method.
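A hedged boto3 sketch tying together the new HeaderMatchMethod, route priority, and retry policy shapes on CreateRoute (illustrative aside; mesh, router, and node names are placeholders):

import boto3

appmesh = boto3.client("appmesh")

appmesh.create_route(
    meshName="my-mesh",
    virtualRouterName="my-router",
    routeName="canary",
    spec={
        "priority": 10,  # 0 is the highest priority
        "httpRoute": {
            "match": {
                "prefix": "/",
                "method": "GET",
                "headers": [{
                    "name": "x-canary",
                    "invert": False,
                    "match": {"exact": "true"},  # exactly one match method
                }],
            },
            "retryPolicy": {
                "maxRetries": 2,
                "perRetryTimeout": {"unit": "ms", "value": 250},
                "httpRetryEvents": ["server-error"],
                "tcpRetryEvents": ["connection-error"],
            },
            "action": {"weightedTargets": [
                {"virtualNode": "web-canary", "weight": 1},
            ]},
        },
    },
)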

" + }, "DeleteMeshOutput": { "type": "structure", "required": [ @@ -2960,6 +3168,11 @@ "DROP_ALL" ] }, + "DurationValue": { + "type": "long", + "box": true, + "min": 0 + }, "Hostname": { "type": "string" }, @@ -3037,6 +3250,14 @@ "min": 1, "max": 10 }, + "HttpRouteHeaders": { + "type": "list", + "member": { + "shape": "HttpRouteHeader" + }, + "min": 1, + "max": 10 + }, "VirtualServiceProvider": { "type": "structure", "members": { @@ -3076,6 +3297,13 @@ }, "documentation": "

An object representing the AWS Cloud Map service discovery information for your virtual\n node.

" }, + "HttpScheme": { + "type": "string", + "enum": [ + "http", + "https" + ] + }, "UpdateVirtualServiceOutput": { "type": "structure", "required": [ @@ -3195,6 +3423,10 @@ "shape": "HttpRoute", "documentation": "

The HTTP routing information for the route.

" }, + "priority": { + "shape": "RoutePriority", + "documentation": "

The priority for the route. Routes are matched based on the specified value, where 0 is the highest priority.

" + }, "tcpRoute": { "shape": "TcpRoute", "documentation": "

The TCP routing information for the route.

" @@ -3216,6 +3448,10 @@ "match": { "shape": "HttpRouteMatch", "documentation": "

The criteria for determining an HTTP request match.

" + }, + "retryPolicy": { + "shape": "HttpRetryPolicy", + "documentation": "

An object that represents a retry policy.

" } }, "documentation": "

An object representing the HTTP routing specification for a route.

" diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index 6a4d44ab..2d6d76ad 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -190,7 +190,8 @@ "output":{"shape":"CreateUsageReportSubscriptionResult"}, "errors":[ {"shape":"InvalidRoleException"}, - {"shape":"InvalidAccountStatusException"} + {"shape":"InvalidAccountStatusException"}, + {"shape":"LimitExceededException"} ], "documentation":"

Creates a usage report subscription. Usage reports are generated daily.

" }, @@ -543,7 +544,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

" }, "StartFleet":{ "name":"StartFleet", @@ -558,7 +559,9 @@ {"shape":"OperationNotPermittedException"}, {"shape":"LimitExceededException"}, {"shape":"InvalidAccountStatusException"}, - {"shape":"ConcurrentModificationException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotAvailableException"}, + {"shape":"InvalidRoleException"} ], "documentation":"

Starts the specified fleet.

" }, @@ -621,7 +624,7 @@ {"shape":"InvalidAccountStatusException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

To list the current tags for your resources, use ListTagsForResource. To disassociate tags from your resources, use UntagResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Adds or overwrites one or more tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

To list the current tags for your resources, use ListTagsForResource. To disassociate tags from your resources, use UntagResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

" }, "UntagResource":{ "name":"UntagResource", @@ -634,7 +637,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disassociates one or more specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Disassociates one or more specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

" }, "UpdateDirectoryConfig":{ "name":"UpdateDirectoryConfig", @@ -671,7 +674,7 @@ {"shape":"IncompatibleImageException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

Updates the specified fleet.

If the fleet is in the STOPPED state, you can update any attribute except the fleet name. If the fleet is in the RUNNING state, you can update the DisplayName, ComputeCapacity, ImageARN, ImageName, and DisconnectTimeoutInSeconds attributes. If the fleet is in the STARTING or STOPPING state, you can't update it.

" + "documentation":"

Updates the specified fleet.

If the fleet is in the STOPPED state, you can update any attribute except the fleet name. If the fleet is in the RUNNING state, you can update the DisplayName, ComputeCapacity, ImageARN, ImageName, IdleDisconnectTimeoutInSeconds, and DisconnectTimeoutInSeconds attributes. If the fleet is in the STARTING or STOPPING state, you can't update it.

" }, "UpdateImagePermissions":{ "name":"UpdateImagePermissions", @@ -711,6 +714,31 @@ } }, "shapes":{ + "AccessEndpoint":{ + "type":"structure", + "required":["EndpointType"], + "members":{ + "EndpointType":{ + "shape":"AccessEndpointType", + "documentation":"

The type of interface endpoint.

" + }, + "VpceId":{ + "shape":"String", + "documentation":"

The identifier (ID) of the interface VPC endpoint.

" + } + }, + "documentation":"

Describes an interface VPC endpoint (interface endpoint) that lets you create a private connection between the virtual private cloud (VPC) that you specify and AppStream 2.0. When you specify an interface endpoint for a stack, users of the stack can connect to AppStream 2.0 only through that endpoint. When you specify an interface endpoint for an image builder, administrators can connect to the image builder only through that endpoint.
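A hedged boto3 sketch of restricting stack access to one interface endpoint via the new AccessEndpoints list (illustrative aside; stack name and vpce id are placeholders):

import boto3

appstream = boto3.client("appstream")

# Users of this stack can then reach AppStream 2.0 only via the endpoint.
appstream.create_stack(
    Name="private-stack",  # placeholder
    AccessEndpoints=[
        {"EndpointType": "STREAMING",          # the only type defined here
         "VpceId": "vpce-0123456789abcdef0"},  # placeholder endpoint id
    ],
)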

" + }, + "AccessEndpointList":{ + "type":"list", + "member":{"shape":"AccessEndpoint"}, + "max":4, + "min":1 + }, + "AccessEndpointType":{ + "type":"string", + "enum":["STREAMING"] + }, "AccountName":{ "type":"string", "min":1, @@ -1066,11 +1094,15 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

The tags to associate with the fleet. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

For more information, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

The tags to associate with the fleet. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

For more information, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

" }, "IdleDisconnectTimeoutInSeconds":{ "shape":"Integer", "documentation":"

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

" + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To assume a role, a fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials.
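A hedged boto3 sketch of a fleet that uses the new IamRoleArn member together with IdleDisconnectTimeoutInSeconds (illustrative aside; fleet name, image, and role ARN are placeholders):

import boto3

appstream = boto3.client("appstream")

appstream.create_fleet(
    Name="example-fleet",
    InstanceType="stream.standard.medium",
    ImageName="example-image",  # placeholder image
    ComputeCapacity={"DesiredInstances": 1},
    IamRoleArn="arn:aws:iam::123456789012:role/AppStreamMachineRole",  # placeholder
    IdleDisconnectTimeoutInSeconds=120,  # a whole number of minutes avoids rounding
)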

" } } }, @@ -1118,6 +1150,10 @@ "shape":"VpcConfig", "documentation":"

The VPC configuration for the image builder. You can specify only one subnet.

" }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to apply to the image builder. To assume a role, the image builder calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials.

" + }, "EnableDefaultInternetAccess":{ "shape":"BooleanObject", "documentation":"

Enables or disables default internet access for the image builder.

" @@ -1132,7 +1168,11 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

The tags to associate with the image builder. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

If you do not specify a value, the value is set to an empty string.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

The tags to associate with the image builder. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

If you do not specify a value, the value is set to an empty string.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

" + }, + "AccessEndpoints":{ + "shape":"AccessEndpointList", + "documentation":"

The list of interface VPC endpoint (interface endpoint) objects. Administrators can connect to the image builder only through the specified endpoints.

" } } }, @@ -1210,7 +1250,11 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

The tags to associate with the stack. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

The tags to associate with the stack. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

" + }, + "AccessEndpoints":{ + "shape":"AccessEndpointList", + "documentation":"

The list of interface VPC endpoint (interface endpoint) objects. Users of the stack can connect to AppStream 2.0 only through the specified endpoints.

" } } }, @@ -1253,7 +1297,7 @@ }, "SessionContext":{ "shape":"String", - "documentation":"

The session context. For more information, see Session Context in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

The session context. For more information, see Session Context in the Amazon AppStream 2.0 Administration Guide.

" } } }, @@ -1960,7 +2004,7 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

The ARN for the fleet.

" + "documentation":"

The Amazon Resource Name (ARN) for the fleet.

" }, "Name":{ "shape":"String", @@ -2029,6 +2073,10 @@ "IdleDisconnectTimeoutInSeconds":{ "shape":"Integer", "documentation":"

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the DisconnectTimeoutInSeconds time interval begins. Users are notified before they are disconnected due to inactivity. If users try to reconnect to the streaming session before the time interval specified in DisconnectTimeoutInSeconds elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected.

To prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600. The default value is 0.

If you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.

" + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"

The ARN of the IAM role that is applied to the fleet. To assume a role, the fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials.

" } }, "documentation":"

Describes a fleet.

" @@ -2039,7 +2087,8 @@ "enum":[ "VPC_CONFIGURATION", "VPC_CONFIGURATION_SECURITY_GROUP_IDS", - "DOMAIN_JOIN_INFO" + "DOMAIN_JOIN_INFO", + "IAM_ROLE_ARN" ] }, "FleetAttributes":{ @@ -2070,6 +2119,8 @@ "NETWORK_INTERFACE_LIMIT_EXCEEDED", "INTERNAL_SERVICE_ERROR", "IAM_SERVICE_ROLE_IS_MISSING", + "MACHINE_ROLE_IS_MISSING", + "STS_DISABLED_IN_REGION", "SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES", "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION", "SUBNET_NOT_FOUND", @@ -2224,6 +2275,10 @@ "shape":"PlatformType", "documentation":"

The operating system platform of the image builder.

" }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"

The ARN of the IAM role that is applied to the image builder. To assume a role, the image builder calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials.

" + }, "State":{ "shape":"ImageBuilderState", "documentation":"

The state of the image builder.

" @@ -2252,6 +2307,10 @@ "AppstreamAgentVersion":{ "shape":"AppstreamAgentVersion", "documentation":"

The version of the AppStream 2.0 agent that is currently being used by the image builder.

" + }, + "AccessEndpoints":{ + "shape":"AccessEndpointList", + "documentation":"

The list of virtual private cloud (VPC) interface endpoint objects. Administrators can connect to the image builder only through the specified endpoints.

" } }, "documentation":"

Describes a virtual machine that is used to create an image.

" @@ -2784,6 +2843,10 @@ "ApplicationSettings":{ "shape":"ApplicationSettingsResponse", "documentation":"

The persistent application settings for users of the stack.

" + }, + "AccessEndpoints":{ + "shape":"AccessEndpointList", + "documentation":"

The list of virtual private cloud (VPC) interface endpoint objects. Users of the stack can connect to AppStream 2.0 only through the specified endpoints.

" } }, "documentation":"

Describes a stack.

" @@ -2798,7 +2861,9 @@ "REDIRECT_URL", "FEEDBACK_URL", "THEME_NAME", - "USER_SETTINGS" + "USER_SETTINGS", + "IAM_ROLE_ARN", + "ACCESS_ENDPOINTS" ] }, "StackAttributes":{ @@ -3120,6 +3185,10 @@ "AttributesToDelete":{ "shape":"FleetAttributes", "documentation":"

The fleet attributes to delete.

" + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To assume a role, a fleet instance calls the AWS Security Token Service (STS) AssumeRole API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials.

" } } }, @@ -3203,6 +3272,10 @@ "ApplicationSettings":{ "shape":"ApplicationSettings", "documentation":"

The persistent application settings for users of a stack. When these settings are enabled, changes that users make to applications and Windows settings are automatically saved after each session and applied to the next session.

" + }, + "AccessEndpoints":{ + "shape":"AccessEndpointList", + "documentation":"

The list of interface VPC endpoint (interface endpoint) objects. Users of the stack can connect to AppStream 2.0 only through the specified endpoints.

" } } }, @@ -3423,5 +3496,5 @@ "documentation":"

Describes VPC configuration information for fleets and image builders.

" } }, - "documentation":"Amazon AppStream 2.0

This is the Amazon AppStream 2.0 API Reference. This documentation provides descriptions and syntax for each of the actions and data types in AppStream 2.0. AppStream 2.0 is a fully managed, secure application streaming service that lets you stream desktop applications to users without rewriting applications. AppStream 2.0 manages the AWS resources that are required to host and run your applications, scales automatically, and provides access to your users on demand.

To learn more about AppStream 2.0, see the following resources:

" + "documentation":"Amazon AppStream 2.0

This is the Amazon AppStream 2.0 API Reference. This documentation provides descriptions and syntax for each of the actions and data types in AppStream 2.0. AppStream 2.0 is a fully managed, secure application streaming service that lets you stream desktop applications to users without rewriting applications. AppStream 2.0 manages the AWS resources that are required to host and run your applications, scales automatically, and provides access to your users on demand.

You can call the AppStream 2.0 API operations by using an interface VPC endpoint (interface endpoint). For more information, see Access AppStream 2.0 API Operations and CLI Commands Through an Interface VPC Endpoint in the Amazon AppStream 2.0 Administration Guide.

To learn more about AppStream 2.0, see the following resources:

" } diff --git a/botocore/data/athena/2017-05-18/service-2.json b/botocore/data/athena/2017-05-18/service-2.json index adc47263..2cb116c3 100644 --- a/botocore/data/athena/2017-05-18/service-2.json +++ b/botocore/data/athena/2017-05-18/service-2.json @@ -139,7 +139,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Returns the results of a single query execution specified by QueryExecutionId if you have access to the workgroup in which the query ran. This request does not execute the query but returns results. Use StartQueryExecution to run a query.

" + "documentation":"

Streams the results of a single query execution specified by QueryExecutionId from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.

To stream query results successfully, the IAM principal with permission to call GetQueryResults also must have permissions to the Amazon S3 GetObject action for the Athena query results location.

IAM principals with permission to the Amazon S3 GetObject action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.
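
A minimal boto3 sketch of retrieving results (the QueryExecutionId is a placeholder); the caller needs s3:GetObject on the query results location in addition to permission for GetQueryResults:

    import boto3

    athena = boto3.client("athena")

    paginator = athena.get_paginator("get_query_results")
    for page in paginator.paginate(QueryExecutionId="..."):
        for row in page["ResultSet"]["Rows"]:
            print([col.get("VarCharValue") for col in row["Data"]])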

" }, "GetWorkGroup":{ "name":"GetWorkGroup", @@ -815,7 +815,7 @@ "type":"integer", "box":true, "max":1000, - "min":0 + "min":1 }, "MaxTagsCount":{ "type":"integer", @@ -908,7 +908,7 @@ }, "Statistics":{ "shape":"QueryExecutionStatistics", - "documentation":"

The amount of data scanned during the query execution and the amount of time that it took to execute, and the type of statement that was run.

" + "documentation":"

The location of a manifest file that tracks file locations generated by the query, the amount of data scanned by the query, and the amount of time that it took the query to run.

" }, "WorkGroup":{ "shape":"WorkGroupName", @@ -958,9 +958,13 @@ "DataScannedInBytes":{ "shape":"Long", "documentation":"

The number of bytes in the data that was queried.

" + }, + "DataManifestLocation":{ + "shape":"String", + "documentation":"

The location and file name of a data manifest file. The manifest file is saved to the Athena query results location in Amazon S3. It tracks files that the query wrote to Amazon S3. If the query fails, the manifest file also tracks files that the query intended to write. The manifest is useful for identifying orphaned files resulting from a failed query. For more information, see Working with Query Output Files in the Amazon Athena User Guide.
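
A minimal boto3 sketch of reading the manifest location from the query statistics (the QueryExecutionId is a placeholder):

    import boto3

    athena = boto3.client("athena")

    resp = athena.get_query_execution(QueryExecutionId="...")
    stats = resp["QueryExecution"]["Statistics"]
    # Present when the query wrote (or intended to write) output files.
    print(stats.get("DataManifestLocation"))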

" } }, - "documentation":"

The amount of data scanned during the query execution and the amount of time that it took to execute, and the type of statement that was run.

" + "documentation":"

The location of a manifest file that tracks file locations generated by the query, the amount of data scanned by the query, and the amount of time that it took the query to run.

" }, "QueryExecutionStatus":{ "type":"structure", @@ -1003,21 +1007,21 @@ "members":{ "OutputLocation":{ "shape":"String", - "documentation":"

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Queries and Query Result Files. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. You must specify the query results location in one of two ways: for individual queries, using this client-side setting, or for all queries in a workgroup, using WorkGroupConfiguration. If neither is set, Athena issues an error that no output location is provided. For more information, see Query Results. If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.
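
A minimal boto3 sketch of the client-side way of specifying the results location:

    import boto3

    athena = boto3.client("athena")

    # Alternatively, set the output location on the workgroup and omit
    # ResultConfiguration here.
    athena.start_query_execution(
        QueryString="SELECT 1",
        ResultConfiguration={"OutputLocation": "s3://path/to/query/bucket/"},
    )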

" }, "EncryptionConfiguration":{ "shape":"EncryptionConfiguration", "documentation":"

If query results are encrypted in Amazon S3, indicates the encryption option used (for example, SSE-KMS or CSE-KMS) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.

" } }, - "documentation":"

The location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. These are known as \"client-side settings\". If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup.

" + "documentation":"

The location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. These are known as \"client-side settings\". If workgroup settings override client-side settings, then the query uses the workgroup settings.

" }, "ResultConfigurationUpdates":{ "type":"structure", "members":{ "OutputLocation":{ "shape":"String", - "documentation":"

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Queries and Query Result Files. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Query Results. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "RemoveOutputLocation":{ "shape":"BoxedBoolean", @@ -1313,7 +1317,7 @@ }, "Configuration":{ "shape":"WorkGroupConfiguration", - "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for query results; whether the Amazon CloudWatch Metrics are enabled for the workgroup; whether workgroup settings override client-side settings; and the data usage limit for the amount of data scanned per query, if it is specified. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for query results; whether the Amazon CloudWatch Metrics are enabled for the workgroup; whether workgroup settings override client-side settings; and the data usage limits for the amount of data scanned per query or per workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "Description":{ "shape":"WorkGroupDescriptionString", @@ -1331,7 +1335,7 @@ "members":{ "ResultConfiguration":{ "shape":"ResultConfiguration", - "documentation":"

The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results.

" + "documentation":"

The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. You must specify the query results location in one of two ways: in the workgroup, using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation. If neither is set, Athena issues an error that no output location is provided. For more information, see Query Results.

" }, "EnforceWorkGroupConfiguration":{ "shape":"BoxedBoolean", @@ -1344,9 +1348,13 @@ "BytesScannedCutoffPerQuery":{ "shape":"BytesScannedCutoffValue", "documentation":"

The upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan.

" + }, + "RequesterPaysEnabled":{ + "shape":"BoxedBoolean", + "documentation":"

If set to true, allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false, workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false. For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide.
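
A minimal boto3 sketch, with a hypothetical workgroup name, that opts a workgroup in to Requester Pays buckets:

    import boto3

    athena = boto3.client("athena")

    # Without RequesterPaysEnabled set to True, queries that reference
    # Requester Pays buckets fail with an error.
    athena.create_work_group(
        Name="example-workgroup",
        Configuration={"RequesterPaysEnabled": True},
    )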

" } }, - "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup and whether workgroup settings override query settings, and the data usage limit for the amount of data scanned per query, if it is specified. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup and whether workgroup settings override query settings, and the data usage limits for the amount of data scanned per query or per workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" }, "WorkGroupConfigurationUpdates":{ "type":"structure", @@ -1370,6 +1378,10 @@ "RemoveBytesScannedCutoffPerQuery":{ "shape":"BoxedBoolean", "documentation":"

Indicates that the data usage control limit per query is removed. WorkGroupConfiguration$BytesScannedCutoffPerQuery

" + }, + "RequesterPaysEnabled":{ + "shape":"BoxedBoolean", + "documentation":"

If set to true, allows members assigned to a workgroup to specify Amazon S3 Requester Pays buckets in queries. If set to false, workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false. For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide.

" } }, "documentation":"

The configuration information that will be updated for this workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup, whether the workgroup settings override the client-side settings, and the data usage limit for the amount of bytes scanned per query, if it is specified.

" diff --git a/botocore/data/codecommit/2015-04-13/service-2.json b/botocore/data/codecommit/2015-04-13/service-2.json index 2acfaef9..77df1527 100644 --- a/botocore/data/codecommit/2015-04-13/service-2.json +++ b/botocore/data/codecommit/2015-04-13/service-2.json @@ -46,6 +46,28 @@ ], "documentation":"

Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy.

" }, + "BatchGetCommits":{ + "name":"BatchGetCommits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetCommitsInput"}, + "output":{"shape":"BatchGetCommitsOutput"}, + "errors":[ + {"shape":"CommitIdsListRequiredException"}, + {"shape":"CommitIdsLimitExceededException"}, + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ], + "documentation":"

Returns information about the contents of one or more commits in a repository.

" + }, "BatchGetRepositories":{ "name":"BatchGetRepositories", "http":{ @@ -1619,6 +1641,58 @@ } } }, + "BatchGetCommitsError":{ + "type":"structure", + "members":{ + "commitId":{ + "shape":"ObjectId", + "documentation":"

A commit ID that either could not be found or was not in a valid format.

" + }, + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

An error code that specifies whether the commit ID was not valid or not found.

" + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

An error message that provides detail about why the commit ID either was not found or was not valid.

" + } + }, + "documentation":"

Returns information about errors in a BatchGetCommits operation.

" + }, + "BatchGetCommitsErrorsList":{ + "type":"list", + "member":{"shape":"BatchGetCommitsError"} + }, + "BatchGetCommitsInput":{ + "type":"structure", + "required":[ + "commitIds", + "repositoryName" + ], + "members":{ + "commitIds":{ + "shape":"CommitIdsInputList", + "documentation":"

The full commit IDs of the commits to get information about.

You must supply the full SHAs of each commit. You cannot use shortened SHAs.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the commits.

" + } + } + }, + "BatchGetCommitsOutput":{ + "type":"structure", + "members":{ + "commits":{ + "shape":"CommitObjectsList", + "documentation":"

An array of commit data type objects, each of which contains information about a specified commit.

" + }, + "errors":{ + "shape":"BatchGetCommitsErrorsList", + "documentation":"

Returns any commit IDs for which information could not be found. For example, if one of the commit IDs was a shortened SHA or the commit was not found in the specified repository, an error object is returned for that ID with additional information.
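
A hedged boto3 sketch (repository name and commit IDs are placeholders) that reads both lists and stays under the 100-commit-ID limit on batch requests:

    import boto3

    codecommit = boto3.client("codecommit")
    commit_ids = ["..."]  # full commit SHAs; shortened SHAs are rejected

    # Send at most 100 commit IDs per request.
    for i in range(0, len(commit_ids), 100):
        resp = codecommit.batch_get_commits(
            commitIds=commit_ids[i:i + 100],
            repositoryName="example-repo",
        )
        for commit in resp["commits"]:
            print(commit["commitId"], commit.get("message"))
        for err in resp["errors"]:
            print("failed:", err["commitId"], err["errorCode"])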

" + } + } + }, "BatchGetRepositoriesInput":{ "type":"structure", "required":["repositoryNames"], @@ -1974,6 +2048,23 @@ "documentation":"

A commit ID was not specified.

", "exception":true }, + "CommitIdsInputList":{ + "type":"list", + "member":{"shape":"ObjectId"} + }, + "CommitIdsLimitExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The maximum number of allowed commit IDs in a batch request is 100. Verify that your batch request contains no more than 100 commit IDs, and then try again.

", + "exception":true + }, + "CommitIdsListRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "CommitMessageLengthExceededException":{ "type":"structure", "members":{ @@ -1982,6 +2073,10 @@ "exception":true }, "CommitName":{"type":"string"}, + "CommitObjectsList":{ + "type":"list", + "member":{"shape":"Commit"} + }, "CommitRequiredException":{ "type":"structure", "members":{ @@ -2684,6 +2779,8 @@ "documentation":"

The encryption key is not available.

", "exception":true }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, "EventDate":{"type":"timestamp"}, "ExceptionName":{"type":"string"}, "File":{ @@ -5989,5 +6086,5 @@ }, "blob":{"type":"blob"} }, - "documentation":"AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

Branches, by calling the following:

Files, by calling the following:

Commits, by calling the following:

Merges, by calling the following:

Pull requests, by calling the following:

Comments in a repository, by calling the following:

Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:

Triggers, by calling the following:

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

" + "documentation":"AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

Branches, by calling the following:

Files, by calling the following:

Commits, by calling the following:

Merges, by calling the following:

Pull requests, by calling the following:

Comments in a repository, by calling the following:

Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:

Triggers, by calling the following:

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

" } diff --git a/botocore/data/codepipeline/2015-07-09/service-2.json b/botocore/data/codepipeline/2015-07-09/service-2.json index d3b04bbd..aaa8d241 100644 --- a/botocore/data/codepipeline/2015-07-09/service-2.json +++ b/botocore/data/codepipeline/2015-07-09/service-2.json @@ -26,7 +26,7 @@ {"shape":"InvalidNonceException"}, {"shape":"JobNotFoundException"} ], - "documentation":"

Returns information about a specified job and whether that job has been received by the job worker. Only used for custom actions.

" + "documentation":"

Returns information about a specified job and whether that job has been received by the job worker. Used for custom actions only.

" }, "AcknowledgeThirdPartyJob":{ "name":"AcknowledgeThirdPartyJob", @@ -42,7 +42,7 @@ {"shape":"JobNotFoundException"}, {"shape":"InvalidClientTokenException"} ], - "documentation":"

Confirms a job worker has received the specified job. Only used for partner actions.

" + "documentation":"

Confirms a job worker has received the specified job. Used for partner actions only.

" }, "CreateCustomActionType":{ "name":"CreateCustomActionType", @@ -81,7 +81,7 @@ {"shape":"InvalidTagsException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Creates a pipeline.

" + "documentation":"

Creates a pipeline.

In the pipeline structure, you must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

" }, "DeleteCustomActionType":{ "name":"DeleteCustomActionType", @@ -94,7 +94,7 @@ {"shape":"ValidationException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Marks a custom action as deleted. PollForJobs for the custom action will fail after the action is marked for deletion. Only used for custom actions.

To re-create a custom action after it has been deleted you must use a string in the version field that has never been used before. This string can be an incremented version number, for example. To restore a deleted custom action, use a JSON file that is identical to the deleted action, including the original string in the version field.

" + "documentation":"

Marks a custom action as deleted. PollForJobs for the custom action fails after the action is marked for deletion. Used for custom actions only.

To re-create a custom action after it has been deleted you must use a string in the version field that has never been used before. This string can be an incremented version number, for example. To restore a deleted custom action, use a JSON file that is identical to the deleted action, including the original string in the version field.

" }, "DeletePipeline":{ "name":"DeletePipeline", @@ -121,7 +121,7 @@ {"shape":"ValidationException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Deletes a previously created webhook by name. Deleting the webhook stops AWS CodePipeline from starting a pipeline every time an external event occurs. The API will return successfully when trying to delete a webhook that is already deleted. If a deleted webhook is re-created by calling PutWebhook with the same name, it will have a different URL.

" + "documentation":"

Deletes a previously created webhook by name. Deleting the webhook stops AWS CodePipeline from starting a pipeline every time an external event occurs. The API returns successfully when trying to delete a webhook that is already deleted. If a deleted webhook is re-created by calling PutWebhook with the same name, it will have a different URL.

" }, "DeregisterWebhookWithThirdParty":{ "name":"DeregisterWebhookWithThirdParty", @@ -135,7 +135,7 @@ {"shape":"ValidationException"}, {"shape":"WebhookNotFoundException"} ], - "documentation":"

Removes the connection between the webhook that was created by CodePipeline and the external tool with events to be detected. Currently only supported for webhooks that target an action type of GitHub.

" + "documentation":"

Removes the connection between the webhook that was created by CodePipeline and the external tool with events to be detected. Currently supported only for webhooks that target an action type of GitHub.

" }, "DisableStageTransition":{ "name":"DisableStageTransition", @@ -177,7 +177,7 @@ {"shape":"ValidationException"}, {"shape":"JobNotFoundException"} ], - "documentation":"

Returns information about a job. Only used for custom actions.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.

" + "documentation":"

Returns information about a job. Used for custom actions only.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

" }, "GetPipeline":{ "name":"GetPipeline", @@ -237,7 +237,7 @@ {"shape":"InvalidClientTokenException"}, {"shape":"InvalidJobException"} ], - "documentation":"

Requests the details of a job for a third party action. Only used for partner actions.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.

" + "documentation":"

Requests the details of a job for a third party action. Used for partner actions only.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

" }, "ListActionExecutions":{ "name":"ListActionExecutions", @@ -312,7 +312,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Gets the set of key/value pairs (metadata) that are used to manage the resource.

" + "documentation":"

Gets the set of key-value pairs (metadata) that are used to manage the resource.

" }, "ListWebhooks":{ "name":"ListWebhooks", @@ -326,7 +326,7 @@ {"shape":"ValidationException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Gets a listing of all the webhooks in this region for this account. The output lists all webhooks and includes the webhook URL and ARN, as well the configuration for each webhook.

" + "documentation":"

Gets a listing of all the webhooks in this AWS Region for this account. The output lists all webhooks and includes the webhook URL and ARN and the configuration for each webhook.

" }, "PollForJobs":{ "name":"PollForJobs", @@ -340,7 +340,7 @@ {"shape":"ValidationException"}, {"shape":"ActionTypeNotFoundException"} ], - "documentation":"

Returns information about any jobs for AWS CodePipeline to act upon. PollForJobs is only valid for action types with \"Custom\" in the owner field. If the action type contains \"AWS\" or \"ThirdParty\" in the owner field, the PollForJobs action returns an error.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.

" + "documentation":"

Returns information about any jobs for AWS CodePipeline to act on. PollForJobs is valid only for action types with \"Custom\" in the owner field. If the action type contains \"AWS\" or \"ThirdParty\" in the owner field, the PollForJobs action returns an error.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

" }, "PollForThirdPartyJobs":{ "name":"PollForThirdPartyJobs", @@ -354,7 +354,7 @@ {"shape":"ActionTypeNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Determines whether there are any third party jobs for a job worker to act on. Only used for partner actions.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts.

" + "documentation":"

Determines whether there are any third party jobs for a job worker to act on. Used for partner actions only.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts.

" }, "PutActionRevision":{ "name":"PutActionRevision", @@ -402,7 +402,7 @@ {"shape":"JobNotFoundException"}, {"shape":"InvalidJobStateException"} ], - "documentation":"

Represents the failure of a job as returned to the pipeline by a job worker. Only used for custom actions.

" + "documentation":"

Represents the failure of a job as returned to the pipeline by a job worker. Used for custom actions only.

" }, "PutJobSuccessResult":{ "name":"PutJobSuccessResult", @@ -416,7 +416,7 @@ {"shape":"JobNotFoundException"}, {"shape":"InvalidJobStateException"} ], - "documentation":"

Represents the success of a job as returned to the pipeline by a job worker. Only used for custom actions.

" + "documentation":"

Represents the success of a job as returned to the pipeline by a job worker. Used for custom actions only.

" }, "PutThirdPartyJobFailureResult":{ "name":"PutThirdPartyJobFailureResult", @@ -431,7 +431,7 @@ {"shape":"InvalidJobStateException"}, {"shape":"InvalidClientTokenException"} ], - "documentation":"

Represents the failure of a third party job as returned to the pipeline by a job worker. Only used for partner actions.

" + "documentation":"

Represents the failure of a third party job as returned to the pipeline by a job worker. Used for partner actions only.

" }, "PutThirdPartyJobSuccessResult":{ "name":"PutThirdPartyJobSuccessResult", @@ -446,7 +446,7 @@ {"shape":"InvalidJobStateException"}, {"shape":"InvalidClientTokenException"} ], - "documentation":"

Represents the success of a third party job as returned to the pipeline by a job worker. Only used for partner actions.

" + "documentation":"

Represents the success of a third party job as returned to the pipeline by a job worker. Used for partner actions only.

" }, "PutWebhook":{ "name":"PutWebhook", @@ -497,7 +497,7 @@ {"shape":"StageNotRetryableException"}, {"shape":"NotLatestPipelineExecutionException"} ], - "documentation":"

Resumes the pipeline execution by retrying the last failed actions in a stage.

" + "documentation":"

Resumes the pipeline execution by retrying the last failed actions in a stage. You can retry a stage immediately if any of the actions in the stage fail. When you retry, all actions that are still in progress continue working, and failed actions are triggered again.
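
A minimal boto3 sketch of retrying the failed actions in a stage (pipeline name, stage name, and execution ID are placeholders):

    import boto3

    codepipeline = boto3.client("codepipeline")

    codepipeline.retry_stage_execution(
        pipelineName="example-pipeline",
        stageName="Deploy",
        pipelineExecutionId="...",
        retryMode="FAILED_ACTIONS",  # retries only the failed actions
    )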

" }, "StartPipelineExecution":{ "name":"StartPipelineExecution", @@ -564,7 +564,7 @@ {"shape":"InvalidStructureException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Updates a specified pipeline with edits or changes to its structure. Use a JSON file with the pipeline structure in conjunction with UpdatePipeline to provide the full structure of the pipeline. Updating the pipeline increases the version number of the pipeline by 1.

" + "documentation":"

Updates a specified pipeline with edits or changes to its structure. Use a JSON file with the pipeline structure and UpdatePipeline to provide the full structure of the pipeline. Updating the pipeline increases the version number of the pipeline by 1.

" } }, "shapes":{ @@ -717,15 +717,15 @@ }, "secret":{ "shape":"Boolean", - "documentation":"

Whether the configuration property is secret. Secrets are hidden from all calls except for GetJobDetails, GetThirdPartyJobDetails, PollForJobs, and PollForThirdPartyJobs.

When updating a pipeline, passing * * * * * without changing any other values of the action will preserve the prior value of the secret.

" + "documentation":"

Whether the configuration property is secret. Secrets are hidden from all calls except for GetJobDetails, GetThirdPartyJobDetails, PollForJobs, and PollForThirdPartyJobs.

When updating a pipeline, passing * * * * * without changing any other values of the action preserves the previous value of the secret.

" }, "queryable":{ "shape":"Boolean", - "documentation":"

Indicates that the property will be used in conjunction with PollForJobs. When creating a custom action, an action can have up to one queryable property. If it has one, that property must be both required and not secret.

If you create a pipeline with a custom action type, and that custom action contains a queryable property, the value for that configuration property is subject to additional restrictions. The value must be less than or equal to twenty (20) characters. The value can contain only alphanumeric characters, underscores, and hyphens.

" + "documentation":"

Indicates that the property is used with PollForJobs. When creating a custom action, an action can have up to one queryable property. If it has one, that property must be both required and not secret.

If you create a pipeline with a custom action type, and that custom action contains a queryable property, the value for that configuration property is subject to other restrictions. The value must be less than or equal to twenty (20) characters. The value can contain only alphanumeric characters, underscores, and hyphens.

" }, "description":{ "shape":"Description", - "documentation":"

The description of the action configuration property that will be displayed to users.

" + "documentation":"

The description of the action configuration property that is displayed to users.

" }, "type":{ "shape":"ActionConfigurationPropertyType", @@ -763,14 +763,14 @@ "members":{ "name":{ "shape":"ActionName", - "documentation":"

The name of the action within the context of a job.

" + "documentation":"

The name of the action in the context of a job.

" }, "actionExecutionId":{ "shape":"ActionExecutionId", "documentation":"

The system-generated unique ID that corresponds to an action's execution.

" } }, - "documentation":"

Represents the context of an action within the stage of a pipeline to a job worker.

" + "documentation":"

Represents the context of an action in the stage of a pipeline to a job worker.

" }, "ActionDeclaration":{ "type":"structure", @@ -785,7 +785,7 @@ }, "actionTypeId":{ "shape":"ActionTypeId", - "documentation":"

The configuration information for the action type.

" + "documentation":"

Specifies the action type and the provider of the action.

" }, "runOrder":{ "shape":"ActionRunOrder", @@ -793,7 +793,7 @@ }, "configuration":{ "shape":"ActionConfigurationMap", - "documentation":"

The action declaration's configuration.

" + "documentation":"

The action's configuration. These are key-value pairs that specify input values for an action. For more information, see Action Structure Requirements in CodePipeline. For the list of configuration properties for the AWS CloudFormation action type in CodePipeline, see Configuration Properties Reference in the AWS CloudFormation User Guide. For template snippets with examples, see Using Parameter Override Functions with CodePipeline Pipelines in the AWS CloudFormation User Guide.

The values can be represented in either JSON or YAML format. For example, the JSON configuration item format is as follows:

JSON:

\"Configuration\" : { Key : Value },

" }, "outputArtifacts":{ "shape":"OutputArtifactList", @@ -805,7 +805,7 @@ }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the IAM service role that will perform the declared action. This is assumed through the roleArn for the pipeline.

" + "documentation":"

The ARN of the IAM service role that performs the declared action. This is assumed through the roleArn for the pipeline.

" }, "region":{ "shape":"AWSRegionName", @@ -831,7 +831,7 @@ }, "token":{ "shape":"ActionExecutionToken", - "documentation":"

The system-generated token used to identify a unique approval request. The token for each open approval request can be obtained using the GetPipelineState command and is used to validate that the approval request corresponding to this token is still valid.

" + "documentation":"

The system-generated token used to identify a unique approval request. The token for each open approval request can be obtained using the GetPipelineState command. It is used to validate that the approval request corresponding to this token is still valid.

" }, "lastUpdatedBy":{ "shape":"LastUpdatedBy", @@ -843,7 +843,7 @@ }, "externalExecutionUrl":{ "shape":"Url", - "documentation":"

The URL of a resource external to AWS that will be used when running the action, for example an external repository URL.

" + "documentation":"

The URL of a resource external to AWS that is used when running the action (for example, an external repository URL).

" }, "percentComplete":{ "shape":"Percentage", @@ -1022,7 +1022,7 @@ }, "revisionChangeId":{ "shape":"RevisionChangeIdentifier", - "documentation":"

The unique identifier of the change that set the state to this revision, for example a deployment ID or timestamp.

" + "documentation":"

The unique identifier of the change that set the state to this revision (for example, a deployment ID or timestamp).

" }, "created":{ "shape":"Timestamp", @@ -1108,7 +1108,7 @@ "members":{ "category":{ "shape":"ActionCategory", - "documentation":"

A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.

" + "documentation":"

A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values.

" }, "owner":{ "shape":"ActionOwner", @@ -1116,7 +1116,7 @@ }, "provider":{ "shape":"ActionProvider", - "documentation":"

The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. To reference a list of action providers by action type, see Valid Action Types and Providers in CodePipeline.

" + "documentation":"

The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. For more information, see Valid Action Types and Providers in CodePipeline.

" }, "version":{ "shape":"Version", @@ -1145,11 +1145,11 @@ }, "entityUrlTemplate":{ "shape":"UrlTemplate", - "documentation":"

The URL returned to the AWS CodePipeline console that provides a deep link to the resources of the external system, such as the configuration page for an AWS CodeDeploy deployment group. This link is provided as part of the action display within the pipeline.

" + "documentation":"

The URL returned to the AWS CodePipeline console that provides a deep link to the resources of the external system, such as the configuration page for an AWS CodeDeploy deployment group. This link is provided as part of the action display in the pipeline.

" }, "executionUrlTemplate":{ "shape":"UrlTemplate", - "documentation":"

The URL returned to the AWS CodePipeline console that contains a link to the top-level landing page for the external system, such as console page for AWS CodeDeploy. This link is shown on the pipeline view page in the AWS CodePipeline console and provides a link to the execution entity of the external action.

" + "documentation":"

The URL returned to the AWS CodePipeline console that contains a link to the top-level landing page for the external system, such as the console page for AWS CodeDeploy. This link is shown on the pipeline view page in the AWS CodePipeline console and provides a link to the execution entity of the external action.

" }, "revisionUrlTemplate":{ "shape":"UrlTemplate", @@ -1215,7 +1215,7 @@ "documentation":"

The location of an artifact.

" } }, - "documentation":"

Represents information about an artifact that will be worked upon by actions in the pipeline.

" + "documentation":"

Represents information about an artifact that is worked on by actions in the pipeline.

" }, "ArtifactDetail":{ "type":"structure", @@ -1286,7 +1286,7 @@ "members":{ "name":{ "shape":"ArtifactName", - "documentation":"

The name of an artifact. This name might be system-generated, such as \"MyApp\", or might be defined by the user when an action is created.

" + "documentation":"

The name of an artifact. This name might be system-generated, such as \"MyApp\", or defined by the user when an action is created.

" }, "revisionId":{ "shape":"Revision", @@ -1328,14 +1328,14 @@ }, "location":{ "shape":"ArtifactStoreLocation", - "documentation":"

The Amazon S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder within the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any Amazon S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.

" + "documentation":"

The Amazon S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder in the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any Amazon S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.

" }, "encryptionKey":{ "shape":"EncryptionKey", "documentation":"

The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.

" } }, - "documentation":"

The Amazon S3 bucket where artifacts are stored for the pipeline.

" + "documentation":"

The Amazon S3 bucket where artifacts for the pipeline are stored.

You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

" }, "ArtifactStoreLocation":{ "type":"string", @@ -1611,11 +1611,11 @@ }, "transitionType":{ "shape":"StageTransitionType", - "documentation":"

Specifies whether artifacts will be prevented from transitioning into the stage and being processed by the actions in that stage (inbound), or prevented from transitioning from the stage after they have been processed by the actions in that stage (outbound).

" + "documentation":"

Specifies whether artifacts are prevented from transitioning into the stage and being processed by the actions in that stage (inbound), or prevented from transitioning from the stage after they have been processed by the actions in that stage (outbound).

" }, "reason":{ "shape":"DisabledReason", - "documentation":"

The reason given to the user why a stage is disabled, such as waiting for manual approval or manual tests. This message is displayed in the pipeline console UI.

" + "documentation":"

The reason given to the user that a stage is disabled, such as waiting for manual approval or manual tests. This message is displayed in the pipeline console UI.

" } }, "documentation":"

Represents the input of a DisableStageTransition action.

" @@ -1644,7 +1644,7 @@ }, "transitionType":{ "shape":"StageTransitionType", - "documentation":"

Specifies whether artifacts will be allowed to enter the stage and be processed by the actions in that stage (inbound) or whether already-processed artifacts will be allowed to transition to the next stage (outbound).

" + "documentation":"

Specifies whether artifacts are allowed to enter the stage and be processed by the actions in that stage (inbound) or whether already processed artifacts are allowed to transition to the next stage (outbound).

" } }, "documentation":"

Represents the input of an EnableStageTransition action.

" @@ -1659,7 +1659,7 @@ "members":{ "id":{ "shape":"EncryptionKeyId", - "documentation":"

The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.

" + "documentation":"

The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN.

Aliases are recognized only in the account that created the customer master key (CMK). For cross-account actions, you can only use the key ID or key ARN to identify the key.
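
A hypothetical sketch of an artifact store entry in the pipeline structure that references a KMS key by key ARN (bucket name and ARN are placeholders):

    # With no encryptionKey, the default Amazon S3 key would be used.
    artifact_store = {
        "type": "S3",
        "location": "example-artifact-bucket",
        "encryptionKey": {
            "id": "arn:aws:kms:us-east-1:111122223333:key/...",
            "type": "KMS",
        },
    }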

" }, "type":{ "shape":"EncryptionKeyType", @@ -1682,7 +1682,7 @@ "members":{ "code":{ "shape":"Code", - "documentation":"

The system ID or error number code of the error.

" + "documentation":"

The system ID or number code of the error.

" }, "message":{ "shape":"Message", @@ -1704,7 +1704,7 @@ }, "percentComplete":{ "shape":"Percentage", - "documentation":"

The percentage of work completed on the action, represented on a scale of zero to one hundred percent.

" + "documentation":"

The percentage of work completed on the action, represented on a scale of 0 to 100 percent.

" } }, "documentation":"

The details of the actions taken and results produced on an artifact as it passes through stages in the pipeline.

" @@ -1719,6 +1719,20 @@ "max":2048, "min":1 }, + "ExecutionTrigger":{ + "type":"structure", + "members":{ + "triggerType":{ + "shape":"TriggerType", + "documentation":"

The type of change-detection method, command, or user interaction that started a pipeline execution.

" + }, + "triggerDetail":{ + "shape":"TriggerDetail", + "documentation":"

Detail related to the event that started a pipeline execution, such as the webhook ARN of the webhook that triggered the pipeline execution or the user ARN for a user-initiated start-pipeline-execution CLI command.

" + } + }, + "documentation":"

The interaction or event that started a pipeline execution.

" + }, "ExternalExecutionId":{"type":"string"}, "ExternalExecutionSummary":{"type":"string"}, "FailureDetails":{ @@ -1809,11 +1823,11 @@ "members":{ "name":{ "shape":"PipelineName", - "documentation":"

The name of the pipeline for which you want to get information. Pipeline names must be unique under an Amazon Web Services (AWS) user account.

" + "documentation":"

The name of the pipeline for which you want to get information. Pipeline names must be unique under an AWS user account.

" }, "version":{ "shape":"PipelineVersion", - "documentation":"

The version number of the pipeline. If you do not specify a version, defaults to the most current version.

" + "documentation":"

The version number of the pipeline. If you do not specify a version, defaults to the current version.

" } }, "documentation":"

Represents the input of a GetPipeline action.

" @@ -1852,7 +1866,7 @@ }, "pipelineVersion":{ "shape":"PipelineVersion", - "documentation":"

The version number of the pipeline.

A newly-created pipeline is always assigned a version number of 1.

" + "documentation":"

The version number of the pipeline.

A newly created pipeline is always assigned a version number of 1.

" }, "stageStates":{ "shape":"StageStateList", @@ -1903,7 +1917,7 @@ "members":{ "name":{ "shape":"ArtifactName", - "documentation":"

The name of the artifact to be worked on, for example, \"My App\".

The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.

" + "documentation":"

The name of the artifact to be worked on (for example, \"My App\").

The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.

" } }, "documentation":"

Represents information about an artifact to be worked on, such as a test or build artifact.

" @@ -1916,7 +1930,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified action declaration was specified in an invalid format.

", + "documentation":"

The action declaration was specified in an invalid format.

", "exception":true }, "InvalidApprovalTokenException":{ @@ -1952,42 +1966,42 @@ "type":"structure", "members":{ }, - "documentation":"

The specified job was specified in an invalid format or cannot be found.

", + "documentation":"

The job was specified in an invalid format or cannot be found.

", "exception":true }, "InvalidJobStateException":{ "type":"structure", "members":{ }, - "documentation":"

The specified job state was specified in an invalid format.

", + "documentation":"

The job state was specified in an invalid format.

", "exception":true }, "InvalidNextTokenException":{ "type":"structure", "members":{ }, - "documentation":"

The next token was specified in an invalid format. Make sure that the next token you provided is the token returned by a previous call.

", + "documentation":"

The next token was specified in an invalid format. Make sure that the next token you provide is the token returned by a previous call.

", "exception":true }, "InvalidNonceException":{ "type":"structure", "members":{ }, - "documentation":"

The specified nonce was specified in an invalid format.

", + "documentation":"

The nonce was specified in an invalid format.

", "exception":true }, "InvalidStageDeclarationException":{ "type":"structure", "members":{ }, - "documentation":"

The specified stage declaration was specified in an invalid format.

", + "documentation":"

The stage declaration was specified in an invalid format.

", "exception":true }, "InvalidStructureException":{ "type":"structure", "members":{ }, - "documentation":"

The specified structure was specified in an invalid format.

", + "documentation":"

The structure was specified in an invalid format.

", "exception":true }, "InvalidTagsException":{ @@ -2021,7 +2035,7 @@ }, "data":{ "shape":"JobData", - "documentation":"

Additional data about a job.

" + "documentation":"

Other data about a job.

" }, "nonce":{ "shape":"Nonce", @@ -2047,7 +2061,7 @@ }, "pipelineContext":{ "shape":"PipelineContext", - "documentation":"

Represents information about a pipeline to a job worker.

Includes pipelineArn and pipelineExecutionId for Custom jobs.

" + "documentation":"

Represents information about a pipeline to a job worker.

Includes pipelineArn and pipelineExecutionId for custom jobs.

" }, "inputArtifacts":{ "shape":"ArtifactList", @@ -2063,14 +2077,14 @@ }, "continuationToken":{ "shape":"ContinuationToken", - "documentation":"

A system-generated token, such as a AWS CodeDeploy deployment ID, that a job requires in order to continue the job asynchronously.

" + "documentation":"

A system-generated token, such as a AWS CodeDeploy deployment ID, required by a job to continue the job asynchronously.

" }, "encryptionKey":{ "shape":"EncryptionKey", "documentation":"

Represents information about the key used to encrypt data in the artifact store, such as an AWS Key Management Service (AWS KMS) key.

" } }, - "documentation":"

Represents additional information about a job required for a job worker to complete the job.

" + "documentation":"

Represents other information about a job required for a job worker to complete the job.

" }, "JobDetails":{ "type":"structure", @@ -2081,7 +2095,7 @@ }, "data":{ "shape":"JobData", - "documentation":"

Represents additional information about a job required for a job worker to complete the job.

" + "documentation":"

Represents other information about a job required for a job worker to complete the job.

" }, "accountId":{ "shape":"AccountId", @@ -2102,7 +2116,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified job was specified in an invalid format or cannot be found.

", + "documentation":"

The job was specified in an invalid format or cannot be found.

", "exception":true }, "JobStatus":{ @@ -2191,7 +2205,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

If the amount of returned information is significantly large, an identifier is also returned which can be used in a subsequent list action types call to return the next set of action types in the list.

" + "documentation":"

If the amount of returned information is significantly large, an identifier is also returned. It can be used in a subsequent list action types call to return the next set of action types in the list.

" } }, "documentation":"

Represents the output of a ListActionTypes action.

" @@ -2234,7 +2248,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

An identifier that was returned from the previous list pipelines call, which can be used to return the next set of pipelines in the list.

" + "documentation":"

An identifier that was returned from the previous list pipelines call. It can be used to return the next set of pipelines in the list.

" } }, "documentation":"

Represents the input of a ListPipelines action.

" @@ -2248,7 +2262,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

If the amount of returned information is significantly large, an identifier is also returned which can be used in a subsequent list pipelines call to return the next set of pipelines in the list.

" + "documentation":"

If the amount of returned information is significantly large, an identifier is also returned. It can be used in a subsequent list pipelines call to return the next set of pipelines in the list.

" } }, "documentation":"

Represents the output of a ListPipelines action.

" @@ -2263,7 +2277,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token that was returned from the previous API call, which would be used to return the next page of the list. However, the ListTagsforResource call lists all available tags in one call and does not use pagination.

" + "documentation":"

The token that was returned from the previous API call, which would be used to return the next page of the list. The ListTagsforResource call lists all available tags in one call and does not use pagination.

" }, "maxResults":{ "shape":"MaxResults", @@ -2280,7 +2294,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

If the amount of returned information is significantly large, an identifier is also returned and can be used in a subsequent API call to return the next page of the list. However, the ListTagsforResource call lists all available tags in one call and does not use pagination.

" + "documentation":"

If the amount of returned information is significantly large, an identifier is also returned and can be used in a subsequent API call to return the next page of the list. The ListTagsforResource call lists all available tags in one call and does not use pagination.

" } } }, @@ -2297,7 +2311,7 @@ }, "url":{ "shape":"WebhookUrl", - "documentation":"

A unique URL generated by CodePipeline. When a POST request is made to this URL, the defined pipeline is started as long as the body of the post request satisfies the defined authentication and filtering conditions. Deleting and re-creating a webhook will make the old URL invalid and generate a new URL.

" + "documentation":"

A unique URL generated by CodePipeline. When a POST request is made to this URL, the defined pipeline is started as long as the body of the post request satisfies the defined authentication and filtering conditions. Deleting and re-creating a webhook makes the old URL invalid and generates a new one.

" }, "errorMessage":{ "shape":"WebhookErrorMessage", @@ -2431,7 +2445,7 @@ }, "action":{ "shape":"ActionContext", - "documentation":"

The context of an action to a job worker within the stage of a pipeline.

" + "documentation":"

The context of an action to a job worker in the stage of a pipeline.

" }, "pipelineArn":{ "shape":"PipelineArn", @@ -2462,11 +2476,11 @@ }, "artifactStore":{ "shape":"ArtifactStore", - "documentation":"

Represents information about the Amazon S3 bucket where artifacts are stored for the pipeline.

" + "documentation":"

Represents information about the Amazon S3 bucket where artifacts are stored for the pipeline.

You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

" }, "artifactStores":{ "shape":"ArtifactStoreMap", - "documentation":"

A mapping of artifactStore objects and their corresponding regions. There must be an artifact store for the pipeline region and for each cross-region action within the pipeline. You can only use either artifactStore or artifactStores, not both.

If you create a cross-region action in your pipeline, you must use artifactStores.

" + "documentation":"

A mapping of artifactStore objects and their corresponding AWS Regions. There must be an artifact store for the pipeline Region and for each cross-region action in the pipeline.

You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

" }, "stages":{ "shape":"PipelineStageDeclarationList", @@ -2474,7 +2488,7 @@ }, "version":{ "shape":"PipelineVersion", - "documentation":"

The version number of the pipeline. A new pipeline always has a version number of 1. This number is automatically incremented when a pipeline is updated.

" + "documentation":"

The version number of the pipeline. A new pipeline always has a version number of 1. This number is incremented when a pipeline is updated.

" } }, "documentation":"

Represents the structure of actions and stages to be performed in the pipeline.

" @@ -2547,6 +2561,10 @@ "sourceRevisions":{ "shape":"SourceRevisionList", "documentation":"

A list of the source artifact revisions that initiated a pipeline execution.

" + }, + "trigger":{ + "shape":"ExecutionTrigger", + "documentation":"

The interaction or event that started a pipeline execution, such as automated change detection or a StartPipelineExecution API call.

" } }, "documentation":"

Summary information about a pipeline execution.

" @@ -2594,7 +2612,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified pipeline was specified in an invalid format or cannot be found.

", + "documentation":"

The pipeline was specified in an invalid format or cannot be found.

", "exception":true }, "PipelineStageDeclarationList":{ @@ -2631,7 +2649,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified pipeline version was specified in an invalid format or cannot be found.

", + "documentation":"

The pipeline version was specified in an invalid format or cannot be found.

", "exception":true }, "PollForJobsInput":{ @@ -2648,7 +2666,7 @@ }, "queryParam":{ "shape":"QueryParamMap", - "documentation":"

A map of property names and values. For an action type with no queryable properties, this value must be null or an empty map. For an action type with a queryable property, you must supply that property as a key in the map. Only jobs whose action configuration matches the mapped value will be returned.

" + "documentation":"

A map of property names and values. For an action type with no queryable properties, this value must be null or an empty map. For an action type with a queryable property, you must supply that property as a key in the map. Only jobs whose action configuration matches the mapped value are returned.

" } }, "documentation":"

Represents the input of a PollForJobs action.

" @@ -2699,15 +2717,15 @@ "members":{ "pipelineName":{ "shape":"PipelineName", - "documentation":"

The name of the pipeline that will start processing the revision to the source.

" + "documentation":"

The name of the pipeline that starts processing the revision to the source.

" }, "stageName":{ "shape":"StageName", - "documentation":"

The name of the stage that contains the action that will act upon the revision.

" + "documentation":"

The name of the stage that contains the action that acts on the revision.

" }, "actionName":{ "shape":"ActionName", - "documentation":"

The name of the action that will process the revision.

" + "documentation":"

The name of the action that processes the revision.

" }, "actionRevision":{ "shape":"ActionRevision", @@ -2758,7 +2776,7 @@ }, "token":{ "shape":"ApprovalToken", - "documentation":"

The system-generated token used to identify a unique approval request. The token for each open approval request can be obtained using the GetPipelineState action and is used to validate that the approval request corresponding to this token is still valid.

" + "documentation":"

The system-generated token used to identify a unique approval request. The token for each open approval request can be obtained using the GetPipelineState action. It is used to validate that the approval request corresponding to this token is still valid.

" } }, "documentation":"

Represents the input of a PutApprovalResult action.

" @@ -2801,11 +2819,11 @@ }, "currentRevision":{ "shape":"CurrentRevision", - "documentation":"

The ID of the current revision of the artifact successfully worked upon by the job.

" + "documentation":"

The ID of the current revision of the artifact successfully worked on by the job.

" }, "continuationToken":{ "shape":"ContinuationToken", - "documentation":"

A token generated by a job worker, such as an AWS CodeDeploy deployment ID, that a successful job provides to identify a custom action in progress. Future jobs will use this token in order to identify the running instance of the action. It can be reused to return additional information about the progress of the custom action. When the action is complete, no continuation token should be supplied.

" + "documentation":"

A token generated by a job worker, such as an AWS CodeDeploy deployment ID, that a successful job provides to identify a custom action in progress. Future jobs use this token to identify the running instance of the action. It can be reused to return more information about the progress of the custom action. When the action is complete, no continuation token should be supplied.

" }, "executionDetails":{ "shape":"ExecutionDetails", @@ -2858,7 +2876,7 @@ }, "continuationToken":{ "shape":"ContinuationToken", - "documentation":"

A token generated by a job worker, such as an AWS CodeDeploy deployment ID, that a successful job provides to identify a partner action in progress. Future jobs will use this token in order to identify the running instance of the action. It can be reused to return additional information about the progress of the partner action. When the action is complete, no continuation token should be supplied.

" + "documentation":"

A token generated by a job worker, such as an AWS CodeDeploy deployment ID, that a successful job provides to identify a partner action in progress. Future jobs use this token to identify the running instance of the action. It can be reused to return more information about the progress of the partner action. When the action is complete, no continuation token should be supplied.

" }, "executionDetails":{ "shape":"ExecutionDetails", @@ -2873,7 +2891,7 @@ "members":{ "webhook":{ "shape":"WebhookDefinition", - "documentation":"

The detail provided in an input file to create the webhook, such as the webhook name, the pipeline name, and the action name. Give the webhook a unique name which identifies the webhook being defined. You may choose to name the webhook after the pipeline and action it targets so that you can easily recognize what it's used for later.

" + "documentation":"

The detail provided in an input file to create the webhook, such as the webhook name, the pipeline name, and the action name. Give the webhook a unique name that helps you identify it. You might name the webhook after the pipeline and action it targets so that you can easily recognize what it's used for later.

" }, "tags":{ "shape":"TagList", @@ -2919,7 +2937,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified resource was specified in an invalid format.

", + "documentation":"

The resource was specified in an invalid format.

", "exception":true }, "RetryStageExecutionInput":{ @@ -3129,14 +3147,14 @@ "type":"structure", "members":{ }, - "documentation":"

The specified stage was specified in an invalid format or cannot be found.

", + "documentation":"

The stage was specified in an invalid format or cannot be found.

", "exception":true }, "StageNotRetryableException":{ "type":"structure", "members":{ }, - "documentation":"

The specified stage can't be retried because the pipeline structure or stage state changed after the stage was not completed; the stage contains no failed actions; one or more actions are still in progress; or another retry attempt is already in progress.

", + "documentation":"

Unable to retry. The pipeline structure or stage state might have changed while actions awaited retry, or the stage contains no failed actions.

", "exception":true }, "StageRetryMode":{ @@ -3218,7 +3236,7 @@ "documentation":"

The tag's value.

" } }, - "documentation":"

A tag is a key/value pair that is used to manage the resource.

" + "documentation":"

A tag is a key-value pair that is used to manage the resource.

" }, "TagKey":{ "type":"string", @@ -3272,7 +3290,7 @@ "documentation":"

The identifier used to identify the job in AWS CodePipeline.

" } }, - "documentation":"

A response to a PollForThirdPartyJobs request returned by AWS CodePipeline when there is a job to be worked upon by a partner action.

" + "documentation":"

A response to a PollForThirdPartyJobs request returned by AWS CodePipeline when there is a job to be worked on by a partner action.

" }, "ThirdPartyJobData":{ "type":"structure", @@ -3291,11 +3309,11 @@ }, "inputArtifacts":{ "shape":"ArtifactList", - "documentation":"

The name of the artifact that will be worked upon by the action, if any. This name might be system-generated, such as \"MyApp\", or might be defined by the user when the action is created. The input artifact name must match the name of an output artifact generated by an action in an earlier action or stage of the pipeline.

" + "documentation":"

The name of the artifact that is worked on by the action, if any. This name might be system-generated, such as \"MyApp\", or it might be defined by the user when the action is created. The input artifact name must match the name of an output artifact generated by an action in an earlier action or stage of the pipeline.

" }, "outputArtifacts":{ "shape":"ArtifactList", - "documentation":"

The name of the artifact that will be the result of the action, if any. This name might be system-generated, such as \"MyBuiltApp\", or might be defined by the user when the action is created.

" + "documentation":"

The name of the artifact that is the result of the action, if any. This name might be system-generated, such as \"MyBuiltApp\", or it might be defined by the user when the action is created.

" }, "artifactCredentials":{ "shape":"AWSSessionCredentials", @@ -3303,7 +3321,7 @@ }, "continuationToken":{ "shape":"ContinuationToken", - "documentation":"

A system-generated token, such as a AWS CodeDeploy deployment ID, that a job requires in order to continue the job asynchronously.

" + "documentation":"

A system-generated token, such as an AWS CodeDeploy deployment ID, that a job requires to continue the job asynchronously.

" }, "encryptionKey":{ "shape":"EncryptionKey", @@ -3371,6 +3389,22 @@ }, "documentation":"

Represents information about the state of transitions between one stage and another stage.

" }, + "TriggerDetail":{ + "type":"string", + "max":1024, + "min":0 + }, + "TriggerType":{ + "type":"string", + "enum":[ + "CreatePipeline", + "StartPipelineExecution", + "PollForSourceChanges", + "Webhook", + "CloudWatchEvent", + "PutActionRevision" + ] + }, "UntagResourceInput":{ "type":"structure", "required":[ @@ -3443,7 +3477,7 @@ "members":{ "AllowedIPRange":{ "shape":"WebhookAuthConfigurationAllowedIPRange", - "documentation":"

The property used to configure acceptance of webhooks within a specific IP range. For IP, only the AllowedIPRange property must be set, and this property must be set to a valid CIDR range.

" + "documentation":"

The property used to configure acceptance of webhooks in an IP address range. For IP, only the AllowedIPRange property must be set. This property must be set to a valid CIDR range.

" }, "SecretToken":{ "shape":"WebhookAuthConfigurationSecretToken", @@ -3499,7 +3533,7 @@ }, "authentication":{ "shape":"WebhookAuthenticationType", - "documentation":"

Supported options are GITHUB_HMAC, IP and UNAUTHENTICATED.

" + "documentation":"

Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.

" }, "authenticationConfiguration":{ "shape":"WebhookAuthConfiguration", @@ -3516,11 +3550,11 @@ "members":{ "jsonPath":{ "shape":"JsonPath", - "documentation":"

A JsonPath expression that will be applied to the body/payload of the webhook. The value selected by the JsonPath expression must match the value specified in the MatchEquals field, otherwise the request will be ignored. For more information about JsonPath expressions, see Java JsonPath implementation in GitHub.

" + "documentation":"

A JsonPath expression that is applied to the body/payload of the webhook. The value selected by the JsonPath expression must match the value specified in the MatchEquals field. Otherwise, the request is ignored. For more information, see Java JsonPath implementation in GitHub.

" }, "matchEquals":{ "shape":"MatchEquals", - "documentation":"

The value selected by the JsonPath expression must match what is supplied in the MatchEquals field, otherwise the request will be ignored. Properties from the target action configuration can be included as placeholders in this value by surrounding the action configuration key with curly braces. For example, if the value supplied here is \"refs/heads/{Branch}\" and the target action has an action configuration property called \"Branch\" with a value of \"master\", the MatchEquals value will be evaluated as \"refs/heads/master\". For a list of action configuration properties for built-in action types, see Pipeline Structure Reference Action Requirements.

" + "documentation":"

The value selected by the JsonPath expression must match what is supplied in the MatchEquals field. Otherwise, the request is ignored. Properties from the target action configuration can be included as placeholders in this value by surrounding the action configuration key with curly brackets. For example, if the value supplied here is \"refs/heads/{Branch}\" and the target action has an action configuration property called \"Branch\" with a value of \"master\", the MatchEquals value is evaluated as \"refs/heads/master\". For a list of action configuration properties for built-in action types, see Pipeline Structure Reference Action Requirements.

" } }, "documentation":"

The event criteria that specify when a webhook notification is sent to your URL.

" @@ -3554,5 +3588,5 @@ "min":1 } }, - "documentation":"AWS CodePipeline

Overview

This is the AWS CodePipeline API Reference. This guide provides descriptions of the actions and data types for AWS CodePipeline. Some functionality for your pipeline is only configurable through the API. For additional information, see the AWS CodePipeline User Guide.

You can use the AWS CodePipeline API to work with pipelines, stages, actions, and transitions, as described below.

Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions.

You can work with pipelines by calling:

Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage will result in success or failure. If a stage fails, then the pipeline stops at that stage and will remain stopped until either a new version of an artifact appears in the source location, or a user takes action to re-run the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, also refer to the AWS CodePipeline Pipeline Structure Reference.

Pipeline stages include actions, which are categorized into categories such as source or build actions performed within a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are:

Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete.

You can work with transitions by calling:

Using the API to integrate with AWS CodePipeline

For third-party integrators or developers who want to create their own integrations with AWS CodePipeline, the expected sequence varies from the standard API user. In order to integrate with AWS CodePipeline, developers will need to work with the following items:

Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source.

You can work with jobs by calling:

Third party jobs, which are instances of an action created by a partner action and integrated into AWS CodePipeline. Partner actions are created by members of the AWS Partner Network.

You can work with third party jobs by calling:

" + "documentation":"AWS CodePipeline

Overview

This is the AWS CodePipeline API Reference. This guide provides descriptions of the actions and data types for AWS CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the AWS CodePipeline User Guide.

You can use the AWS CodePipeline API to work with pipelines, stages, actions, and transitions.

Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions.

You can work with pipelines by calling:

Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see AWS CodePipeline Pipeline Structure Reference.

Pipeline stages include actions that are categorized as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are:

Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete.

You can work with transitions by calling:

Using the API to integrate with AWS CodePipeline

For third-party integrators or developers who want to create their own integrations with AWS CodePipeline, the expected sequence varies from the standard API user. To integrate with AWS CodePipeline, developers need to work with the following items:

Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source.

You can work with jobs by calling:

Third party jobs, which are instances of an action created by a partner action and integrated into AWS CodePipeline. Partner actions are created by members of the AWS Partner Network.

You can work with third party jobs by calling:

" } diff --git a/botocore/data/comprehendmedical/2018-10-30/service-2.json b/botocore/data/comprehendmedical/2018-10-30/service-2.json index 29759576..372eb851 100644 --- a/botocore/data/comprehendmedical/2018-10-30/service-2.json +++ b/botocore/data/comprehendmedical/2018-10-30/service-2.json @@ -14,6 +14,38 @@ "uid":"comprehendmedical-2018-10-30" }, "operations":{ + "DescribeEntitiesDetectionV2Job":{ + "name":"DescribeEntitiesDetectionV2Job", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEntitiesDetectionV2JobRequest"}, + "output":{"shape":"DescribeEntitiesDetectionV2JobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets the properties associated with a medical entities detection job. Use this operation to get the status of a detection job.

" + }, + "DescribePHIDetectionJob":{ + "name":"DescribePHIDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePHIDetectionJobRequest"}, + "output":{"shape":"DescribePHIDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets the properties associated with a protected health information (PHI) detection job. Use this operation to get the status of a detection job.

" + }, "DetectEntities":{ "name":"DetectEntities", "http":{ @@ -30,7 +62,27 @@ {"shape":"InvalidEncodingException"}, {"shape":"TextSizeLimitExceededException"} ], - "documentation":"

Inspects the clinical text for a variety of medical entities and returns specific information about them such as entity category, location, and confidence score on that information .

" + "documentation":"

The DetectEntities operation is deprecated. You should use the DetectEntitiesV2 operation instead.

Inspects the clinical text for a variety of medical entities and returns specific information about them such as entity category, location, and confidence score on that information.

", + "deprecated":true, + "deprecatedMessage":"This operation is deprecated, use DetectEntitiesV2 instead." + }, + "DetectEntitiesV2":{ + "name":"DetectEntitiesV2", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetectEntitiesV2Request"}, + "output":{"shape":"DetectEntitiesV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidEncodingException"}, + {"shape":"TextSizeLimitExceededException"} + ], + "documentation":"

Inspects the clinical text for a variety of medical entities and returns specific information about them such as entity category, location, and confidence score on that information.

The DetectEntitiesV2 operation replaces the DetectEntities operation. This new action uses a different model for determining the entities in your medical text and changes the way that some entities are returned in the output. You should use the DetectEntitiesV2 operation in all new applications.

The DetectEntitiesV2 operation returns the Acuity and Direction entities as attributes instead of types. It does not return the Quality or Quantity entities.

" }, "DetectPHI":{ "name":"DetectPHI", @@ -48,10 +100,105 @@ {"shape":"InvalidEncodingException"}, {"shape":"TextSizeLimitExceededException"} ], - "documentation":"

Inspects the clinical text for personal health information (PHI) entities and entity category, location, and confidence score on that information.

" + "documentation":"

Inspects the clinical text for protected health information (PHI) entities, and returns the entity category, location, and confidence score for that information.

" + }, + "ListEntitiesDetectionV2Jobs":{ + "name":"ListEntitiesDetectionV2Jobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEntitiesDetectionV2JobsRequest"}, + "output":{"shape":"ListEntitiesDetectionV2JobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a list of medical entity detection jobs that you have submitted.

" + }, + "ListPHIDetectionJobs":{ + "name":"ListPHIDetectionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPHIDetectionJobsRequest"}, + "output":{"shape":"ListPHIDetectionJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a list of protected health information (PHI) detection jobs that you have submitted.

" + }, + "StartEntitiesDetectionV2Job":{ + "name":"StartEntitiesDetectionV2Job", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartEntitiesDetectionV2JobRequest"}, + "output":{"shape":"StartEntitiesDetectionV2JobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an asynchronous medical entity detection job for a collection of documents. Use the DescribeEntitiesDetectionV2Job operation to track the status of a job.

" + }, + "StartPHIDetectionJob":{ + "name":"StartPHIDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartPHIDetectionJobRequest"}, + "output":{"shape":"StartPHIDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an asynchronous job to detect protected health information (PHI). Use the DescribePHIDetectionJob operation to track the status of a job.

" + }, + "StopEntitiesDetectionV2Job":{ + "name":"StopEntitiesDetectionV2Job", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopEntitiesDetectionV2JobRequest"}, + "output":{"shape":"StopEntitiesDetectionV2JobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops a medical entities detection job in progress.

" + }, + "StopPHIDetectionJob":{ + "name":"StopPHIDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopPHIDetectionJobRequest"}, + "output":{"shape":"StopPHIDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops a protected health information (PHI) detection job in progress.

" } }, "shapes":{ + "AnyLengthString":{"type":"string"}, "Attribute":{ "type":"structure", "members":{ @@ -61,11 +208,11 @@ }, "Score":{ "shape":"Float", - "documentation":"

The level of confidence that Comprehend Medical has that the segment of text is correctly recognized as an attribute.

" + "documentation":"

The level of confidence that Amazon Comprehend Medical has that the segment of text is correctly recognized as an attribute.

" }, "RelationshipScore":{ "shape":"Float", - "documentation":"

The level of confidence that Comprehend Medical has that this attribute is correctly related to this entity.

" + "documentation":"

The level of confidence that Amazon Comprehend Medical has that this attribute is correctly related to this entity.

" }, "Id":{ "shape":"Integer", @@ -108,6 +255,138 @@ "max":20000, "min":1 }, + "ClientRequestTokenString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "ComprehendMedicalAsyncJobFilter":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

Filters on the name of the job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Filters the list of jobs based on job status. Returns only jobs with the specified status.

" + }, + "SubmitTimeBefore":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.

" + }, + "SubmitTimeAfter":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.

" + } + }, + "documentation":"

Provides information for filtering a list of detection jobs.

" + }, + "ComprehendMedicalAsyncJobProperties":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier assigned to the detection job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The name that you assigned to the detection job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The current status of the detection job. If the status is FAILED, the Message field shows the reason for the failure.

" + }, + "Message":{ + "shape":"AnyLengthString", + "documentation":"

A description of the status of a job.

" + }, + "SubmitTime":{ + "shape":"Timestamp", + "documentation":"

The time that the detection job was submitted for processing.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time that the detection job completed.

" + }, + "ExpirationTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that job metadata is deleted from the server. Output files in your S3 bucket will not be deleted. After the metadata is deleted, the job will no longer appear in the results of the ListEntitiesDetectionV2Jobs or ListPHIDetectionJobs operations.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input data configuration that you supplied when you created the detection job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output data configuration that you supplied when you created the detection job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the input documents.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) that gives Amazon Comprehend Medical read access to your input data.

" + }, + "ManifestFilePath":{ + "shape":"ManifestFilePath", + "documentation":"

The path to the file that describes the results of a batch job.

" + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

The AWS Key Management Service key, if any, used to encrypt the output files.

" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the model used to analyze the documents. The version number looks like X.X.X. You can use this information to track the model used for a particular batch of documents.

" + } + }, + "documentation":"

Provides information about a detection job.

" + }, + "ComprehendMedicalAsyncJobPropertiesList":{ + "type":"list", + "member":{"shape":"ComprehendMedicalAsyncJobProperties"} + }, + "DescribeEntitiesDetectionV2JobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier that Amazon Comprehend Medical generated for the job. The StartEntitiesDetectionV2Job operation returns this identifier in its response.

" + } + } + }, + "DescribeEntitiesDetectionV2JobResponse":{ + "type":"structure", + "members":{ + "ComprehendMedicalAsyncJobProperties":{ + "shape":"ComprehendMedicalAsyncJobProperties", + "documentation":"

An object that contains the properties associated with a detection job.

" + } + } + }, + "DescribePHIDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier that Amazon Comprehend Medical generated for the job. The StartPHIDetectionJob operation returns this identifier in its response.

" + } + } + }, + "DescribePHIDetectionJobResponse":{ + "type":"structure", + "members":{ + "ComprehendMedicalAsyncJobProperties":{ + "shape":"ComprehendMedicalAsyncJobProperties", + "documentation":"

An object that contains the properties associated with a detection job.

" + } + } + }, "DetectEntitiesRequest":{ "type":"structure", "required":["Text"], @@ -120,11 +399,14 @@ }, "DetectEntitiesResponse":{ "type":"structure", - "required":["Entities"], + "required":[ + "Entities", + "ModelVersion" + ], "members":{ "Entities":{ "shape":"EntityList", - "documentation":"

The collection of medical entities extracted from the input text and their associated information. For each entity, the response provides the entity text, the entity category, where the entity text begins and ends, and the level of confidence that Comprehend Medical has in the detection and analysis. Attributes and traits of the entity are also returned.

" + "documentation":"

The collection of medical entities extracted from the input text and their associated information. For each entity, the response provides the entity text, the entity category, where the entity text begins and ends, and the level of confidence that Amazon Comprehend Medical has in the detection and analysis. Attributes and traits of the entity are also returned.

" }, "UnmappedAttributes":{ "shape":"UnmappedAttributeList", @@ -132,7 +414,46 @@ }, "PaginationToken":{ "shape":"String", - "documentation":"

If the result of the previous request to DetectEntities was truncated, include the Paginationtoken to fetch the next page of entities.

" + "documentation":"

If the result of the previous request to DetectEntities was truncated, include the PaginationToken to fetch the next page of entities.

" + }, + "ModelVersion":{ + "shape":"String", + "documentation":"

The version of the model used to analyze the documents. The version number looks like X.X.X. You can use this information to track the model used for a particular batch of documents.

" + } + } + }, + "DetectEntitiesV2Request":{ + "type":"structure", + "required":["Text"], + "members":{ + "Text":{ + "shape":"BoundedLengthString", + "documentation":"

A UTF-8 string containing the clinical content being examined for entities. Each string must contain fewer than 20,000 bytes of characters.

" + } + } + }, + "DetectEntitiesV2Response":{ + "type":"structure", + "required":[ + "Entities", + "ModelVersion" + ], + "members":{ + "Entities":{ + "shape":"EntityList", + "documentation":"

The collection of medical entities extracted from the input text and their associated information. For each entity, the response provides the entity text, the entity category, where the entity text begins and ends, and the level of confidence in the detection and analysis. Attributes and traits of the entity are also returned.

" + }, + "UnmappedAttributes":{ + "shape":"UnmappedAttributeList", + "documentation":"

Attributes extracted from the input text that couldn't be related to an entity.

" + }, + "PaginationToken":{ + "shape":"String", + "documentation":"

If the result of the DetectEntitiesV2 operation was truncated, include the PaginationToken to fetch the next page of entities.

" + }, + "ModelVersion":{ + "shape":"String", + "documentation":"

The version of the model used to analyze the documents. The version number looks like X.X.X. You can use this information to track the model used for a particular batch of documents.

" } } }, @@ -148,15 +469,22 @@ }, "DetectPHIResponse":{ "type":"structure", - "required":["Entities"], + "required":[ + "Entities", + "ModelVersion" + ], "members":{ "Entities":{ "shape":"EntityList", - "documentation":"

The collection of PHI entities extracted from the input text and their associated information. For each entity, the response provides the entity text, the entity category, where the entity text begins and ends, and the level of confidence that Comprehend Medical has in its detection.

" + "documentation":"

The collection of PHI entities extracted from the input text and their associated information. For each entity, the response provides the entity text, the entity category, where the entity text begins and ends, and the level of confidence that Amazon Comprehend Medical has in its detection.

" }, "PaginationToken":{ "shape":"String", - "documentation":"

If the result of the previous request to DetectPHI was truncated, include the Paginationtoken to fetch the next page of PHI entities.

" + "documentation":"

If the result of the previous request to DetectPHI was truncated, include the PaginationToken to fetch the next page of PHI entities.

" + }, + "ModelVersion":{ + "shape":"String", + "documentation":"

The version of the model used to analyze the documents. The version number looks like X.X.X. You can use this information to track the model used for a particular batch of documents.

" } } }, @@ -177,7 +505,7 @@ }, "Score":{ "shape":"Float", - "documentation":"

The level of confidence that Comprehend Medical has in the accuracy of the detection.

" + "documentation":"

The level of confidence that Amazon Comprehend Medical has in the accuracy of the detection.

" }, "Text":{ "shape":"String", @@ -250,6 +578,27 @@ ] }, "Float":{"type":"float"}, + "IamRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+" + }, + "InputDataConfig":{ + "type":"structure", + "required":["S3Bucket"], + "members":{ + "S3Bucket":{ + "shape":"S3Bucket", + "documentation":"

The URI of the S3 bucket that contains the input data. The bucket must be in the same region as the API endpoint that you are calling.

Each file in the document collection must be less than 40 KB. You can store a maximum of 30 GB in the bucket.

" + }, + "S3Key":{ + "shape":"S3Key", + "documentation":"

The path to the input data files in the S3 bucket.

" + } + }, + "documentation":"

The input properties for an entities detection job.

" + }, "Integer":{"type":"integer"}, "InternalServerException":{ "type":"structure", @@ -276,14 +625,289 @@ "documentation":"

The request that you made is invalid. Check your request to determine why it's invalid and then retry the request.

", "exception":true }, + "JobId":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "JobName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "JobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "COMPLETED", + "PARTIAL_SUCCESS", + "FAILED", + "STOP_REQUESTED", + "STOPPED" + ] + }, + "KMSKey":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".*" + }, + "LanguageCode":{ + "type":"string", + "enum":["en"] + }, + "ListEntitiesDetectionV2JobsRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"ComprehendMedicalAsyncJobFilter", + "documentation":"

Filters the jobs that are returned. You can filter jobs based on their names, status, or the date and time that they were submitted. You can only set one filter at a time.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of results to return in each page. The default is 100.

" + } + } + }, + "ListEntitiesDetectionV2JobsResponse":{ + "type":"structure", + "members":{ + "ComprehendMedicalAsyncJobPropertiesList":{ + "shape":"ComprehendMedicalAsyncJobPropertiesList", + "documentation":"

A list containing the properties of each job returned.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + } + } + }, + "ListPHIDetectionJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"ComprehendMedicalAsyncJobFilter", + "documentation":"

Filters the jobs that are returned. You can filter jobs based on their names, status, or the date and time that they were submitted. You can only set one filter at a time.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of results to return in each page. The default is 100.

" + } + } + }, + "ListPHIDetectionJobsResponse":{ + "type":"structure", + "members":{ + "ComprehendMedicalAsyncJobPropertiesList":{ + "shape":"ComprehendMedicalAsyncJobPropertiesList", + "documentation":"

A list containing the properties of each job returned.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + } + } + }, + "ManifestFilePath":{ + "type":"string", + "max":4096, + "min":1 + }, + "MaxResultsInteger":{ + "type":"integer", + "max":500, + "min":1 + }, + "ModelVersion":{"type":"string"}, + "OutputDataConfig":{ + "type":"structure", + "required":["S3Bucket"], + "members":{ + "S3Bucket":{ + "shape":"S3Bucket", + "documentation":"

When you use the OutputDataConfig object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output.

" + }, + "S3Key":{ + "shape":"S3Key", + "documentation":"

The path to the output data files in the S3 bucket. Amazon Comprehend Medical creates an output directory using the job ID so that the output from one job does not overwrite the output of another.

" + } + }, + "documentation":"

The output properties for a detection job.

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The resource identified by the specified Amazon Resource Name (ARN) was not found. Check the ARN and try your request again.

", + "exception":true + }, + "S3Bucket":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[0-9a-z\\.\\-_]*(?!\\.)$" + }, + "S3Key":{ + "type":"string", + "max":1024, + "pattern":".*" + }, "ServiceUnavailableException":{ "type":"structure", "members":{ "Message":{"shape":"String"} }, - "documentation":"

The Comprehend Medical service is temporarily unavailable. Please wait and then retry your request.

", + "documentation":"

The Amazon Comprehend Medical service is temporarily unavailable. Please wait and then retry your request.

", "exception":true }, + "StartEntitiesDetectionV2JobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "LanguageCode" + ], + "members":{ + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Specifies the format and location of the input data for the job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies where to send the output files.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. For more information, see Role-Based Permissions Required for Asynchronous Operations.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The identifier of the job.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "documentation":"

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend Medical generates one.

", + "idempotencyToken":true + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language of the input documents. All documents must be in the same language.

" + } + } + }, + "StartEntitiesDetectionV2JobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier generated for the job. To get the status of a job, use this identifier with the DescribeEntitiesDetectionV2Job operation.

" + } + } + }, + "StartPHIDetectionJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "LanguageCode" + ], + "members":{ + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Specifies the format and location of the input data for the job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies where to send the output files.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. For more information, see Role-Based Permissions Required for Asynchronous Operations.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The identifier of the job.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "documentation":"

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend Medical generates one.

", + "idempotencyToken":true + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language of the input documents. All documents must be in the same language.

" + } + } + }, + "StartPHIDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier generated for the job. To get the status of a job, use this identifier with the DescribePHIDetectionJob operation.

" + } + } + }, + "StopEntitiesDetectionV2JobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the medical entities job to stop.

" + } + } + }, + "StopEntitiesDetectionV2JobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the medical entities detection job that was stopped.

" + } + } + }, + "StopPHIDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the PHI detection job to stop.

" + } + } + }, + "StopPHIDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the PHI detection job that was stopped.

" + } + } + }, "String":{ "type":"string", "min":1 @@ -296,6 +920,7 @@ "documentation":"

The size of the text you submitted exceeds the size limit. Reduce the size of the text or use a smaller document and then retry your request.

", "exception":true }, + "Timestamp":{"type":"timestamp"}, "TooManyRequestsException":{ "type":"structure", "members":{ @@ -313,7 +938,7 @@ }, "Score":{ "shape":"Float", - "documentation":"

The level of confidence that Comprehend Medical has in the accuracy of this trait.

" + "documentation":"

The level of confidence that Amazon Comprehend Medical has in the accuracy of this trait.

" } }, "documentation":"

Provides contextual information about the extracted entity.

" @@ -327,7 +952,7 @@ "members":{ "Type":{ "shape":"EntityType", - "documentation":"

The type of the attribute, could be one of the following values: \"MEDICATION\", \"MEDICAL_CONDITION\", \"ANATOMY\", \"TEST_AND_TREATMENT_PROCEDURE\" or \"PERSONAL_HEALTH_INFORMATION\".

" + "documentation":"

The type of the attribute. It can be one of the following values: \"MEDICATION\", \"MEDICAL_CONDITION\", \"ANATOMY\", \"TEST_AND_TREATMENT_PROCEDURE\", or \"PROTECTED_HEALTH_INFORMATION\".

" }, "Attribute":{ "shape":"Attribute", @@ -339,7 +964,15 @@ "UnmappedAttributeList":{ "type":"list", "member":{"shape":"UnmappedAttribute"} + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The filter that you specified for the operation is invalid. Check the filter values that you entered and try your request again.

", + "exception":true } }, - "documentation":"

Comprehend Medical extracts structured information from unstructured clinical text. Use these actions to gain insight in your documents.

" + "documentation":"

Amazon Comprehend Medical extracts structured information from unstructured clinical text. Use these actions to gain insight into your documents.

" } diff --git a/botocore/data/config/2014-11-12/service-2.json b/botocore/data/config/2014-11-12/service-2.json index 3395b198..54c03639 100644 --- a/botocore/data/config/2014-11-12/service-2.json +++ b/botocore/data/config/2014-11-12/service-2.json @@ -128,7 +128,8 @@ {"shape":"NoSuchOrganizationConfigRuleException"}, {"shape":"ResourceInUseException"}, {"shape":"OrganizationAccessDeniedException"} - ] + ], + "documentation":"

Deletes the specified organization config rule and all of its evaluation results from all member accounts in that organization. Only a master account can delete an organization config rule.

AWS Config sets the state of a rule to DELETE_IN_PROGRESS until the deletion is complete. You cannot update a rule while it is in this state.

" }, "DeletePendingAggregationRequest":{ "name":"DeletePendingAggregationRequest", @@ -156,6 +157,19 @@ ], "documentation":"

Deletes the remediation configuration.

" }, + "DeleteRemediationExceptions":{ + "name":"DeleteRemediationExceptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRemediationExceptionsRequest"}, + "output":{"shape":"DeleteRemediationExceptionsResponse"}, + "errors":[ + {"shape":"NoSuchRemediationExceptionException"} + ], + "documentation":"

Deletes one or more remediation exceptions specified in the resource keys.

" + }, "DeleteRetentionConfiguration":{ "name":"DeleteRetentionConfiguration", "http":{ @@ -198,7 +212,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"NoSuchConfigurationAggregatorException"} ], - "documentation":"

Returns a list of compliant and noncompliant rules with the number of resources for compliant and noncompliant rules.

The results can return an empty result page, but if you have a nextToken, the results are displayed on the next page.

" + "documentation":"

Returns a list of compliant and noncompliant rules with the number of resources for compliant and noncompliant rules.

The results can return an empty result page, but if you have a nextToken, the results are displayed on the next page.

" }, "DescribeAggregationAuthorizations":{ "name":"DescribeAggregationAuthorizations", @@ -370,7 +384,8 @@ {"shape":"InvalidLimitException"}, {"shape":"InvalidNextTokenException"}, {"shape":"OrganizationAccessDeniedException"} - ] + ], + "documentation":"

Provides organization config rule deployment status for an organization.

The status is not considered successful until the organization config rule is successfully deployed in all the member accounts, with the exception of excluded accounts.

When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization config rule names; they are only applicable when you request all the organization config rules.

Only a master account can call this API.

" }, "DescribeOrganizationConfigRules":{ "name":"DescribeOrganizationConfigRules", @@ -385,7 +400,8 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InvalidLimitException"}, {"shape":"OrganizationAccessDeniedException"} - ] + ], + "documentation":"

Returns a list of organization config rules.

When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization config rule names; they are only applicable when you request all the organization config rules.

Only a master account can call this API.

" }, "DescribePendingAggregationRequests":{ "name":"DescribePendingAggregationRequests", @@ -412,6 +428,20 @@ "output":{"shape":"DescribeRemediationConfigurationsResponse"}, "documentation":"

Returns the details of one or more remediation configurations.

" }, + "DescribeRemediationExceptions":{ + "name":"DescribeRemediationExceptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRemediationExceptionsRequest"}, + "output":{"shape":"DescribeRemediationExceptionsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

Returns the details of one or more remediation exceptions. A detailed view of a remediation exception for a set of resources includes an explanation of the exception and the time when the exception will be deleted.

When you specify the limit and the next token, you receive a paginated response.

Limit and next token are not applicable if you request resources in batch; they are only applicable when you request all resources.

" + }, "DescribeRemediationExecutionStatus":{ "name":"DescribeRemediationExecutionStatus", "http":{ @@ -421,7 +451,8 @@ "input":{"shape":"DescribeRemediationExecutionStatusRequest"}, "output":{"shape":"DescribeRemediationExecutionStatusResponse"}, "errors":[ - {"shape":"NoSuchRemediationConfigurationException"} + {"shape":"NoSuchRemediationConfigurationException"}, + {"shape":"InvalidNextTokenException"} ], "documentation":"

Provides a detailed view of a remediation execution for a set of resources, including the state, timestamps for when each step of the remediation execution occurs, and any error messages for steps that have failed. When you specify the limit and the next token, you receive a paginated response.
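A hypothetical boto3 sketch of reading the execution status for one resource. The rule name and resource key are made up; note that the ResourceKey shape in this model uses lower-camel member names (resourceType/resourceId), unlike RemediationExceptionResourceKey, and the State field is an assumption based on the RemediationExecutionState enum defined later in this file.

    import boto3

    config = boto3.client("config")

    resp = config.describe_remediation_execution_status(
        ConfigRuleName="required-tags",  # hypothetical rule name
        ResourceKeys=[{"resourceType": "AWS::EC2::Instance",
                       "resourceId": "i-0123456789abcdef0"}],  # hypothetical resource
    )
    for status in resp.get("RemediationExecutionStatuses", []):
        print(status["ResourceKey"]["resourceId"], status.get("State"))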

" }, @@ -454,7 +485,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"NoSuchConfigurationAggregatorException"} ], - "documentation":"

Returns the evaluation results for the specified AWS Config rule for a specific resource in a rule. The results indicate which AWS resources were evaluated by the rule, when each resource was last evaluated, and whether each resource complies with the rule.

The results can return an empty result page. But if you have a nextToken, the results are displayed on the next page.

" + "documentation":"

Returns the evaluation results for the specified AWS Config rule for a specific resource in a rule. The results indicate which AWS resources were evaluated by the rule, when each resource was last evaluated, and whether each resource complies with the rule.

The results can return an empty result page, but if you have a nextToken, the results are displayed on the next page.

" }, "GetAggregateConfigRuleComplianceSummary":{ "name":"GetAggregateConfigRuleComplianceSummary", @@ -582,7 +613,8 @@ {"shape":"InvalidLimitException"}, {"shape":"InvalidNextTokenException"}, {"shape":"OrganizationAccessDeniedException"} - ] + ], + "documentation":"

Returns detailed status for each member account within an organization for a given organization config rule.

Only a master account can call this API.
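An illustrative boto3 sketch of pulling per-account status with a filter. The rule name is hypothetical, and CREATE_FAILED is an assumed value of the MemberAccountRuleStatus enum, which this excerpt does not show.

    import boto3

    config = boto3.client("config")  # master account only

    resp = config.get_organization_config_rule_detailed_status(
        OrganizationConfigRuleName="org-required-tags",        # hypothetical name
        Filters={"MemberAccountRuleStatus": "CREATE_FAILED"},  # assumed enum value
    )
    for s in resp.get("OrganizationConfigRuleDetailedStatus", []):
        print(s["AccountId"], s["MemberAccountRuleStatus"], s.get("ErrorMessage"))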

" }, "GetResourceConfigHistory":{ "name":"GetResourceConfigHistory", @@ -762,7 +794,8 @@ {"shape":"NoAvailableOrganizationException"}, {"shape":"OrganizationAllFeaturesNotEnabledException"}, {"shape":"InsufficientPermissionsException"} - ] + ], + "documentation":"

Adds or updates an organization config rule for your entire organization, evaluating whether your AWS resources comply with your desired configurations. Only a master account can create or update an organization config rule.

This API enables organization service access through the EnableAWSServiceAccess action and creates a service-linked role, AWSServiceRoleForConfigMultiAccountSetup, in the master account of your organization. The service-linked role is created only when the role does not exist in the master account. AWS Config verifies the existence of the role with the GetRole action.

You can use this action to create both custom AWS Config rules and AWS managed Config rules. If you are adding a new custom AWS Config rule, you must first create an AWS Lambda function in the master account that the rule invokes to evaluate your resources. When you use the PutOrganizationConfigRule action to add the rule to AWS Config, you must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. If you are adding an AWS managed Config rule, specify the rule's identifier for the RuleIdentifier key.

The maximum number of organization config rules that AWS Config supports is 150.

Specify either OrganizationCustomRuleMetadata or OrganizationManagedRuleMetadata.
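A minimal boto3 sketch of the managed-rule path. The rule name and excluded account are hypothetical; IAM_PASSWORD_POLICY is the managed-rule identifier that this model itself cites as an example.

    import boto3

    config = boto3.client("config")  # master account only

    resp = config.put_organization_config_rule(
        OrganizationConfigRuleName="org-iam-password-policy",  # hypothetical name
        OrganizationManagedRuleMetadata={
            "RuleIdentifier": "IAM_PASSWORD_POLICY",
        },
        ExcludedAccounts=["111111111111"],  # hypothetical account to skip
    )
    print(resp["OrganizationConfigRuleArn"])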

" }, "PutRemediationConfigurations":{ "name":"PutRemediationConfigurations", @@ -778,6 +811,19 @@ ], "documentation":"

Adds or updates the remediation configuration for a specific AWS Config rule with the selected target or action. The API creates the RemediationConfiguration object for the AWS Config rule. The AWS Config rule must already exist for you to add a remediation configuration. The target (SSM document) must exist, and you must have permissions to use the target.

" }, + "PutRemediationExceptions":{ + "name":"PutRemediationExceptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRemediationExceptionsRequest"}, + "output":{"shape":"PutRemediationExceptionsResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

A remediation exception is when a specific resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specific resource with a specific AWS Config rule.
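A boto3 sketch with hypothetical rule, resource, and expiration values. delete_remediation_exceptions takes the same ConfigRuleName/ResourceKeys pair and reports failures through FailedBatches in the same way.

    import datetime

    import boto3

    config = boto3.client("config")

    resp = config.put_remediation_exceptions(
        ConfigRuleName="required-tags",  # hypothetical rule name
        ResourceKeys=[{"ResourceType": "AWS::EC2::Instance",
                       "ResourceId": "i-0123456789abcdef0"}],  # hypothetical resource
        Message="Legacy instance scheduled for decommission",
        ExpirationTime=datetime.datetime(2019, 12, 31),
    )
    for batch in resp.get("FailedBatches", []):
        print(batch["FailureMessage"])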

" + }, "PutRetentionConfiguration":{ "name":"PutRetentionConfiguration", "http":{ @@ -845,6 +891,7 @@ "input":{"shape":"StartRemediationExecutionRequest"}, "output":{"shape":"StartRemediationExecutionResponse"}, "errors":[ + {"shape":"InvalidParameterValueException"}, {"shape":"InsufficientPermissionsException"}, {"shape":"NoSuchRemediationConfigurationException"} ], @@ -1135,6 +1182,18 @@ "max":256, "min":1 }, + "AutoRemediationAttemptSeconds":{ + "type":"long", + "box":true, + "max":2678000, + "min":1 + }, + "AutoRemediationAttempts":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, "AvailabilityZone":{"type":"string"}, "AwsRegion":{ "type":"string", @@ -1579,12 +1638,13 @@ }, "ConfigRuleName":{ "type":"string", - "max":64, - "min":1 + "max":128, + "min":1, + "pattern":".*\\S.*" }, "ConfigRuleNames":{ "type":"list", - "member":{"shape":"StringWithCharLimit64"}, + "member":{"shape":"ConfigRuleName"}, "max":25, "min":0 }, @@ -1934,7 +1994,10 @@ "type":"structure", "required":["OrganizationConfigRuleName"], "members":{ - "OrganizationConfigRuleName":{"shape":"StringWithCharLimit64"} + "OrganizationConfigRuleName":{ + "shape":"OrganizationConfigRuleName", + "documentation":"

The name of the organization config rule that you want to delete.

" + } } }, "DeletePendingAggregationRequestRequest":{ @@ -1959,7 +2022,7 @@ "required":["ConfigRuleName"], "members":{ "ConfigRuleName":{ - "shape":"StringWithCharLimit64", + "shape":"ConfigRuleName", "documentation":"

The name of the AWS Config rule for which you want to delete the remediation configuration.

" }, "ResourceType":{ @@ -1973,6 +2036,32 @@ "members":{ } }, + "DeleteRemediationExceptionsRequest":{ + "type":"structure", + "required":[ + "ConfigRuleName", + "ResourceKeys" + ], + "members":{ + "ConfigRuleName":{ + "shape":"ConfigRuleName", + "documentation":"

The name of the AWS Config rule for which you want to delete the remediation exception configuration.

" + }, + "ResourceKeys":{ + "shape":"RemediationExceptionResourceKeys", + "documentation":"

An exception list of resource exception keys to be processed with the current request. AWS Config deletes the exception for each resource key. For example, AWS Config deletes 3 exceptions for 3 resource keys.

" + } + } + }, + "DeleteRemediationExceptionsResponse":{ + "type":"structure", + "members":{ + "FailedBatches":{ + "shape":"FailedDeleteRemediationExceptionsBatches", + "documentation":"

Returns a list of failed delete remediation exceptions batch objects. Each object in the batch consists of a list of failed items and failure messages.

" + } + } + }, "DeleteRetentionConfigurationRequest":{ "type":"structure", "required":["RetentionConfigurationName"], @@ -2090,7 +2179,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2103,7 +2192,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2116,7 +2205,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2129,7 +2218,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2279,7 +2368,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" }, "Limit":{ "shape":"Limit", @@ -2296,7 +2385,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2309,7 +2398,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" }, "Limit":{ "shape":"Limit", @@ -2326,7 +2415,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2413,31 +2502,61 @@ "DescribeOrganizationConfigRuleStatusesRequest":{ "type":"structure", "members":{ - "OrganizationConfigRuleNames":{"shape":"OrganizationConfigRuleNames"}, - "Limit":{"shape":"CosmosPageLimit"}, - "NextToken":{"shape":"String"} + "OrganizationConfigRuleNames":{ + "shape":"OrganizationConfigRuleNames", + "documentation":"

The names of organization config rules for which you want status details. If you do not specify any names, AWS Config returns details for all your organization config rules.

" + }, + "Limit":{ + "shape":"CosmosPageLimit", + "documentation":"

The maximum number of OrganizationConfigRuleStatuses returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + } } }, "DescribeOrganizationConfigRuleStatusesResponse":{ "type":"structure", "members":{ - "OrganizationConfigRuleStatuses":{"shape":"OrganizationConfigRuleStatuses"}, - "NextToken":{"shape":"String"} + "OrganizationConfigRuleStatuses":{ + "shape":"OrganizationConfigRuleStatuses", + "documentation":"

A list of OrganizationConfigRuleStatus objects.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + } } }, "DescribeOrganizationConfigRulesRequest":{ "type":"structure", "members":{ - "OrganizationConfigRuleNames":{"shape":"OrganizationConfigRuleNames"}, - "Limit":{"shape":"CosmosPageLimit"}, - "NextToken":{"shape":"String"} + "OrganizationConfigRuleNames":{ + "shape":"OrganizationConfigRuleNames", + "documentation":"

The names of organization config rules for which you want details. If you do not specify any names, AWS Config returns details for all your organization config rules.

" + }, + "Limit":{ + "shape":"CosmosPageLimit", + "documentation":"

The maximum number of organization config rules returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + } } }, "DescribeOrganizationConfigRulesResponse":{ "type":"structure", "members":{ - "OrganizationConfigRules":{"shape":"OrganizationConfigRules"}, - "NextToken":{"shape":"String"} + "OrganizationConfigRules":{ + "shape":"OrganizationConfigRules", + "documentation":"

Returns a list of OrganizationConfigRule objects.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + } } }, "DescribePendingAggregationRequestsLimit":{ @@ -2454,7 +2573,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2467,7 +2586,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2490,12 +2609,47 @@ } } }, + "DescribeRemediationExceptionsRequest":{ + "type":"structure", + "required":["ConfigRuleName"], + "members":{ + "ConfigRuleName":{ + "shape":"ConfigRuleName", + "documentation":"

The name of the AWS Config rule.

" + }, + "ResourceKeys":{ + "shape":"RemediationExceptionResourceKeys", + "documentation":"

An exception list of resource exception keys to be processed with the current request. AWS Config returns the exception for each resource key. For example, AWS Config returns 3 exceptions for 3 resource keys.

" + }, + "Limit":{ + "shape":"Limit", + "documentation":"

The maximum number of RemediationExceptionResourceKey returned on each page. The default is 25. If you specify 0, AWS Config uses the default.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

" + } + } + }, + "DescribeRemediationExceptionsResponse":{ + "type":"structure", + "members":{ + "RemediationExceptions":{ + "shape":"RemediationExceptions", + "documentation":"

Returns a list of remediation exception objects.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

" + } + } + }, "DescribeRemediationExecutionStatusRequest":{ "type":"structure", "required":["ConfigRuleName"], "members":{ "ConfigRuleName":{ - "shape":"StringWithCharLimit64", + "shape":"ConfigRuleName", "documentation":"

The name of the AWS Config rule.

" }, "ResourceKeys":{ @@ -2507,7 +2661,7 @@ "documentation":"

The maximum number of RemediationExecutionStatuses returned on each page. The default is maximum. If you specify 0, AWS Config uses the default.

" }, "NextToken":{ - "shape":"StringWithCharLimit256", + "shape":"String", "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } @@ -2520,7 +2674,7 @@ "documentation":"

Returns a list of remediation execution status objects.

" }, "NextToken":{ - "shape":"StringWithCharLimit256", + "shape":"String", "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } @@ -2675,11 +2829,39 @@ "max":1000, "min":0 }, + "ExecutionControls":{ + "type":"structure", + "members":{ + "SsmControls":{ + "shape":"SsmControls", + "documentation":"

An SsmControls object.

" + } + }, + "documentation":"

The controls that AWS Config uses for executing remediations.

" + }, "Expression":{ "type":"string", "max":4096, "min":1 }, + "FailedDeleteRemediationExceptionsBatch":{ + "type":"structure", + "members":{ + "FailureMessage":{ + "shape":"String", + "documentation":"

Returns a failure message for the delete remediation exception request. For example, AWS Config fails to delete an exception due to an internal error.

" + }, + "FailedItems":{ + "shape":"RemediationExceptionResourceKeys", + "documentation":"

Returns remediation exception resource key objects of the failed items.

" + } + }, + "documentation":"

A list of the failed delete remediation exceptions with specific reasons.

" + }, + "FailedDeleteRemediationExceptionsBatches":{ + "type":"list", + "member":{"shape":"FailedDeleteRemediationExceptionsBatch"} + }, "FailedRemediationBatch":{ "type":"structure", "members":{ @@ -2698,6 +2880,24 @@ "type":"list", "member":{"shape":"FailedRemediationBatch"} }, + "FailedRemediationExceptionBatch":{ + "type":"structure", + "members":{ + "FailureMessage":{ + "shape":"String", + "documentation":"

Returns a failure message. For example, the auto-remediation has failed.

" + }, + "FailedItems":{ + "shape":"RemediationExceptions", + "documentation":"

Returns remediation exception objects of the failed items.

" + } + }, + "documentation":"

A list of the failed remediation exceptions with specific reasons.

" + }, + "FailedRemediationExceptionBatches":{ + "type":"list", + "member":{"shape":"FailedRemediationExceptionBatch"} + }, "FieldInfo":{ "type":"structure", "members":{ @@ -2748,7 +2948,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2761,7 +2961,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2787,7 +2987,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -2804,7 +3004,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -3027,17 +3227,35 @@ "type":"structure", "required":["OrganizationConfigRuleName"], "members":{ - "OrganizationConfigRuleName":{"shape":"StringWithCharLimit64"}, - "Filters":{"shape":"StatusDetailFilters"}, - "Limit":{"shape":"CosmosPageLimit"}, - "NextToken":{"shape":"String"} + "OrganizationConfigRuleName":{ + "shape":"OrganizationConfigRuleName", + "documentation":"

The name of the organization config rule for which you want status details for member accounts.

" + }, + "Filters":{ + "shape":"StatusDetailFilters", + "documentation":"

A StatusDetailFilters object.

" + }, + "Limit":{ + "shape":"CosmosPageLimit", + "documentation":"

The maximum number of OrganizationConfigRuleDetailedStatus returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + } } }, "GetOrganizationConfigRuleDetailedStatusResponse":{ "type":"structure", "members":{ - "OrganizationConfigRuleDetailedStatus":{"shape":"OrganizationConfigRuleDetailedStatus"}, - "NextToken":{"shape":"String"} + "OrganizationConfigRuleDetailedStatus":{ + "shape":"OrganizationConfigRuleDetailedStatus", + "documentation":"

A list of MemberAccountStatus objects.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + } } }, "GetResourceConfigHistoryRequest":{ @@ -3131,7 +3349,7 @@ "type":"structure", "members":{ }, - "documentation":"

Indicates one of the following errors:

", + "documentation":"

Indicates one of the following errors:

", "exception":true }, "Integer":{"type":"integer"}, @@ -3340,7 +3558,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -3353,7 +3571,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" } } }, @@ -3383,6 +3601,7 @@ "type":"structure", "members":{ }, + "documentation":"

You have reached the limit of the number of organization config rules you can create.

", "exception":true }, "MaxNumberOfRetentionConfigurationsExceededException":{ @@ -3424,13 +3643,32 @@ "MemberAccountRuleStatus" ], "members":{ - "AccountId":{"shape":"AccountId"}, - "ConfigRuleName":{"shape":"StringWithCharLimit64"}, - "MemberAccountRuleStatus":{"shape":"MemberAccountRuleStatus"}, - "ErrorCode":{"shape":"String"}, - "ErrorMessage":{"shape":"String"}, - "LastUpdateTime":{"shape":"Date"} - } + "AccountId":{ + "shape":"AccountId", + "documentation":"

The 12-digit account ID of a member account.

" + }, + "ConfigRuleName":{ + "shape":"StringWithCharLimit64", + "documentation":"

The name of the config rule deployed in the member account.

" + }, + "MemberAccountRuleStatus":{ + "shape":"MemberAccountRuleStatus", + "documentation":"

Indicates the deployment status for the config rule in the member account. When the master account calls the PutOrganizationConfigRule action for the first time, the config rule status is created in the member account. When the master account calls the PutOrganizationConfigRule action for the second time, the config rule status is updated in the member account. The config rule status is deleted when the master account deletes the OrganizationConfigRule and disables service access for config-multiaccountsetup.amazonaws.com.

AWS Config sets the state of the rule to:

" + }, + "ErrorCode":{ + "shape":"String", + "documentation":"

An error code that is returned when config rule creation or deletion fails in the member account.

" + }, + "ErrorMessage":{ + "shape":"String", + "documentation":"

An error message indicating that config rule creation or deletion failed due to an error in the member account.

" + }, + "LastUpdateTime":{ + "shape":"Date", + "documentation":"

The timestamp of the last status update.

" + } + }, + "documentation":"

The organization config rule creation or deletion status in each member account. This includes the name of the rule, the status, and the error code and error message if the rule creation or deletion failed.

" }, "MessageType":{ "type":"string", @@ -3461,7 +3699,7 @@ "type":"structure", "members":{ }, - "documentation":"

Organization does is no longer available.

", + "documentation":"

Organization is no longer available.

", "exception":true }, "NoRunningConfigurationRecorderException":{ @@ -3510,6 +3748,7 @@ "type":"structure", "members":{ }, + "documentation":"

You specified one or more organization config rules that do not exist.

", "exception":true }, "NoSuchRemediationConfigurationException":{ @@ -3519,6 +3758,13 @@ "documentation":"

You specified an AWS Config rule without a remediation configuration.

", "exception":true }, + "NoSuchRemediationExceptionException":{ + "type":"structure", + "members":{ + }, + "documentation":"

You tried to delete a remediation exception that does not exist.

", + "exception":true + }, "NoSuchRetentionConfigurationException":{ "type":"structure", "members":{ @@ -3531,7 +3777,7 @@ "type":"structure", "members":{ }, - "documentation":"

No permission to call the EnableAWSServiceAccess API.

", + "documentation":"

For the PutConfigurationAggregator API, you do not have permission to call the EnableAWSServiceAccess API.

For all OrganizationConfigRule APIs, AWS Config throws an exception if the APIs are called from member accounts. All APIs must be called from the organization's master account.

", "exception":true }, "OrganizationAggregationSource":{ @@ -3557,7 +3803,7 @@ "type":"structure", "members":{ }, - "documentation":"

The configuration aggregator cannot be created because organization does not have all features enabled.

", + "documentation":"

AWS Config resource cannot be created because your organization does not have all features enabled.

", "exception":true }, "OrganizationConfigRule":{ @@ -3567,18 +3813,43 @@ "OrganizationConfigRuleArn" ], "members":{ - "OrganizationConfigRuleName":{"shape":"StringWithCharLimit64"}, - "OrganizationConfigRuleArn":{"shape":"StringWithCharLimit256"}, - "OrganizationManagedRuleMetadata":{"shape":"OrganizationManagedRuleMetadata"}, - "OrganizationCustomRuleMetadata":{"shape":"OrganizationCustomRuleMetadata"}, - "ExcludedAccounts":{"shape":"ExcludedAccounts"}, - "LastUpdateTime":{"shape":"Date"} - } + "OrganizationConfigRuleName":{ + "shape":"OrganizationConfigRuleName", + "documentation":"

The name that you assign to the organization config rule.

" + }, + "OrganizationConfigRuleArn":{ + "shape":"StringWithCharLimit256", + "documentation":"

The Amazon Resource Name (ARN) of the organization config rule.

" + }, + "OrganizationManagedRuleMetadata":{ + "shape":"OrganizationManagedRuleMetadata", + "documentation":"

An OrganizationManagedRuleMetadata object.

" + }, + "OrganizationCustomRuleMetadata":{ + "shape":"OrganizationCustomRuleMetadata", + "documentation":"

An OrganizationCustomRuleMetadata object.

" + }, + "ExcludedAccounts":{ + "shape":"ExcludedAccounts", + "documentation":"

A comma-separated list of accounts excluded from the organization config rule.

" + }, + "LastUpdateTime":{ + "shape":"Date", + "documentation":"

The timestamp of the last update.

" + } + }, + "documentation":"

An organization config rule that has information about config rules that AWS Config creates in member accounts.

" }, "OrganizationConfigRuleDetailedStatus":{ "type":"list", "member":{"shape":"MemberAccountStatus"} }, + "OrganizationConfigRuleName":{ + "type":"string", + "max":64, + "min":1, + "pattern":".*\\S.*" + }, "OrganizationConfigRuleNames":{ "type":"list", "member":{"shape":"StringWithCharLimit64"}, @@ -3592,12 +3863,28 @@ "OrganizationRuleStatus" ], "members":{ - "OrganizationConfigRuleName":{"shape":"StringWithCharLimit64"}, - "OrganizationRuleStatus":{"shape":"OrganizationRuleStatus"}, - "ErrorCode":{"shape":"String"}, - "ErrorMessage":{"shape":"String"}, - "LastUpdateTime":{"shape":"Date"} - } + "OrganizationConfigRuleName":{ + "shape":"OrganizationConfigRuleName", + "documentation":"

The name that you assign to the organization config rule.

" + }, + "OrganizationRuleStatus":{ + "shape":"OrganizationRuleStatus", + "documentation":"

Indicates the deployment status of an organization config rule. When the master account calls the PutOrganizationConfigRule action for the first time, the config rule status is created in all the member accounts. When the master account calls the PutOrganizationConfigRule action for the second time, the config rule status is updated in all the member accounts. Additionally, the config rule status is updated when one or more member accounts join or leave an organization. The config rule status is deleted when the master account deletes the OrganizationConfigRule in all the member accounts and disables service access for config-multiaccountsetup.amazonaws.com.

AWS Config sets the state of the rule to:

" + }, + "ErrorCode":{ + "shape":"String", + "documentation":"

An error code that is returned when organization config rule creation or deletion has failed.

" + }, + "ErrorMessage":{ + "shape":"String", + "documentation":"

An error message indicating that organization config rule creation or deletion failed due to an error.

" + }, + "LastUpdateTime":{ + "shape":"Date", + "documentation":"

The timestamp of the last update.

" + } + }, + "documentation":"

Returns the status for an organization config rule in an organization.

" }, "OrganizationConfigRuleStatuses":{ "type":"list", @@ -3626,30 +3913,83 @@ "OrganizationConfigRuleTriggerTypes" ], "members":{ - "Description":{"shape":"StringWithCharLimit256Min0"}, - "LambdaFunctionArn":{"shape":"StringWithCharLimit256"}, - "OrganizationConfigRuleTriggerTypes":{"shape":"OrganizationConfigRuleTriggerTypes"}, - "InputParameters":{"shape":"StringWithCharLimit2048"}, - "MaximumExecutionFrequency":{"shape":"MaximumExecutionFrequency"}, - "ResourceTypesScope":{"shape":"ResourceTypesScope"}, - "ResourceIdScope":{"shape":"StringWithCharLimit768"}, - "TagKeyScope":{"shape":"StringWithCharLimit128"}, - "TagValueScope":{"shape":"StringWithCharLimit256"} - } + "Description":{ + "shape":"StringWithCharLimit256Min0", + "documentation":"

The description that you provide for the organization config rule.

" + }, + "LambdaFunctionArn":{ + "shape":"StringWithCharLimit256", + "documentation":"

The Lambda function ARN.

" + }, + "OrganizationConfigRuleTriggerTypes":{ + "shape":"OrganizationConfigRuleTriggerTypes", + "documentation":"

The type of notification that triggers AWS Config to run an evaluation for a rule. You can specify the following notification types:

" + }, + "InputParameters":{ + "shape":"StringWithCharLimit2048", + "documentation":"

A string, in JSON format, that is passed to the organization config rule's Lambda function.

" + }, + "MaximumExecutionFrequency":{ + "shape":"MaximumExecutionFrequency", + "documentation":"

The maximum frequency with which AWS Config runs evaluations for a rule. Your custom rule is triggered when AWS Config delivers the configuration snapshot. For more information, see ConfigSnapshotDeliveryProperties.

By default, rules with a periodic trigger are evaluated every 24 hours. To change the frequency, specify a valid value for the MaximumExecutionFrequency parameter.

" + }, + "ResourceTypesScope":{ + "shape":"ResourceTypesScope", + "documentation":"

The type of the AWS resource that was evaluated.

" + }, + "ResourceIdScope":{ + "shape":"StringWithCharLimit768", + "documentation":"

The ID of the AWS resource that was evaluated.

" + }, + "TagKeyScope":{ + "shape":"StringWithCharLimit128", + "documentation":"

One part of a key-value pair that makes up a tag. A key is a general label that acts like a category for more specific tag values.

" + }, + "TagValueScope":{ + "shape":"StringWithCharLimit256", + "documentation":"

The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

" + } + }, + "documentation":"

An object that specifies organization custom rule metadata such as resource type, resource ID of AWS resource, Lambda function ARN, and organization trigger types that trigger AWS Config to evaluate your AWS resources against a rule. It also provides the frequency with which you want AWS Config to run evaluations for the rule if the trigger type is periodic.

" }, "OrganizationManagedRuleMetadata":{ "type":"structure", "required":["RuleIdentifier"], "members":{ - "Description":{"shape":"StringWithCharLimit256Min0"}, - "RuleIdentifier":{"shape":"StringWithCharLimit256"}, - "InputParameters":{"shape":"StringWithCharLimit2048"}, - "MaximumExecutionFrequency":{"shape":"MaximumExecutionFrequency"}, - "ResourceTypesScope":{"shape":"ResourceTypesScope"}, - "ResourceIdScope":{"shape":"StringWithCharLimit768"}, - "TagKeyScope":{"shape":"StringWithCharLimit128"}, - "TagValueScope":{"shape":"StringWithCharLimit256"} - } + "Description":{ + "shape":"StringWithCharLimit256Min0", + "documentation":"

The description that you provide for the organization config rule.

" + }, + "RuleIdentifier":{ + "shape":"StringWithCharLimit256", + "documentation":"

For organization config managed rules, a predefined identifier from a list. For example, IAM_PASSWORD_POLICY is a managed rule. To reference a managed rule, see Using AWS Managed Config Rules.

" + }, + "InputParameters":{ + "shape":"StringWithCharLimit2048", + "documentation":"

A string, in JSON format, that is passed to the organization config rule's Lambda function.

" + }, + "MaximumExecutionFrequency":{ + "shape":"MaximumExecutionFrequency", + "documentation":"

The maximum frequency with which AWS Config runs evaluations for a rule. You are using an AWS managed rule that is triggered at a periodic frequency.

By default, rules with a periodic trigger are evaluated every 24 hours. To change the frequency, specify a valid value for the MaximumExecutionFrequency parameter.

" + }, + "ResourceTypesScope":{ + "shape":"ResourceTypesScope", + "documentation":"

The type of the AWS resource that was evaluated.

" + }, + "ResourceIdScope":{ + "shape":"StringWithCharLimit768", + "documentation":"

The ID of the AWS resource that was evaluated.

" + }, + "TagKeyScope":{ + "shape":"StringWithCharLimit128", + "documentation":"

One part of a key-value pair that makes up a tag. A key is a general label that acts like a category for more specific tag values.

" + }, + "TagValueScope":{ + "shape":"StringWithCharLimit256", + "documentation":"

The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

" + } + }, + "documentation":"

An object that specifies organization managed rule metadata such as resource type and ID of AWS resource along with the rule identifier. It also provides the frequency with which you want AWS Config to run evaluations for the rule if the trigger type is periodic.

" }, "OrganizationRuleStatus":{ "type":"string", @@ -3697,6 +4037,12 @@ "type":"list", "member":{"shape":"PendingAggregationRequest"} }, + "Percentage":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, "PutAggregationAuthorizationRequest":{ "type":"structure", "required":[ @@ -3712,7 +4058,10 @@ "shape":"AwsRegion", "documentation":"

The region authorized to collect aggregated data.

" }, - "Tags":{"shape":"TagsList"} + "Tags":{ + "shape":"TagsList", + "documentation":"

An array of tag objects.

" + } } }, "PutAggregationAuthorizationResponse":{ @@ -3732,7 +4081,10 @@ "shape":"ConfigRule", "documentation":"

The rule that you want to add to your account.

" }, - "Tags":{"shape":"TagsList"} + "Tags":{ + "shape":"TagsList", + "documentation":"

An array of tag objects.

" + } } }, "PutConfigurationAggregatorRequest":{ @@ -3751,7 +4103,10 @@ "shape":"OrganizationAggregationSource", "documentation":"

An OrganizationAggregationSource object.

" }, - "Tags":{"shape":"TagsList"} + "Tags":{ + "shape":"TagsList", + "documentation":"

An array of tag objects.

" + } } }, "PutConfigurationAggregatorResponse":{ @@ -3818,16 +4173,31 @@ "type":"structure", "required":["OrganizationConfigRuleName"], "members":{ - "OrganizationConfigRuleName":{"shape":"StringWithCharLimit64"}, - "OrganizationManagedRuleMetadata":{"shape":"OrganizationManagedRuleMetadata"}, - "OrganizationCustomRuleMetadata":{"shape":"OrganizationCustomRuleMetadata"}, - "ExcludedAccounts":{"shape":"ExcludedAccounts"} + "OrganizationConfigRuleName":{ + "shape":"OrganizationConfigRuleName", + "documentation":"

The name that you assign to an organization config rule.

" + }, + "OrganizationManagedRuleMetadata":{ + "shape":"OrganizationManagedRuleMetadata", + "documentation":"

An OrganizationManagedRuleMetadata object.

" + }, + "OrganizationCustomRuleMetadata":{ + "shape":"OrganizationCustomRuleMetadata", + "documentation":"

An OrganizationCustomRuleMetadata object.

" + }, + "ExcludedAccounts":{ + "shape":"ExcludedAccounts", + "documentation":"

A comma-separated list of accounts that you want to exclude from an organization config rule.

" + } } }, "PutOrganizationConfigRuleResponse":{ "type":"structure", "members":{ - "OrganizationConfigRuleArn":{"shape":"StringWithCharLimit256"} + "OrganizationConfigRuleArn":{ + "shape":"StringWithCharLimit256", + "documentation":"

The Amazon Resource Name (ARN) of an organization config rule.

" + } } }, "PutRemediationConfigurationsRequest":{ @@ -3849,6 +4219,40 @@ } } }, + "PutRemediationExceptionsRequest":{ + "type":"structure", + "required":[ + "ConfigRuleName", + "ResourceKeys" + ], + "members":{ + "ConfigRuleName":{ + "shape":"ConfigRuleName", + "documentation":"

The name of the AWS Config rule for which you want to create a remediation exception.

" + }, + "ResourceKeys":{ + "shape":"RemediationExceptionResourceKeys", + "documentation":"

An exception list of resource exception keys to be processed with the current request. AWS Config adds an exception for each resource key. For example, AWS Config adds 3 exceptions for 3 resource keys.

" + }, + "Message":{ + "shape":"StringWithCharLimit1024", + "documentation":"

The message contains an explanation of the exception.

" + }, + "ExpirationTime":{ + "shape":"Date", + "documentation":"

The exception is automatically deleted after the expiration date.

" + } + } + }, + "PutRemediationExceptionsResponse":{ + "type":"structure", + "members":{ + "FailedBatches":{ + "shape":"FailedRemediationExceptionBatches", + "documentation":"

Returns a list of failed remediation exceptions batch objects. Each object in the batch consists of a list of failed items and failure messages.

" + } + } + }, "PutRetentionConfigurationRequest":{ "type":"structure", "required":["RetentionPeriodInDays"], @@ -3956,7 +4360,7 @@ ], "members":{ "ConfigRuleName":{ - "shape":"StringWithCharLimit64", + "shape":"ConfigRuleName", "documentation":"

The name of the AWS Config rule.

" }, "TargetType":{ @@ -3978,6 +4382,30 @@ "ResourceType":{ "shape":"String", "documentation":"

The type of a resource.

" + }, + "Automatic":{ + "shape":"Boolean", + "documentation":"

The remediation is triggered automatically.

" + }, + "ExecutionControls":{ + "shape":"ExecutionControls", + "documentation":"

An ExecutionControls object.

" + }, + "MaximumAutomaticAttempts":{ + "shape":"AutoRemediationAttempts", + "documentation":"

The maximum number of failed attempts for auto-remediation. If you do not select a number, the default is 5.

For example, if you specify MaximumAutomaticAttempts as 5 with RetryAttemptSeconds as 50 seconds, AWS Config throws an exception after the 5th failed attempt within 50 seconds.

" + }, + "RetryAttemptSeconds":{ + "shape":"AutoRemediationAttemptSeconds", + "documentation":"

Maximum time in seconds that AWS Config runs auto-remediation. If you do not select a number, the default is 60 seconds.

For example, if you specify RetryAttemptSeconds as 50 seconds and MaximumAutomaticAttempts as 5, AWS Config will run auto-remediations 5 times within 50 seconds before throwing an exception.

" + }, + "Arn":{ + "shape":"StringWithCharLimit1024", + "documentation":"

The Amazon Resource Name (ARN) of the remediation configuration.

" + }, + "CreatedByService":{ + "shape":"StringWithCharLimit1024", + "documentation":"

The name of the service that owns the service-linked rule, if applicable.

" } }, "documentation":"

An object that represents the details about the remediation configuration that includes the remediation action, parameters, and data to execute the action.
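A hypothetical boto3 sketch tying the new auto-remediation fields together. The rule name and SSM document are made up, SSM_DOCUMENT is an assumed TargetType value not shown in this excerpt, and the numbers deliberately echo the examples in the field docs above.

    import boto3

    config = boto3.client("config")

    config.put_remediation_configurations(
        RemediationConfigurations=[{
            "ConfigRuleName": "required-tags",         # hypothetical rule name
            "TargetType": "SSM_DOCUMENT",              # assumed enum value
            "TargetId": "AWS-PublishSNSNotification",  # hypothetical SSM document
            "Automatic": True,                         # turn on auto-remediation
            "MaximumAutomaticAttempts": 5,
            "RetryAttemptSeconds": 50,
            "ExecutionControls": {
                "SsmControls": {
                    "ConcurrentExecutionRatePercentage": 10,
                    "ErrorPercentage": 40,
                }
            },
        }]
    )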

" @@ -3988,6 +4416,63 @@ "max":25, "min":0 }, + "RemediationException":{ + "type":"structure", + "required":[ + "ConfigRuleName", + "ResourceType", + "ResourceId" + ], + "members":{ + "ConfigRuleName":{ + "shape":"ConfigRuleName", + "documentation":"

The name of the AWS Config rule.

" + }, + "ResourceType":{ + "shape":"StringWithCharLimit256", + "documentation":"

The type of a resource.

" + }, + "ResourceId":{ + "shape":"StringWithCharLimit1024", + "documentation":"

The ID of the resource (for example, sg-xxxxxx).

" + }, + "Message":{ + "shape":"StringWithCharLimit1024", + "documentation":"

An explanation of a remediation exception.

" + }, + "ExpirationTime":{ + "shape":"Date", + "documentation":"

The time when the remediation exception will be deleted.

" + } + }, + "documentation":"

An object that represents the details about the remediation exception. The details include the rule name, an explanation of an exception, the time when the exception will be deleted, the resource ID, and resource type.

" + }, + "RemediationExceptionResourceKey":{ + "type":"structure", + "members":{ + "ResourceType":{ + "shape":"StringWithCharLimit256", + "documentation":"

The type of a resource.

" + }, + "ResourceId":{ + "shape":"StringWithCharLimit1024", + "documentation":"

The ID of the resource (for example, sg-xxxxxx).

" + } + }, + "documentation":"

The details that identify a resource within AWS Config, including the resource type and resource ID.

" + }, + "RemediationExceptionResourceKeys":{ + "type":"list", + "member":{"shape":"RemediationExceptionResourceKey"}, + "max":100, + "min":1 + }, + "RemediationExceptions":{ + "type":"list", + "member":{"shape":"RemediationException"}, + "max":25, + "min":0 + }, "RemediationExecutionState":{ "type":"string", "enum":[ @@ -4066,6 +4551,7 @@ "type":"structure", "members":{ }, + "documentation":"

Remediation action is in progress. You can either cancel execution in AWS Systems Manager or wait and try again later.

", "exception":true }, "RemediationParameterValue":{ @@ -4206,7 +4692,7 @@ "type":"structure", "members":{ }, - "documentation":"

The rule is currently being deleted or the rule is deleting your evaluation results. Try your request again later.

", + "documentation":"

You see this exception in the following cases:

", "exception":true }, "ResourceKey":{ @@ -4266,55 +4752,81 @@ "AWS::EC2::VPC", "AWS::EC2::VPNConnection", "AWS::EC2::VPNGateway", + "AWS::EC2::RegisteredHAInstance", + "AWS::EC2::NatGateway", + "AWS::EC2::EgressOnlyInternetGateway", + "AWS::EC2::VPCEndpoint", + "AWS::EC2::VPCEndpointService", + "AWS::EC2::FlowLog", + "AWS::EC2::VPCPeeringConnection", "AWS::IAM::Group", "AWS::IAM::Policy", "AWS::IAM::Role", "AWS::IAM::User", + "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::ACM::Certificate", "AWS::RDS::DBInstance", + "AWS::RDS::DBParameterGroup", + "AWS::RDS::DBOptionGroup", "AWS::RDS::DBSubnetGroup", "AWS::RDS::DBSecurityGroup", "AWS::RDS::DBSnapshot", + "AWS::RDS::DBCluster", + "AWS::RDS::DBClusterParameterGroup", + "AWS::RDS::DBClusterSnapshot", "AWS::RDS::EventSubscription", - "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::S3::Bucket", - "AWS::SSM::ManagedInstanceInventory", + "AWS::S3::AccountPublicAccessBlock", "AWS::Redshift::Cluster", "AWS::Redshift::ClusterSnapshot", "AWS::Redshift::ClusterParameterGroup", "AWS::Redshift::ClusterSecurityGroup", "AWS::Redshift::ClusterSubnetGroup", "AWS::Redshift::EventSubscription", + "AWS::SSM::ManagedInstanceInventory", "AWS::CloudWatch::Alarm", "AWS::CloudFormation::Stack", - "AWS::DynamoDB::Table", + "AWS::ElasticLoadBalancing::LoadBalancer", "AWS::AutoScaling::AutoScalingGroup", "AWS::AutoScaling::LaunchConfiguration", "AWS::AutoScaling::ScalingPolicy", "AWS::AutoScaling::ScheduledAction", + "AWS::DynamoDB::Table", "AWS::CodeBuild::Project", "AWS::WAF::RateBasedRule", "AWS::WAF::Rule", + "AWS::WAF::RuleGroup", "AWS::WAF::WebACL", "AWS::WAFRegional::RateBasedRule", "AWS::WAFRegional::Rule", + "AWS::WAFRegional::RuleGroup", "AWS::WAFRegional::WebACL", "AWS::CloudFront::Distribution", "AWS::CloudFront::StreamingDistribution", - "AWS::WAF::RuleGroup", - "AWS::WAFRegional::RuleGroup", + "AWS::Lambda::Alias", "AWS::Lambda::Function", "AWS::ElasticBeanstalk::Application", "AWS::ElasticBeanstalk::ApplicationVersion", "AWS::ElasticBeanstalk::Environment", - "AWS::ElasticLoadBalancing::LoadBalancer", + "AWS::MobileHub::Project", "AWS::XRay::EncryptionConfig", "AWS::SSM::AssociationCompliance", "AWS::SSM::PatchCompliance", "AWS::Shield::Protection", "AWS::ShieldRegional::Protection", "AWS::Config::ResourceCompliance", - "AWS::CodePipeline::Pipeline" + "AWS::LicenseManager::LicenseConfiguration", + "AWS::ApiGateway::DomainName", + "AWS::ApiGateway::Method", + "AWS::ApiGateway::Stage", + "AWS::ApiGateway::RestApi", + "AWS::ApiGatewayV2::DomainName", + "AWS::ApiGatewayV2::Stage", + "AWS::ApiGatewayV2::Api", + "AWS::CodePipeline::Pipeline", + "AWS::ServiceCatalog::CloudFormationProvisionedProduct", + "AWS::ServiceCatalog::CloudFormationProduct", + "AWS::ServiceCatalog::Portfolio" ] }, "ResourceTypeList":{ @@ -4335,6 +4847,7 @@ }, "ResourceValue":{ "type":"structure", + "required":["Value"], "members":{ "Value":{ "shape":"ResourceValueType", @@ -4498,6 +5011,20 @@ "max":25, "min":0 }, + "SsmControls":{ + "type":"structure", + "members":{ + "ConcurrentExecutionRatePercentage":{ + "shape":"Percentage", + "documentation":"

The maximum percentage of remediation actions allowed to run in parallel on the non-compliant resources for that specific rule. You can specify a percentage, such as 10%. The default value is 10.

" + }, + "ErrorPercentage":{ + "shape":"Percentage", + "documentation":"

The percentage of errors that are allowed before SSM stops running automations on non-compliant resources for that specific rule. You can specify a percentage of errors, for example 10%. If you do not specify a percentage, the default is 50%. For example, if you set the ErrorPercentage to 40% for 10 non-compliant resources, then SSM stops running the automations when the fifth error is received.

" + } + }, + "documentation":"

AWS Systems Manager (SSM) specific remediation controls.

" + }, "StartConfigRulesEvaluationRequest":{ "type":"structure", "members":{ @@ -4533,7 +5060,7 @@ ], "members":{ "ConfigRuleName":{ - "shape":"StringWithCharLimit64", + "shape":"ConfigRuleName", "documentation":"

The name of the AWS Config rule that you want to run a remediation execution for.

" }, "ResourceKeys":{ @@ -4563,6 +5090,7 @@ }, "StaticValue":{ "type":"structure", + "required":["Values"], "members":{ "Values":{ "shape":"StaticParameterValues", @@ -4574,9 +5102,16 @@ "StatusDetailFilters":{ "type":"structure", "members":{ - "AccountId":{"shape":"AccountId"}, - "MemberAccountRuleStatus":{"shape":"MemberAccountRuleStatus"} - } + "AccountId":{ + "shape":"AccountId", + "documentation":"

The 12-digit account ID of the member account within an organization.

" + }, + "MemberAccountRuleStatus":{ + "shape":"MemberAccountRuleStatus", + "documentation":"

Indicates the deployment status for the config rule in the member account. When the master account calls the PutOrganizationConfigRule action for the first time, the config rule status is created in the member account. When the master account calls the PutOrganizationConfigRule action for the second time, the config rule status is updated in the member account. The config rule status is deleted when the master account deletes the OrganizationConfigRule and disables service access for config-multiaccountsetup.amazonaws.com.

AWS Config sets the state of the rule to:

" + } + }, + "documentation":"

A status filter object used to filter results based on a specific member account ID or status type for an organization config rule.

" }, "StopConfigurationRecorderRequest":{ "type":"structure", diff --git a/botocore/data/cur/2017-01-06/service-2.json b/botocore/data/cur/2017-01-06/service-2.json index b93be585..959e90ff 100644 --- a/botocore/data/cur/2017-01-06/service-2.json +++ b/botocore/data/cur/2017-01-06/service-2.json @@ -40,6 +40,20 @@ ], "documentation":"

Lists the AWS Cost and Usage reports available to this account.

" }, + "ModifyReportDefinition":{ + "name":"ModifyReportDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyReportDefinitionRequest"}, + "output":{"shape":"ModifyReportDefinitionResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Allows you to programmatically update your report preferences.
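An illustrative boto3 sketch; the report name, bucket, and prefix are hypothetical, and the ReportDefinition field values are assumptions since the shape's members are not shown in this excerpt. Note ap-east-1, newly added to the AWSRegion enum in this revision.

    import boto3

    cur = boto3.client("cur", region_name="us-east-1")  # the CUR API is served from us-east-1

    cur.modify_report_definition(
        ReportName="my-cost-report",  # hypothetical existing report
        ReportDefinition={
            "ReportName": "my-cost-report",
            "TimeUnit": "DAILY",
            "Format": "textORcsv",
            "Compression": "GZIP",
            "AdditionalSchemaElements": ["RESOURCES"],
            "S3Bucket": "my-billing-bucket",  # hypothetical bucket
            "S3Prefix": "reports",
            "S3Region": "ap-east-1",
        },
    )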

" + }, "PutReportDefinition":{ "name":"PutReportDefinition", "http":{ @@ -71,7 +85,8 @@ "ap-southeast-2", "ap-northeast-1", "eu-north-1", - "ap-northeast-3" + "ap-northeast-3", + "ap-east-1" ] }, "AdditionalArtifact":{ @@ -166,6 +181,22 @@ "max":5, "min":5 }, + "ModifyReportDefinitionRequest":{ + "type":"structure", + "required":[ + "ReportName", + "ReportDefinition" + ], + "members":{ + "ReportName":{"shape":"ReportName"}, + "ReportDefinition":{"shape":"ReportDefinition"} + } + }, + "ModifyReportDefinitionResponse":{ + "type":"structure", + "members":{ + } + }, "PutReportDefinitionRequest":{ "type":"structure", "required":["ReportDefinition"], diff --git a/botocore/data/datasync/2018-11-09/service-2.json b/botocore/data/datasync/2018-11-09/service-2.json index 6c1adae8..63e65681 100644 --- a/botocore/data/datasync/2018-11-09/service-2.json +++ b/botocore/data/datasync/2018-11-09/service-2.json @@ -82,7 +82,21 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for an Amazon S3 bucket.

For AWS DataSync to access a destination S3 bucket, it needs an AWS Identity and Access Management (IAM) role that has the required permissions. You can set up the required permissions by creating an IAM policy that grants the required permissions and attaching the policy to the role. An example of such a policy is shown in the examples section.

For more information, see Configuring Amazon S3 Location Settings in the AWS DataSync User Guide.

" + "documentation":"

Creates an endpoint for an Amazon S3 bucket.

For AWS DataSync to access a destination S3 bucket, it needs an AWS Identity and Access Management (IAM) role that has the required permissions. You can set up the required permissions by creating an IAM policy that grants the required permissions and attaching the policy to the role. An example of such a policy is shown in the examples section.

For more information, see https://docs.aws.amazon.com/datasync/latest/userguide/working-with-locations.html#create-s3-location in the AWS DataSync User Guide.

" + }, + "CreateLocationSmb":{ + "name":"CreateLocationSmb", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLocationSmbRequest"}, + "output":{"shape":"CreateLocationSmbResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Creates an endpoint for a Server Message Block (SMB) file system.
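A boto3 sketch with hypothetical values throughout (server, share, credentials, and agent ARN are made up; the MountOptions member name and AUTOMATIC value are assumptions, since SmbMountOptions members are not shown in this excerpt). The parameters map to the CreateLocationSmbRequest shape defined below.

    import boto3

    datasync = boto3.client("datasync")

    resp = datasync.create_location_smb(
        ServerHostname="smb.example.com",  # hypothetical on-premises server
        Subdirectory="/exported/share",    # a path exported by the SMB server
        User="backup-operator",            # hypothetical user that can mount the share
        Domain="EXAMPLE",                  # optional Windows domain
        Password="hypothetical-password",
        AgentArns=["arn:aws:datasync:us-east-1:111111111111:agent/agent-0123456789abcdef0"],
        MountOptions={"Version": "AUTOMATIC"},  # assumed member name and value
    )
    print(resp["LocationArn"])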

" }, "CreateTask":{ "name":"CreateTask", @@ -196,6 +210,20 @@ ], "documentation":"

Returns metadata, such as bucket name, about an Amazon S3 bucket location.

" }, + "DescribeLocationSmb":{ + "name":"DescribeLocationSmb", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLocationSmbRequest"}, + "output":{"shape":"DescribeLocationSmbResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Returns metadata, such as the path and user information, about an SMB location.

" + }, "DescribeTask":{ "name":"DescribeTask", "http":{ @@ -460,7 +488,7 @@ }, "SubnetArns":{ "shape":"PLSubnetArnList", - "documentation":"

The Amazon Resource Names (ARNs) of the subnets in which DataSync will create Elastic Network Interfaces (ENIs) for each data transfer task. The agent that runs a task must be private. When you start a task that is associated with an agent created in a VPC, or one that has access to an IP address in a VPC, then the task is also private. In this case, DataSync creates four ENIs for each task in your subnet. For a data transfer to work, the agent must be able to route to all these four ENIs.

" + "documentation":"

The Amazon Resource Names (ARNs) of the subnets in which DataSync will create elastic network interfaces for each data transfer task. The agent that runs a task must be private. When you start a task that is associated with an agent created in a VPC, or one that has access to an IP address in a VPC, then the task is also private. In this case, DataSync creates four network interfaces for each task in your subnet. For a data transfer to work, the agent must be able to route to all these four network interfaces.

" }, "SecurityGroupArns":{ "shape":"PLSecurityGroupArnList", @@ -571,6 +599,10 @@ "shape":"S3BucketArn", "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket.

" }, + "S3StorageClass":{ + "shape":"S3StorageClass", + "documentation":"

The Amazon S3 storage class that you want to store your files in when this location is used as a task destination. For more information about S3 storage classes, see Amazon S3 Storage Classes in the Amazon Simple Storage Service Developer Guide. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

" + }, "S3Config":{"shape":"S3Config"}, "Tags":{ "shape":"TagList", @@ -589,6 +621,61 @@ }, "documentation":"

CreateLocationS3Response

" }, + "CreateLocationSmbRequest":{ + "type":"structure", + "required":[ + "Subdirectory", + "ServerHostname", + "User", + "Password", + "AgentArns" + ], + "members":{ + "Subdirectory":{ + "shape":"NonEmptySubdirectory", + "documentation":"

The subdirectory in the SMB file system that is used to read data from the SMB source location or write data to the SMB destination. The SMB path should be a path that's exported by the SMB server, or a subdirectory of that path. The path should be such that it can be mounted by other SMB clients in your network.

To transfer all the data in the folder you specified, DataSync needs to have permissions to mount the SMB share, as well as to access all the data in that share. To ensure this, either make sure that the user name and password specified belong to the user who can mount the share and who has the appropriate permissions for all of the files and directories that you want DataSync to access, or use credentials of a member of the Backup Operators group to mount the share. Doing either one enables the agent to access the data. For the agent to access directories, you must additionally enable all execute access.

" + }, + "ServerHostname":{ + "shape":"ServerHostname", + "documentation":"

The name of the SMB server. This value is the IP address or Domain Name Service (DNS) name of the SMB server. An agent that is installed on-premises uses this hostname to mount the SMB server in a network.

This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.

" + }, + "User":{ + "shape":"SmbUser", + "documentation":"

The user who can mount the share and has the permissions to access files and folders in the SMB share.

" + }, + "Domain":{ + "shape":"SmbDomain", + "documentation":"

The name of the Windows domain that the SMB server belongs to.

" + }, + "Password":{ + "shape":"SmbPassword", + "documentation":"

The password of the user who can mount the share and has the permissions to access files and folders in the SMB share.

" + }, + "AgentArns":{ + "shape":"AgentArnList", + "documentation":"

The Amazon Resource Names (ARNs) of agents to use for a Server Message Block (SMB) location.

" + }, + "MountOptions":{ + "shape":"SmbMountOptions", + "documentation":"

The mount options used by DataSync to access the SMB server.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

" + } + }, + "documentation":"

CreateLocationSmbRequest

" + }, + "CreateLocationSmbResponse":{ + "type":"structure", + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The Amazon Resource Name (ARN) of the source SMB file system location that is created.

" + } + }, + "documentation":"

CreateLocationSmbResponse

" + }, "CreateTaskRequest":{ "type":"structure", "required":[ @@ -816,6 +903,10 @@ "shape":"LocationUri", "documentation":"

The URL of the Amazon S3 location that was described.

" }, + "S3StorageClass":{ + "shape":"S3StorageClass", + "documentation":"

The Amazon S3 storage class that you chose to store your files in when this location is used as a task destination. For more information about S3 storage classes, see Amazon S3 Storage Classes in the Amazon Simple Storage Service Developer Guide. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

" + }, "S3Config":{"shape":"S3Config"}, "CreationTime":{ "shape":"Time", @@ -824,6 +915,51 @@ }, "documentation":"

DescribeLocationS3Response

" }, + "DescribeLocationSmbRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The Amazon Resource Name (ARN) of the SMB location to describe.

" + } + }, + "documentation":"

DescribeLocationSmbRequest

" + }, + "DescribeLocationSmbResponse":{ + "type":"structure", + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The Amazon Resource Name (ARN) of the SMB location that was described.

" + }, + "LocationUri":{ + "shape":"LocationUri", + "documentation":"

The URL of the source SMB location that was described.

" + }, + "AgentArns":{ + "shape":"AgentArnList", + "documentation":"

The Amazon Resource Names (ARNs) of the agents that are used by the source SMB file system location.

" + }, + "User":{ + "shape":"SmbUser", + "documentation":"

The user who can mount the share and has the permissions to access files and folders in the SMB share.

" + }, + "Domain":{ + "shape":"SmbDomain", + "documentation":"

The name of the Windows domain that the SMB server belongs to.

" + }, + "MountOptions":{ + "shape":"SmbMountOptions", + "documentation":"

The mount options that are available for DataSync to use to access an SMB location.

" + }, + "CreationTime":{ + "shape":"Time", + "documentation":"

The time that the SMB location was created.

" + } + }, + "documentation":"

DescribeLocationSmbResponse
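A matching hedged sketch for DescribeLocationSmb; the location ARN is a placeholder, and the printed keys follow the response members documented above:

```python
import boto3

datasync = boto3.client("datasync", region_name="us-east-1")

info = datasync.describe_location_smb(
    LocationArn="arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0"
)
# LocationUri uses the new smb:// scheme; AgentArns, User, Domain,
# MountOptions, and CreationTime are also returned.
print(info["LocationUri"], info["User"])
```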

" + }, "DescribeTaskExecutionRequest":{ "type":"structure", "required":["TaskExecutionArn"], @@ -1257,7 +1393,8 @@ }, "LocationUri":{ "type":"string", - "pattern":"(efs|nfs|s3)://[a-zA-Z0-9.\\-]+" + "max":4355, + "pattern":"^(efs|nfs|s3|smb)://[a-zA-Z0-9.\\-]+$" }, "LogGroupArn":{ "type":"string", @@ -1291,7 +1428,7 @@ "members":{ "Version":{ "shape":"NfsVersion", - "documentation":"

The specific NFS version that you want DataSync to use to mount your NFS share. If you don't specify a version, DataSync defaults to AUTOMATIC. That is, DataSync automatically selects a version based on negotiation with the NFS server.

" + "documentation":"

The specific NFS version that you want DataSync to use to mount your NFS share. If the server refuses to use the version specified, the sync will fail. If you don't specify a version, DataSync defaults to AUTOMATIC. That is, DataSync automatically selects a version based on negotiation with the NFS server.

You can specify the following NFS versions:

" } }, "documentation":"

Represents the mount options that are available for DataSync to access an NFS location.

" @@ -1326,7 +1463,11 @@ "members":{ "VerifyMode":{ "shape":"VerifyMode", - "documentation":"

A value that determines whether a data integrity verification should be performed at the end of a task execution after all data and metadata have been transferred.

Default value: POINT_IN_TIME_CONSISTENT.

POINT_IN_TIME_CONSISTENT: Perform verification (recommended).

NONE: Skip verification.

" + "documentation":"

A value that determines whether a data integrity verification should be performed at the end of a task execution after all data and metadata have been transferred.

Default value: POINT_IN_TIME_CONSISTENT.

POINT_IN_TIME_CONSISTENT: Perform verification (recommended).

ONLY_FILES_TRANSFERRED: Perform verification on only files that were transferred.

NONE: Skip verification.

" + }, + "OverwriteMode":{ + "shape":"OverwriteMode", + "documentation":"

A value that determines whether files at the destination should be overwritten or preserved when copying files. If set to NEVER, a destination file will not be replaced by a source file, even if the destination file differs from the source file. If you modify files in the destination and you sync the files, you can use this value to protect against overwriting those changes.

Some storage classes have specific behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes in the AWS DataSync User Guide.

" }, "Atime":{ "shape":"Atime", @@ -1346,7 +1487,7 @@ }, "PreserveDeletedFiles":{ "shape":"PreserveDeletedFiles", - "documentation":"

A value that specifies whether files in the destination that don't exist in the source file system should be preserved.

Default value: PRESERVE.

PRESERVE: Ignore such destination files (recommended).

REMOVE: Delete destination files that aren’t present in the source.

" + "documentation":"

A value that specifies whether files in the destination that don't exist in the source file system should be preserved. This option can affect your storage cost. If your task deletes objects, you might incur minimum storage duration charges for certain storage classes. For detailed information, see using-storage-classes in the AWS DataSync User Guide.

Default value: PRESERVE.

PRESERVE: Ignore such destination files (recommended).

REMOVE: Delete destination files that aren’t present in the source.

" }, "PreserveDevices":{ "shape":"PreserveDevices", @@ -1363,6 +1504,13 @@ }, "documentation":"

Represents the options that are available to control the behavior of a StartTaskExecution operation. Behavior includes preserving metadata such as user ID (UID), group ID (GID), and file permissions, and also overwriting files in the destination, data integrity verification, and so on.

A task has a set of default options associated with it. If you don't specify an option in StartTaskExecution, the default value is used. You can override the default options on each task execution by specifying an overriding Options value to StartTaskExecution.
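As a hedged sketch of that per-execution override (the task ARN is a placeholder), the new VerifyMode and OverwriteMode values can be passed through StartTaskExecution:

```python
import boto3

datasync = boto3.client("datasync", region_name="us-east-1")

execution = datasync.start_task_execution(
    TaskArn="arn:aws:datasync:us-east-1:111122223333:task/task-0123456789abcdef0",
    OverrideOptions={
        "VerifyMode": "ONLY_FILES_TRANSFERRED",  # verify only what this execution copied
        "OverwriteMode": "NEVER",                # never replace existing destination files
        "PreserveDeletedFiles": "PRESERVE",      # keep destination files absent from the source
    },
)
print(execution["TaskExecutionArn"])
```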

" }, + "OverwriteMode":{ + "type":"string", + "enum":[ + "ALWAYS", + "NEVER" + ] + }, "PLSecurityGroupArnList":{ "type":"list", "member":{"shape":"Ec2SecurityGroupArn"}, @@ -1443,11 +1591,55 @@ }, "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that is used to access an Amazon S3 bucket.

For detailed information about using such a role, see Creating a Location for Amazon S3 in the AWS DataSync User Guide.

" }, + "S3StorageClass":{ + "type":"string", + "enum":[ + "STANDARD", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER", + "DEEP_ARCHIVE" + ] + }, "ServerHostname":{ "type":"string", "max":255, "pattern":"^(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])$" }, + "SmbDomain":{ + "type":"string", + "max":253, + "pattern":"^([A-Za-z0-9]+[A-Za-z0-9-.]*)*[A-Za-z0-9-]*[A-Za-z0-9]$" + }, + "SmbMountOptions":{ + "type":"structure", + "members":{ + "Version":{ + "shape":"SmbVersion", + "documentation":"

The specific SMB version that you want DataSync to use to mount your SMB share. If you don't specify a version, DataSync defaults to AUTOMATIC. That is, DataSync automatically selects a version based on negotiation with the SMB server.

" + } + }, + "documentation":"

Represents the mount options that are available for DataSync to access an SMB location.

" + }, + "SmbPassword":{ + "type":"string", + "max":104, + "pattern":"^.{0,104}$" + }, + "SmbUser":{ + "type":"string", + "max":104, + "pattern":"^[^\\\\x5B\\\\x5D\\\\/:;|=,+*?]{1,104}$" + }, + "SmbVersion":{ + "type":"string", + "enum":[ + "AUTOMATIC", + "SMB2", + "SMB3" + ] + }, "SourceNetworkInterfaceArns":{ "type":"list", "member":{"shape":"NetworkInterfaceArn"} @@ -1744,6 +1936,7 @@ "type":"string", "enum":[ "POINT_IN_TIME_CONSISTENT", + "ONLY_FILES_TRANSFERRED", "NONE" ] }, diff --git a/botocore/data/dms/2016-01-01/service-2.json b/botocore/data/dms/2016-01-01/service-2.json index 886333d5..10909d83 100644 --- a/botocore/data/dms/2016-01-01/service-2.json +++ b/botocore/data/dms/2016-01-01/service-2.json @@ -150,6 +150,21 @@ ], "documentation":"

Deletes the specified certificate.

" }, + "DeleteConnection":{ + "name":"DeleteConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConnectionMessage"}, + "output":{"shape":"DeleteConnectionResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"

Deletes the connection between a replication instance and an endpoint.
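A minimal boto3 sketch of the new DeleteConnection operation; both ARNs are placeholders:

```python
import boto3

dms = boto3.client("dms", region_name="us-east-1")

result = dms.delete_connection(
    EndpointArn="arn:aws:dms:us-east-1:111122223333:endpoint/EXAMPLE",
    ReplicationInstanceArn="arn:aws:dms:us-east-1:111122223333:rep/EXAMPLE",
)
# The deleted connection is echoed back, per DeleteConnectionResponse below.
print(result["Connection"])
```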

" + }, "DeleteEndpoint":{ "name":"DeleteEndpoint", "http":{ @@ -971,7 +986,7 @@ }, "DmsTransferSettings":{ "shape":"DmsTransferSettings", - "documentation":"

The settings in JSON format for the DMS transfer type of source endpoint.

Possible attributes include the following:

Shorthand syntax for these attributes is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string

JSON syntax for these attributes is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }

" + "documentation":"

The settings in JSON format for the DMS transfer type of source endpoint.

Possible settings include the following:

Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string

JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }

" }, "MongoDbSettings":{ "shape":"MongoDbSettings", @@ -1254,6 +1269,34 @@ } } }, + "DeleteConnectionMessage":{ + "type":"structure", + "required":[ + "EndpointArn", + "ReplicationInstanceArn" + ], + "members":{ + "EndpointArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

" + }, + "ReplicationInstanceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the replication instance.

" + } + }, + "documentation":"

" + }, + "DeleteConnectionResponse":{ + "type":"structure", + "members":{ + "Connection":{ + "shape":"Connection", + "documentation":"

The connection that is being deleted.

" + } + }, + "documentation":"

" + }, "DeleteEndpointMessage":{ "type":"structure", "required":["EndpointArn"], @@ -1370,7 +1413,7 @@ }, "UniqueAccountIdentifier":{ "shape":"String", - "documentation":"

A unique AWS DMS identifier for an account in a particular AWS Region. The value of this identifier has the following format: c99999999999. DMS uses this identifier to name artifacts. For example, DMS uses this identifier to name the default Amazon S3 bucket for storing task assessment reports in a given AWS Region. The format of this S3 bucket name is the following: dms-AccountNumber-UniqueAccountIdentifier. Here is an example name for this default S3 bucket: dms-111122223333-c44445555666.

AWS DMS supports UniqueAccountIdentifier in versions 3.1.4 and later.

" + "documentation":"

A unique AWS DMS identifier for an account in a particular AWS Region. The value of this identifier has the following format: c99999999999. DMS uses this identifier to name artifacts. For example, DMS uses this identifier to name the default Amazon S3 bucket for storing task assessment reports in a given AWS Region. The format of this S3 bucket name is the following: dms-AccountNumber-UniqueAccountIdentifier. Here is an example name for this default S3 bucket: dms-111122223333-c44445555666.

AWS DMS supports the UniqueAccountIdentifier parameter in versions 3.1.4 and later.

" } }, "documentation":"

" @@ -2103,7 +2146,7 @@ }, "DmsTransferSettings":{ "shape":"DmsTransferSettings", - "documentation":"

The settings in JSON format for the DMS transfer type of source endpoint.

Possible attributes include the following:

Shorthand syntax for these attributes is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string

JSON syntax for these attributes is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }

" + "documentation":"

The settings in JSON format for the DMS transfer type of source endpoint.

Possible settings include the following:

Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string

JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }
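A hedged sketch of passing these settings through ModifyEndpoint; the ARNs are placeholders, and only the ServiceAccessRoleArn and BucketName keys are shown (CompressionType appears in the shorthand above but is omitted here):

```python
import boto3

dms = boto3.client("dms", region_name="us-east-1")

dms.modify_endpoint(
    EndpointArn="arn:aws:dms:us-east-1:111122223333:endpoint/EXAMPLE",
    DmsTransferSettings={
        "ServiceAccessRoleArn": "arn:aws:iam::111122223333:role/dms-transfer-role",
        "BucketName": "my-dms-transfer-bucket",
    },
)
```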

" }, "MongoDbSettings":{ "shape":"MongoDbSettings", @@ -2740,7 +2783,7 @@ }, "AuthMechanism":{ "shape":"AuthMechanismValue", - "documentation":"

The authentication mechanism you use to access the MongoDB source endpoint.

Valid values: DEFAULT, MONGODB_CR, SCRAM_SHA_1

DEFAULT – For MongoDB version 2.x, use MONGODB_CR. For MongoDB version 3.x, use SCRAM_SHA_1. This attribute is not used when authType=No.

" + "documentation":"

The authentication mechanism you use to access the MongoDB source endpoint.

Valid values: DEFAULT, MONGODB_CR, SCRAM_SHA_1

DEFAULT – For MongoDB version 2.x, use MONGODB_CR. For MongoDB version 3.x, use SCRAM_SHA_1. This setting is not used when authType=No.

" }, "NestingLevel":{ "shape":"NestingLevelValue", @@ -2748,15 +2791,15 @@ }, "ExtractDocId":{ "shape":"String", - "documentation":"

Specifies the document ID. Use this attribute when NestingLevel is set to NONE.

Default value is false.

" + "documentation":"

Specifies the document ID. Use this setting when NestingLevel is set to NONE.

Default value is false.

" }, "DocsToInvestigate":{ "shape":"String", - "documentation":"

Indicates the number of documents to preview to determine the document organization. Use this attribute when NestingLevel is set to ONE.

Must be a positive value greater than 0. Default value is 1000.

" + "documentation":"

Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel is set to ONE.

Must be a positive value greater than 0. Default value is 1000.

" }, "AuthSource":{ "shape":"String", - "documentation":"

The MongoDB database name. This attribute is not used when authType=NO.

The default is admin.

" + "documentation":"

The MongoDB database name. This setting is not used when authType=NO.

The default is admin.

" }, "KmsKeyId":{ "shape":"String", @@ -2809,7 +2852,7 @@ }, "ReleaseStatus":{ "shape":"ReleaseStatusValues", - "documentation":"

The value returned when the specified EngineVersion of the replication instance is in Beta or test mode. This indicates some features might not work as expected.

AWS DMS supports ReleaseStatus in versions 3.1.4 and later.

" + "documentation":"

The value returned when the specified EngineVersion of the replication instance is in Beta or test mode. This indicates some features might not work as expected.

AWS DMS supports the ReleaseStatus parameter in versions 3.1.4 and later.

" } }, "documentation":"

" @@ -3587,15 +3630,19 @@ }, "IncludeOpForFullLoad":{ "shape":"BooleanOptional", - "documentation":"

A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.

AWS DMS supports IncludeOpForFullLoad in versions 3.1.4 and later.

For full load, records can only be inserted. By default (the false setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.

This setting works together with CdcInsertsOnly for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide..

" + "documentation":"

A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.

AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.

For full load, records can only be inserted. By default (the false setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.

This setting works together with the CdcInsertsOnly parameter for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

" }, "CdcInsertsOnly":{ "shape":"BooleanOptional", - "documentation":"

A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.

If cdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide..

AWS DMS supports this interaction between CdcInsertsOnly and IncludeOpForFullLoad in versions 3.1.4 and later.

" + "documentation":"

A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.

If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions 3.1.4 and later.

" }, "TimestampColumnName":{ "shape":"String", - "documentation":"

A value that includes a timestamp column in the Amazon S3 target endpoint data. AWS DMS includes an additional column in the migrated data when you set timestampColumnName to a non-blank value.

AWS DMS supports TimestampColumnName in versions 3.1.4 and later.

For a full load, each row of the timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS. For a CDC load, each row of the timestamp column contains the timestamp for the commit of that row in the source database. The format for the timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. For CDC, the microsecond precision depends on the commit timestamp supported by DMS for the source database. When the AddColumnName setting is set to true, DMS also includes the name for the timestamp column that you set as the nonblank value of timestampColumnName.

" + "documentation":"

A value that, when nonblank, causes AWS DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.

AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.

DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName to a nonblank value.

For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.

For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.

The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.

When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set with TimestampColumnName.

" + }, + "ParquetTimestampInMillisecond":{ + "shape":"BooleanOptional", + "documentation":"

A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file in .parquet format.

AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.

When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a .parquet formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.

Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set this parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process the data with Athena or AWS Glue.

AWS DMS writes any TIMESTAMP column values that are written to an S3 file in .csv format with microsecond precision.

Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is inserted by setting the TimestampColumnName parameter.

" } }, "documentation":"

Settings for exporting data to Amazon S3.
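A hedged sketch wiring the new timestamp settings into an S3 target endpoint via ModifyEndpoint; the ARNs and bucket name are placeholders:

```python
import boto3

dms = boto3.client("dms", region_name="us-east-1")

dms.modify_endpoint(
    EndpointArn="arn:aws:dms:us-east-1:111122223333:endpoint/EXAMPLE",
    S3Settings={
        "ServiceAccessRoleArn": "arn:aws:iam::111122223333:role/dms-s3-role",
        "BucketName": "my-target-bucket",
        "DataFormat": "parquet",                 # write .parquet object files
        "TimestampColumnName": "sync_ts",        # adds the STRING timestamp column
        "ParquetTimestampInMillisecond": True,   # millisecond TIMESTAMPs for Athena/Glue
    },
)
```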

" diff --git a/botocore/data/docdb/2014-10-31/service-2.json b/botocore/data/docdb/2014-10-31/service-2.json index 7a24304f..5a57835f 100644 --- a/botocore/data/docdb/2014-10-31/service-2.json +++ b/botocore/data/docdb/2014-10-31/service-2.json @@ -285,6 +285,22 @@ ], "documentation":"

Deletes a DB subnet group.

The specified database subnet group must not be associated with any DB instances.

" }, + "DescribeCertificates":{ + "name":"DescribeCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCertificatesMessage"}, + "output":{ + "shape":"CertificateMessage", + "resultWrapper":"DescribeCertificatesResult" + }, + "errors":[ + {"shape":"CertificateNotFoundFault"} + ], + "documentation":"

Returns a list of certificate authority (CA) certificates provided by Amazon RDS for this AWS account.

" + }, "DescribeDBClusterParameterGroups":{ "name":"DescribeDBClusterParameterGroups", "http":{ @@ -870,6 +886,57 @@ }, "Boolean":{"type":"boolean"}, "BooleanOptional":{"type":"boolean"}, + "Certificate":{ + "type":"structure", + "members":{ + "CertificateIdentifier":{ + "shape":"String", + "documentation":"

The unique key that identifies a certificate.

Example: rds-ca-2019

" + }, + "CertificateType":{ + "shape":"String", + "documentation":"

The type of the certificate.

Example: CA

" + }, + "Thumbprint":{ + "shape":"String", + "documentation":"

The thumbprint of the certificate.

" + }, + "ValidFrom":{ + "shape":"TStamp", + "documentation":"

The starting date-time from which the certificate is valid.

Example: 2019-07-31T17:57:09Z

" + }, + "ValidTill":{ + "shape":"TStamp", + "documentation":"

The date-time after which the certificate is no longer valid.

Example: 2024-07-31T17:57:09Z

" + }, + "CertificateArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the certificate.

Example: arn:aws:rds:us-east-1::cert:rds-ca-2019

" + } + }, + "documentation":"

A certificate authority (CA) certificate for an AWS account.

", + "wrapper":true + }, + "CertificateList":{ + "type":"list", + "member":{ + "shape":"Certificate", + "locationName":"Certificate" + } + }, + "CertificateMessage":{ + "type":"structure", + "members":{ + "Certificates":{ + "shape":"CertificateList", + "documentation":"

A list of certificates for this AWS account.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided if the number of records retrieved is greater than MaxRecords. If this parameter is specified, the marker specifies the next record in the list. Including the value of Marker in the next call to DescribeCertificates results in the next page of certificates.

" + } + } + }, "CertificateNotFoundFault":{ "type":"structure", "members":{ @@ -1016,11 +1083,11 @@ }, "MasterUsername":{ "shape":"String", - "documentation":"

The name of the master user for the DB cluster.

Constraints:

" + "documentation":"

The name of the master user for the DB cluster.

Constraints:

" }, "MasterUserPassword":{ "shape":"String", - "documentation":"

The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote (\"), or the \"at\" symbol (@).

Constraints: Must contain from 8 to 41 characters.

" + "documentation":"

The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote (\"), or the \"at\" symbol (@).

Constraints: Must contain from 8 to 100 characters.

" }, "PreferredBackupWindow":{ "shape":"String", @@ -1155,7 +1222,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to be assigned to the DB instance.

" + "documentation":"

The tags to be assigned to the DB instance. You can assign up to 10 tags to an instance.

" }, "DBClusterIdentifier":{ "shape":"String", @@ -1802,7 +1869,7 @@ }, "PubliclyAccessible":{ "shape":"Boolean", - "documentation":"

Specifies the availability options for the DB instance. A value of true specifies an internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

" + "documentation":"

Not supported. Amazon DocumentDB does not currently support public endpoints. The value of PubliclyAccessible is always false.

" }, "StatusInfos":{ "shape":"DBInstanceStatusInfoList", @@ -1814,7 +1881,7 @@ }, "StorageEncrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the DB instance is encrypted.

" + "documentation":"

Specifies whether or not the DB instance is encrypted.

" }, "KmsKeyId":{ "shape":"String", @@ -1824,6 +1891,10 @@ "shape":"String", "documentation":"

The AWS Region-unique, immutable identifier for the DB instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB instance is accessed.

" }, + "CACertificateIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the CA certificate for this DB instance.

" + }, "PromotionTier":{ "shape":"IntegerOptional", "documentation":"

A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.

" @@ -2011,7 +2082,7 @@ }, "DBSubnetGroupArn":{ "shape":"String", - "documentation":"

The Amazon Resource Identifier (ARN) for the DB subnet group.

" + "documentation":"

The Amazon Resource Name (ARN) for the DB subnet group.

" } }, "documentation":"

Detailed information about a DB subnet group.

", @@ -2191,6 +2262,27 @@ }, "documentation":"

Represents the input to DeleteDBSubnetGroup.

" }, + "DescribeCertificatesMessage":{ + "type":"structure", + "members":{ + "CertificateIdentifier":{ + "shape":"String", + "documentation":"

The user-supplied certificate identifier. If this parameter is specified, information for only the specified certificate is returned. If this parameter is omitted, a list of up to MaxRecords certificates is returned. This parameter is not case sensitive.

Constraints:

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints:

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeCertificates request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
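A hedged sketch of paging through the new DescribeCertificates operation with the Marker token:

```python
import boto3

docdb = boto3.client("docdb", region_name="us-east-1")

marker = None
while True:
    kwargs = {"MaxRecords": 100}
    if marker:
        kwargs["Marker"] = marker
    page = docdb.describe_certificates(**kwargs)
    for cert in page["Certificates"]:
        print(cert["CertificateIdentifier"], cert["ValidTill"])
    marker = page.get("Marker")
    if not marker:
        break  # no Marker means no further pages
```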

" + } + } + }, "DescribeDBClusterParameterGroupsMessage":{ "type":"structure", "members":{ @@ -2979,7 +3071,7 @@ }, "MasterUserPassword":{ "shape":"String", - "documentation":"

The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote (\"), or the \"at\" symbol (@).

Constraints: Must contain from 8 to 41 characters.

" + "documentation":"

The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote (\"), or the \"at\" symbol (@).

Constraints: Must contain from 8 to 100 characters.

" }, "PreferredBackupWindow":{ "shape":"String", @@ -3088,6 +3180,10 @@ "shape":"String", "documentation":"

The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot occurs immediately if you set Apply Immediately to true. It occurs during the next maintenance window if you set Apply Immediately to false. This value is stored as a lowercase string.

Constraints:

Example: mydbinstance

" }, + "CACertificateIdentifier":{ + "shape":"String", + "documentation":"

Indicates the certificate that needs to be associated with the instance.
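A hedged sketch of rotating an instance to a new CA certificate with ModifyDBInstance; the instance identifier is a placeholder, and rds-ca-2019 follows the certificate identifier format shown in the Certificate shape above:

```python
import boto3

docdb = boto3.client("docdb", region_name="us-east-1")

docdb.modify_db_instance(
    DBInstanceIdentifier="sample-docdb-instance",
    CACertificateIdentifier="rds-ca-2019",
    ApplyImmediately=True,  # apply now rather than in the next maintenance window
)
```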

" + }, "PromotionTier":{ "shape":"IntegerOptional", "documentation":"

A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.

Default: 1

Valid values: 0-15

" diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index a4d1879f..0ad39c12 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -434,7 +434,7 @@ }, "input":{"shape":"CreateCustomerGatewayRequest"}, "output":{"shape":"CreateCustomerGatewayResult"}, - "documentation":"

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and may be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 Region, and 9059, which is reserved in the eu-west-1 Region.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

" + "documentation":"

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and can be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 Region, and 9059, which is reserved in the eu-west-1 Region.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

" }, "CreateDefaultSubnet":{ "name":"CreateDefaultSubnet", @@ -861,7 +861,7 @@ }, "input":{"shape":"CreateVpnConnectionRequest"}, "output":{"shape":"CreateVpnConnectionResult"}, - "documentation":"

Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The supported connection types is ipsec.1.

The response includes information that you need to give to your network administrator to configure your customer gateway.

We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

" + "documentation":"

Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The supported connection type is ipsec.1.

The response includes information that you need to give to your network administrator to configure your customer gateway.

We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

" }, "CreateVpnConnectionRoute":{ "name":"CreateVpnConnectionRoute", @@ -1481,6 +1481,16 @@ "output":{"shape":"DescribeElasticGpusResult"}, "documentation":"

Describes the Elastic Graphics accelerator associated with your instances. For more information about Elastic Graphics, see Amazon Elastic Graphics.

" }, + "DescribeExportImageTasks":{ + "name":"DescribeExportImageTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportImageTasksRequest"}, + "output":{"shape":"DescribeExportImageTasksResult"}, + "documentation":"

Describes the specified export image tasks or all your export image tasks.

" + }, "DescribeExportTasks":{ "name":"DescribeExportTasks", "http":{ @@ -1489,7 +1499,7 @@ }, "input":{"shape":"DescribeExportTasksRequest"}, "output":{"shape":"DescribeExportTasksResult"}, - "documentation":"

Describes the specified export tasks or all your export tasks.

" + "documentation":"

Describes the specified export instance tasks or all your export instance tasks.

" }, "DescribeFleetHistory":{ "name":"DescribeFleetHistory", @@ -2523,6 +2533,16 @@ "output":{"shape":"ExportClientVpnClientConfigurationResult"}, "documentation":"

Downloads the contents of the Client VPN endpoint configuration file for the specified Client VPN endpoint. The Client VPN endpoint configuration file includes the Client VPN endpoint and certificate information clients need to establish a connection with the Client VPN endpoint.

" }, + "ExportImage":{ + "name":"ExportImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportImageRequest"}, + "output":{"shape":"ExportImageResult"}, + "documentation":"

Exports an Amazon Machine Image (AMI) to a VM file. For more information, see Exporting a VM Directory from an Amazon Machine Image (AMI) in the VM Import/Export User Guide.

" + }, "ExportTransitGatewayRoutes":{ "name":"ExportTransitGatewayRoutes", "http":{ @@ -2751,7 +2771,7 @@ }, "input":{"shape":"ModifyFleetRequest"}, "output":{"shape":"ModifyFleetResult"}, - "documentation":"

Modifies the specified EC2 Fleet.

You can only modify an EC2 Fleet request of type maintain.

While the EC2 Fleet is being modified, it is in the modifying state.

To scale up your EC2 Fleet, increase its target capacity. The EC2 Fleet launches the additional Spot Instances according to the allocation strategy for the EC2 Fleet request. If the allocation strategy is lowestPrice, the EC2 Fleet launches instances using the Spot Instance pool with the lowest price. If the allocation strategy is diversified, the EC2 Fleet distributes the instances across the Spot Instance pools. If the allocation strategy is capacityOptimized, EC2 Fleet launches instances from Spot Instance pools that are optimally chosen based on the available Spot Instance capacity.

To scale down your EC2 Fleet, decrease its target capacity. First, the EC2 Fleet cancels any open requests that exceed the new target capacity. You can request that the EC2 Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the EC2 Fleet terminates the instances with the highest price per unit. If the allocation strategy is capacityOptimized, the EC2 Fleet terminates the instances in the Spot Instance pools that have the least available Spot Instance capacity. If the allocation strategy is diversified, the EC2 Fleet terminates instances across the Spot Instance pools. Alternatively, you can request that the EC2 Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your EC2 Fleet for now, but will use it again later, you can set the target capacity to 0.

" + "documentation":"

Modifies the specified EC2 Fleet.

You can only modify an EC2 Fleet request of type maintain.

While the EC2 Fleet is being modified, it is in the modifying state.

To scale up your EC2 Fleet, increase its target capacity. The EC2 Fleet launches the additional Spot Instances according to the allocation strategy for the EC2 Fleet request. If the allocation strategy is lowestPrice, the EC2 Fleet launches instances using the Spot Instance pool with the lowest price. If the allocation strategy is diversified, the EC2 Fleet distributes the instances across the Spot Instance pools. If the allocation strategy is capacityOptimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

To scale down your EC2 Fleet, decrease its target capacity. First, the EC2 Fleet cancels any open requests that exceed the new target capacity. You can request that the EC2 Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the EC2 Fleet terminates the instances with the highest price per unit. If the allocation strategy is capacityOptimized, the EC2 Fleet terminates the instances in the Spot Instance pools that have the least available Spot Instance capacity. If the allocation strategy is diversified, the EC2 Fleet terminates instances across the Spot Instance pools. Alternatively, you can request that the EC2 Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your EC2 Fleet for now, but will use it again later, you can set the target capacity to 0.

" }, "ModifyFpgaImageAttribute":{ "name":"ModifyFpgaImageAttribute", @@ -2895,7 +2915,7 @@ }, "input":{"shape":"ModifySpotFleetRequestRequest"}, "output":{"shape":"ModifySpotFleetRequestResponse"}, - "documentation":"

Modifies the specified Spot Fleet request.

You can only modify a Spot Fleet request of type maintain.

While the Spot Fleet request is being modified, it is in the modifying state.

To scale up your Spot Fleet, increase its target capacity. The Spot Fleet launches the additional Spot Instances according to the allocation strategy for the Spot Fleet request. If the allocation strategy is lowestPrice, the Spot Fleet launches instances using the Spot Instance pool with the lowest price. If the allocation strategy is diversified, the Spot Fleet distributes the instances across the Spot Instance pools. If the allocation strategy is capacityOptimized, Spot Fleet launches instances from Spot Instance pools that are optimally chosen based on the available Spot Instance capacity.

To scale down your Spot Fleet, decrease its target capacity. First, the Spot Fleet cancels any open requests that exceed the new target capacity. You can request that the Spot Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the Spot Fleet terminates the instances with the highest price per unit. If the allocation strategy is capacityOptimized, the Spot Fleet terminates the instances in the Spot Instance pools that have the least available Spot Instance capacity. If the allocation strategy is diversified, the Spot Fleet terminates instances across the Spot Instance pools. Alternatively, you can request that the Spot Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your Spot Fleet for now, but will use it again later, you can set the target capacity to 0.

" + "documentation":"

Modifies the specified Spot Fleet request.

You can only modify a Spot Fleet request of type maintain.

While the Spot Fleet request is being modified, it is in the modifying state.

To scale up your Spot Fleet, increase its target capacity. The Spot Fleet launches the additional Spot Instances according to the allocation strategy for the Spot Fleet request. If the allocation strategy is lowestPrice, the Spot Fleet launches instances using the Spot Instance pool with the lowest price. If the allocation strategy is diversified, the Spot Fleet distributes the instances across the Spot Instance pools. If the allocation strategy is capacityOptimized, Spot Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

To scale down your Spot Fleet, decrease its target capacity. First, the Spot Fleet cancels any open requests that exceed the new target capacity. You can request that the Spot Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the Spot Fleet terminates the instances with the highest price per unit. If the allocation strategy is capacityOptimized, the Spot Fleet terminates the instances in the Spot Instance pools that have the least available Spot Instance capacity. If the allocation strategy is diversified, the Spot Fleet terminates instances across the Spot Instance pools. Alternatively, you can request that the Spot Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your Spot Fleet for now, but will use it again later, you can set the target capacity to 0.

" }, "ModifySubnetAttribute":{ "name":"ModifySubnetAttribute", @@ -3042,7 +3062,27 @@ }, "input":{"shape":"ModifyVpnConnectionRequest"}, "output":{"shape":"ModifyVpnConnectionResult"}, - "documentation":"

Modifies the target gateway of a AWS Site-to-Site VPN connection. The following migration options are available:

Before you perform the migration to the new gateway, you must configure the new gateway. Use CreateVpnGateway to create a virtual private gateway, or CreateTransitGateway to create a transit gateway.

This step is required when you migrate from a virtual private gateway with static routes to a transit gateway.

You must delete the static routes before you migrate to the new gateway.

Keep a copy of the static route before you delete it. You will need to add back these routes to the transit gateway after the VPN connection migration is complete.

After you migrate to the new gateway, you might need to modify your VPC route table. Use CreateRoute and DeleteRoute to make the changes described in VPN Gateway Target Modification Required VPC Route Table Updates in the AWS Site-to-Site VPN User Guide.

When the new gateway is a transit gateway, modify the transit gateway route table to allow traffic between the VPC and the AWS Site-to-Site VPN connection. Use CreateTransitGatewayRoute to add the routes.

If you deleted VPN static routes, you must add the static routes to the transit gateway route table.

After you perform this operation, the AWS VPN endpoint's IP addresses on the AWS side and the tunnel options remain intact. Your s2slong; connection will be temporarily unavailable for approximately 10 minutes while we provision the new endpoints

" + "documentation":"

Modifies the target gateway of an AWS Site-to-Site VPN connection. The following migration options are available:

Before you perform the migration to the new gateway, you must configure the new gateway. Use CreateVpnGateway to create a virtual private gateway, or CreateTransitGateway to create a transit gateway.

This step is required when you migrate from a virtual private gateway with static routes to a transit gateway.

You must delete the static routes before you migrate to the new gateway.

Keep a copy of the static route before you delete it. You will need to add back these routes to the transit gateway after the VPN connection migration is complete.

After you migrate to the new gateway, you might need to modify your VPC route table. Use CreateRoute and DeleteRoute to make the changes described in VPN Gateway Target Modification Required VPC Route Table Updates in the AWS Site-to-Site VPN User Guide.

When the new gateway is a transit gateway, modify the transit gateway route table to allow traffic between the VPC and the AWS Site-to-Site VPN connection. Use CreateTransitGatewayRoute to add the routes.

If you deleted VPN static routes, you must add the static routes to the transit gateway route table.

After you perform this operation, the AWS VPN endpoint's IP addresses on the AWS side and the tunnel options remain intact. Your AWS Site-to-Site VPN connection will be temporarily unavailable for approximately 10 minutes while we provision the new endpoints.

" + }, + "ModifyVpnTunnelCertificate":{ + "name":"ModifyVpnTunnelCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpnTunnelCertificateRequest"}, + "output":{"shape":"ModifyVpnTunnelCertificateResult"}, + "documentation":"

Modifies the VPN tunnel endpoint certificate.

" + }, + "ModifyVpnTunnelOptions":{ + "name":"ModifyVpnTunnelOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpnTunnelOptionsRequest"}, + "output":{"shape":"ModifyVpnTunnelOptionsResult"}, + "documentation":"

Modifies the options for a VPN tunnel in an AWS Site-to-Site VPN connection. You can modify multiple options for a tunnel in a single request, but you can only modify one tunnel at a time. For more information, see Site-to-Site VPN Tunnel Options for Your Site-to-Site VPN Connection in the AWS Site-to-Site VPN User Guide.
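A hedged sketch of the new operation; the connection ID and outside IP address are placeholders, and the TunnelOptions keys shown are assumptions about commonly tuned options rather than an exhaustive list:

```python
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")

ec2.modify_vpn_tunnel_options(
    VpnConnectionId="vpn-0123456789abcdef0",
    VpnTunnelOutsideIpAddress="203.0.113.17",  # selects which of the two tunnels to modify
    TunnelOptions={
        "TunnelInsideCidr": "169.254.44.0/30",
        "PreSharedKey": "example-pre-shared-key",
    },
)
```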

" }, "MonitorInstances":{ "name":"MonitorInstances", @@ -3390,7 +3430,7 @@ "requestUri":"/" }, "input":{"shape":"SendDiagnosticInterruptRequest"}, - "documentation":"

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).

In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.

Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.

For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic Interrupt (Linux instances) or Send a Diagnostic Interrupt (Windows instances).

" + "documentation":"

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).

In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.

Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.

For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic Interrupt (Linux instances) or Send a Diagnostic Interrupt (Windows instances).

" }, "StartInstances":{ "name":"StartInstances", @@ -6620,7 +6660,7 @@ }, "ThreadsPerCore":{ "shape":"Integer", - "documentation":"

The number of threads per CPU core. To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. Otherwise, specify the default value of 2.

" + "documentation":"

The number of threads per CPU core. To disable multithreading for the instance, specify a value of 1. Otherwise, specify the default value of 2.

" } }, "documentation":"

The CPU options for the instance. Both the core count and threads per core must be specified in the request.
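A hedged sketch of disabling multithreading at launch; the AMI ID is a placeholder, and c5.xlarge (2 cores x 2 threads by default) is just an example type:

```python
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")

ec2.run_instances(
    ImageId="ami-0123456789abcdef0",
    InstanceType="c5.xlarge",
    MinCount=1,
    MaxCount=1,
    CpuOptions={"CoreCount": 2, "ThreadsPerCore": 1},  # one thread per core
)
```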

" @@ -6828,7 +6868,6 @@ "type":"structure", "required":[ "BgpAsn", - "PublicIp", "Type" ], "members":{ @@ -6841,6 +6880,10 @@ "documentation":"

The Internet-routable IP address for the customer gateway's outside interface. The address must be static.

", "locationName":"IpAddress" }, + "CertificateArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the customer gateway certificate.
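A hedged sketch of the certificate-based variant this change enables (note that PublicIp has been dropped from the required list above); the certificate ARN is a placeholder:

```python
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")

gw = ec2.create_customer_gateway(
    BgpAsn=65000,
    Type="ipsec.1",
    CertificateArn="arn:aws:acm:us-east-1:111122223333:certificate/EXAMPLE",
)
print(gw["CustomerGateway"]["CustomerGatewayId"])
```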

" + }, "Type":{ "shape":"GatewayType", "documentation":"

The type of VPN connection that this customer gateway supports (ipsec.1).

" @@ -7163,6 +7206,10 @@ "LogDestination":{ "shape":"String", "documentation":"

Specifies the destination to which the flow log data is to be published. Flow log data can be published to a CloudWatch Logs log group or an Amazon S3 bucket. The value specified for this parameter depends on the value specified for LogDestinationType.

If LogDestinationType is not specified or cloud-watch-logs, specify the Amazon Resource Name (ARN) of the CloudWatch Logs log group.

If LogDestinationType is s3, specify the ARN of the Amazon S3 bucket. You can also specify a subfolder in the bucket. To specify a subfolder in the bucket, use the following ARN format: bucket_ARN/subfolder_name/. For example, to specify a subfolder named my-logs in a bucket named my-bucket, use the following ARN: arn:aws:s3:::my-bucket/my-logs/. You cannot use AWSLogs as a subfolder name. This is a reserved term.

" + }, + "LogFormat":{ + "shape":"String", + "documentation":"

The fields to include in the flow log record, in the order in which they should appear. For a list of available fields, see Flow Log Records. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must specify at least one field.

Specify the fields using the ${field-id} format, separated by spaces. For the AWS CLI, use single quotation marks (' ') to surround the parameter value.

Only applicable to flow logs that are published to an Amazon S3 bucket.
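A hedged sketch of a custom-format flow log published to S3; the VPC ID and bucket ARN are placeholders, and the field list is a small subset of the available fields:

```python
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")

ec2.create_flow_logs(
    ResourceIds=["vpc-0123456789abcdef0"],
    ResourceType="VPC",
    TrafficType="ALL",
    LogDestinationType="s3",
    LogDestination="arn:aws:s3:::my-flow-log-bucket/my-logs/",
    LogFormat="${version} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${action}",
)
```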

" } } }, @@ -8876,6 +8923,11 @@ "documentation":"

The Internet-routable IP address of the customer gateway's outside interface.

", "locationName":"ipAddress" }, + "CertificateArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the customer gateway certificate.

", + "locationName":"certificateArn" + }, "State":{ "shape":"String", "documentation":"

The current state of the customer gateway (pending | available | deleting | deleted).

", @@ -10774,6 +10826,53 @@ } } }, + "DescribeExportImageTasksMaxResults":{ + "type":"integer", + "max":500, + "min":1 + }, + "DescribeExportImageTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Filter tasks using the task-state filter and one of the following values: active, completed, deleting, or deleted.

", + "locationName":"Filter" + }, + "ExportImageTaskIds":{ + "shape":"ExportImageTaskIdList", + "documentation":"

The IDs of the export image tasks.

", + "locationName":"ExportImageTaskId" + }, + "MaxResults":{ + "shape":"DescribeExportImageTasksMaxResults", + "documentation":"

The maximum number of results to return in a single call.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token that indicates the next page of results.

" + } + } + }, + "DescribeExportImageTasksResult":{ + "type":"structure", + "members":{ + "ExportImageTasks":{ + "shape":"ExportImageTaskList", + "documentation":"

Information about the export image tasks.

", + "locationName":"exportImageTaskSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to get the next page of results. This value is null when there are no more results to return.
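A hedged sketch of paging through the new DescribeExportImageTasks operation with the task-state filter:

```python
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")

token = None
while True:
    kwargs = {
        "Filters": [{"Name": "task-state", "Values": ["active"]}],
        "MaxResults": 100,
    }
    if token:
        kwargs["NextToken"] = token
    page = ec2.describe_export_image_tasks(**kwargs)
    for task in page.get("ExportImageTasks", []):
        print(task["ExportImageTaskId"], task.get("Progress"))
    token = page.get("NextToken")
    if not token:
        break
```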

", + "locationName":"nextToken" + } + } + }, "DescribeExportTasksRequest":{ "type":"structure", "members":{ @@ -11426,16 +11525,16 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

Filter tasks using the task-state filter and one of the following values: active, completed, deleting, deleted.

" + "documentation":"

Filter tasks using the task-state filter and one of the following values: active, completed, deleting, or deleted.

" }, "ImportTaskIds":{ "shape":"ImportTaskIdList", - "documentation":"

A list of import image task IDs.

", + "documentation":"

The IDs of the import image tasks.

", "locationName":"ImportTaskId" }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.

" + "documentation":"

The maximum number of results to return in a single call.

" }, "NextToken":{ "shape":"String", @@ -14099,7 +14198,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "MaxResults":{ @@ -15530,6 +15629,150 @@ "microsoft" ] }, + "ExportImageRequest":{ + "type":"structure", + "required":[ + "DiskImageFormat", + "ImageId", + "S3ExportLocation" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "documentation":"

Token to enable idempotency for export image requests.

", + "idempotencyToken":true + }, + "Description":{ + "shape":"String", + "documentation":"

A description of the image being exported. The maximum length is 255 bytes.

" + }, + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "documentation":"

The disk image format.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "ImageId":{ + "shape":"String", + "documentation":"

The ID of the image.

" + }, + "S3ExportLocation":{ + "shape":"ExportTaskS3LocationRequest", + "documentation":"

Information about the destination S3 bucket. The bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

" + }, + "RoleName":{ + "shape":"String", + "documentation":"

The name of the role that grants VM Import/Export permission to export images to your S3 bucket. If this parameter is not specified, the default role is named 'vmimport'.
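Putting the required members together, a hedged export_image sketch; the AMI ID and bucket are placeholders, and the bucket must already grant the permissions noted above:

```python
import boto3

ec2 = boto3.client("ec2")

response = ec2.export_image(
    ImageId="ami-0123456789abcdef0",                # placeholder AMI
    DiskImageFormat="VMDK",
    S3ExportLocation={
        "S3Bucket": "example-image-export-bucket",  # must exist with the required ACLs
        "S3Prefix": "exports/",
    },
    # RoleName is omitted, so the default 'vmimport' role is assumed.
)
print(response["ExportImageTaskId"], response["Status"])
```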

" + } + } + }, + "ExportImageResult":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"String", + "documentation":"

A description of the image being exported.

", + "locationName":"description" + }, + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "documentation":"

The disk image format for the exported image.

", + "locationName":"diskImageFormat" + }, + "ExportImageTaskId":{ + "shape":"String", + "documentation":"

The ID of the export image task.

", + "locationName":"exportImageTaskId" + }, + "ImageId":{ + "shape":"String", + "documentation":"

The ID of the image.

", + "locationName":"imageId" + }, + "RoleName":{ + "shape":"String", + "documentation":"

The name of the role that grants VM Import/Export permission to export images to your S3 bucket.

", + "locationName":"roleName" + }, + "Progress":{ + "shape":"String", + "documentation":"

The percent complete of the export image task.

", + "locationName":"progress" + }, + "S3ExportLocation":{ + "shape":"ExportTaskS3Location", + "documentation":"

Information about the destination S3 bucket.

", + "locationName":"s3ExportLocation" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the export image task. The possible values are active, completed, deleting, and deleted.

", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "documentation":"

The status message for the export image task.

", + "locationName":"statusMessage" + } + } + }, + "ExportImageTask":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"String", + "documentation":"

A description of the image being exported.

", + "locationName":"description" + }, + "ExportImageTaskId":{ + "shape":"String", + "documentation":"

The ID of the export image task.

", + "locationName":"exportImageTaskId" + }, + "ImageId":{ + "shape":"String", + "documentation":"

The ID of the image.

", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "documentation":"

The percent complete of the export image task.

", + "locationName":"progress" + }, + "S3ExportLocation":{ + "shape":"ExportTaskS3Location", + "documentation":"

Information about the destination S3 bucket.

", + "locationName":"s3ExportLocation" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the export image task. The possible values are active, completed, deleting, and deleted.

", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "documentation":"

The status message for the export image task.

", + "locationName":"statusMessage" + } + }, + "documentation":"

Describes an export image task.

" + }, + "ExportImageTaskIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExportImageTaskId" + } + }, + "ExportImageTaskList":{ + "type":"list", + "member":{ + "shape":"ExportImageTask", + "locationName":"item" + } + }, "ExportTask":{ "type":"structure", "members":{ @@ -15580,6 +15823,37 @@ "locationName":"item" } }, + "ExportTaskS3Location":{ + "type":"structure", + "members":{ + "S3Bucket":{ + "shape":"String", + "documentation":"

The destination S3 bucket.

", + "locationName":"s3Bucket" + }, + "S3Prefix":{ + "shape":"String", + "documentation":"

The prefix (logical hierarchy) in the bucket.

", + "locationName":"s3Prefix" + } + }, + "documentation":"

Describes the destination for an export image task.

" + }, + "ExportTaskS3LocationRequest":{ + "type":"structure", + "required":["S3Bucket"], + "members":{ + "S3Bucket":{ + "shape":"String", + "documentation":"

The destination S3 bucket.

" + }, + "S3Prefix":{ + "shape":"String", + "documentation":"

The prefix (logical hierarchy) in the bucket.

" + } + }, + "documentation":"

Describes the destination for an export image task.

" + }, "ExportTaskState":{ "type":"string", "enum":[ @@ -15703,8 +15977,8 @@ "type":"string", "enum":[ "error", - "pending-fulfillment", - "pending-termination", + "pending_fulfillment", + "pending_termination", "fulfilled" ] }, @@ -16029,8 +16303,8 @@ "active", "deleted", "failed", - "deleted-running", - "deleted-terminating", + "deleted_running", + "deleted_terminating", "modifying" ] }, @@ -16100,6 +16374,11 @@ "shape":"String", "documentation":"

Specifies the destination to which the flow log data is published. Flow log data can be published to a CloudWatch Logs log group or an Amazon S3 bucket. If the flow log publishes to CloudWatch Logs, this element indicates the Amazon Resource Name (ARN) of the CloudWatch Logs log group to which the data is published. If the flow log publishes to Amazon S3, this element indicates the ARN of the Amazon S3 bucket to which the data is published.

", "locationName":"logDestination" + }, + "LogFormat":{ + "shape":"String", + "documentation":"

The format of the flow log record.

", + "locationName":"logFormat" } }, "documentation":"

Describes a flow log.

" @@ -17173,6 +17452,41 @@ "xen" ] }, + "IKEVersionsList":{ + "type":"list", + "member":{ + "shape":"IKEVersionsListValue", + "locationName":"item" + } + }, + "IKEVersionsListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The IKE version.

", + "locationName":"value" + } + }, + "documentation":"

The internet key exchange (IKE) version permitted for the VPN tunnel.

" + }, + "IKEVersionsRequestList":{ + "type":"list", + "member":{ + "shape":"IKEVersionsRequestListValue", + "locationName":"item" + } + }, + "IKEVersionsRequestListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The IKE version.

" + } + }, + "documentation":"

The IKE version that is permitted for the VPN tunnel.

" + }, "IamInstanceProfile":{ "type":"structure", "members":{ @@ -19410,6 +19724,12 @@ "g3.8xlarge", "g3.16xlarge", "g3s.xlarge", + "g4dn.xlarge", + "g4dn.2xlarge", + "g4dn.4xlarge", + "g4dn.8xlarge", + "g4dn.12xlarge", + "g4dn.16xlarge", "cg1.4xlarge", "p2.xlarge", "p2.8xlarge", @@ -20077,7 +20397,7 @@ }, "ThreadsPerCore":{ "shape":"Integer", - "documentation":"

The number of threads per CPU core. To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. Otherwise, specify the default value of 2.

" + "documentation":"

The number of threads per CPU core. To disable multithreading for the instance, specify a value of 1. Otherwise, specify the default value of 2.

" } }, "documentation":"

The CPU options for the instance. Both the core count and threads per core must be specified in the request.
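As a sketch of these CPU options at launch time (placeholder AMI); CoreCount and ThreadsPerCore must be supplied together, and ThreadsPerCore=1 disables multithreading:

```python
import boto3

ec2 = boto3.client("ec2")

ec2.run_instances(
    ImageId="ami-0123456789abcdef0",   # placeholder AMI
    InstanceType="c5.2xlarge",
    MinCount=1,
    MaxCount=1,
    CpuOptions={"CoreCount": 4, "ThreadsPerCore": 1},  # one thread per core
)
```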

" @@ -22012,7 +22332,7 @@ }, "PolicyDocument":{ "shape":"String", - "documentation":"

A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service.

" + "documentation":"

A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format.

" }, "AddRouteTableIds":{ "shape":"ValueStringList", @@ -22211,6 +22531,10 @@ "shape":"String", "documentation":"

The ID of the transit gateway.

" }, + "CustomerGatewayId":{ + "shape":"String", + "documentation":"

The ID of the customer gateway at your end of the VPN connection.

" + }, "VpnGatewayId":{ "shape":"String", "documentation":"

The ID of the virtual private gateway at the AWS side of the VPN connection.

" @@ -22230,6 +22554,144 @@ } } }, + "ModifyVpnTunnelCertificateRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "VpnTunnelOutsideIpAddress" + ], + "members":{ + "VpnConnectionId":{ + "shape":"String", + "documentation":"

The ID of the AWS Site-to-Site VPN connection.

" + }, + "VpnTunnelOutsideIpAddress":{ + "shape":"String", + "documentation":"

The external IP address of the VPN tunnel.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
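A minimal sketch of this request (connection ID and tunnel IP are placeholders); the tunnel to update is identified by its outside IP address:

```python
import boto3

ec2 = boto3.client("ec2")

# Rotate the certificate on a single tunnel of a Site-to-Site VPN connection.
response = ec2.modify_vpn_tunnel_certificate(
    VpnConnectionId="vpn-0123456789abcdef0",
    VpnTunnelOutsideIpAddress="203.0.113.17",
)
print(response["VpnConnection"]["VpnConnectionId"])
```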

" + } + } + }, + "ModifyVpnTunnelCertificateResult":{ + "type":"structure", + "members":{ + "VpnConnection":{ + "shape":"VpnConnection", + "locationName":"vpnConnection" + } + } + }, + "ModifyVpnTunnelOptionsRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "VpnTunnelOutsideIpAddress", + "TunnelOptions" + ], + "members":{ + "VpnConnectionId":{ + "shape":"String", + "documentation":"

The ID of the AWS Site-to-Site VPN connection.

" + }, + "VpnTunnelOutsideIpAddress":{ + "shape":"String", + "documentation":"

The external IP address of the VPN tunnel.

" + }, + "TunnelOptions":{ + "shape":"ModifyVpnTunnelOptionsSpecification", + "documentation":"

The tunnel options to modify.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "ModifyVpnTunnelOptionsResult":{ + "type":"structure", + "members":{ + "VpnConnection":{ + "shape":"VpnConnection", + "locationName":"vpnConnection" + } + } + }, + "ModifyVpnTunnelOptionsSpecification":{ + "type":"structure", + "members":{ + "TunnelInsideCidr":{ + "shape":"String", + "documentation":"

The range of inside IP addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same virtual private gateway.

Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. The following CIDR blocks are reserved and cannot be used:

" + }, + "PreSharedKey":{ + "shape":"String", + "documentation":"

The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and the customer gateway.

Constraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0).

" + }, + "Phase1LifetimeSeconds":{ + "shape":"Integer", + "documentation":"

The lifetime for phase 1 of the IKE negotiation, in seconds.

Constraints: A value between 900 and 28,800.

Default: 28800

" + }, + "Phase2LifetimeSeconds":{ + "shape":"Integer", + "documentation":"

The lifetime for phase 2 of the IKE negotiation, in seconds.

Constraints: A value between 900 and 3,600. The value must be less than the value for Phase1LifetimeSeconds.

Default: 3600

" + }, + "RekeyMarginTimeSeconds":{ + "shape":"Integer", + "documentation":"

The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for RekeyFuzzPercentage.

Constraints: A value between 60 and half of Phase2LifetimeSeconds.

Default: 540

" + }, + "RekeyFuzzPercentage":{ + "shape":"Integer", + "documentation":"

The percentage of the rekey window (determined by RekeyMarginTimeSeconds) during which the rekey time is randomly selected.

Constraints: A value between 0 and 100.

Default: 100

" + }, + "ReplayWindowSize":{ + "shape":"Integer", + "documentation":"

The number of packets in an IKE replay window.

Constraints: A value between 64 and 2048.

Default: 1024

" + }, + "DPDTimeoutSeconds":{ + "shape":"Integer", + "documentation":"

The number of seconds after which a DPD timeout occurs.

Constraints: A value between 0 and 30.

Default: 30

" + }, + "Phase1EncryptionAlgorithms":{ + "shape":"Phase1EncryptionAlgorithmsRequestList", + "documentation":"

One or more encryption algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.

Valid values: AES128 | AES256

", + "locationName":"Phase1EncryptionAlgorithm" + }, + "Phase2EncryptionAlgorithms":{ + "shape":"Phase2EncryptionAlgorithmsRequestList", + "documentation":"

One or more encryption algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.

Valid values: AES128 | AES256

", + "locationName":"Phase2EncryptionAlgorithm" + }, + "Phase1IntegrityAlgorithms":{ + "shape":"Phase1IntegrityAlgorithmsRequestList", + "documentation":"

One or more integrity algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.

Valid values: SHA1 | SHA2-256

", + "locationName":"Phase1IntegrityAlgorithm" + }, + "Phase2IntegrityAlgorithms":{ + "shape":"Phase2IntegrityAlgorithmsRequestList", + "documentation":"

One or more integrity algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.

Valid values: SHA1 | SHA2-256

", + "locationName":"Phase2IntegrityAlgorithm" + }, + "Phase1DHGroupNumbers":{ + "shape":"Phase1DHGroupNumbersRequestList", + "documentation":"

One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 1 IKE negotiations.

Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24

", + "locationName":"Phase1DHGroupNumber" + }, + "Phase2DHGroupNumbers":{ + "shape":"Phase2DHGroupNumbersRequestList", + "documentation":"

One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 2 IKE negotiations.

Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24

", + "locationName":"Phase2DHGroupNumber" + }, + "IKEVersions":{ + "shape":"IKEVersionsRequestList", + "documentation":"

The IKE versions that are permitted for the VPN tunnel.

Valid values: ikev1 | ikev2

", + "locationName":"IKEVersion" + } + }, + "documentation":"

The AWS Site-to-Site VPN tunnel options to modify.
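Tying these members together, a hedged sketch that narrows one tunnel to the stronger proposals listed above; the connection ID and tunnel IP are placeholders:

```python
import boto3

ec2 = boto3.client("ec2")

ec2.modify_vpn_tunnel_options(
    VpnConnectionId="vpn-0123456789abcdef0",
    VpnTunnelOutsideIpAddress="203.0.113.17",
    TunnelOptions={
        "Phase1EncryptionAlgorithms": [{"Value": "AES256"}],
        "Phase2EncryptionAlgorithms": [{"Value": "AES256"}],
        "Phase1DHGroupNumbers": [{"Value": 14}],
        "Phase2DHGroupNumbers": [{"Value": 14}],
        "IKEVersions": [{"Value": "ikev2"}],
    },
)
```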

" + }, "MonitorInstancesRequest":{ "type":"structure", "required":["InstanceIds"], @@ -23143,6 +23605,216 @@ "type":"string", "enum":["all"] }, + "Phase1DHGroupNumbersList":{ + "type":"list", + "member":{ + "shape":"Phase1DHGroupNumbersListValue", + "locationName":"item" + } + }, + "Phase1DHGroupNumbersListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Integer", + "documentation":"

The Diffie-Hellman group number.

", + "locationName":"value" + } + }, + "documentation":"

The Diffie-Hellman group number for phase 1 IKE negotiations.

" + }, + "Phase1DHGroupNumbersRequestList":{ + "type":"list", + "member":{ + "shape":"Phase1DHGroupNumbersRequestListValue", + "locationName":"item" + } + }, + "Phase1DHGroupNumbersRequestListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Integer", + "documentation":"

The Diffie-Hellman group number.

" + } + }, + "documentation":"

Specifies a Diffie-Hellman group number for the VPN tunnel for phase 1 IKE negotiations.

" + }, + "Phase1EncryptionAlgorithmsList":{ + "type":"list", + "member":{ + "shape":"Phase1EncryptionAlgorithmsListValue", + "locationName":"item" + } + }, + "Phase1EncryptionAlgorithmsListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The value for the encryption algorithm.

", + "locationName":"value" + } + }, + "documentation":"

The encryption algorithm for phase 1 IKE negotiations.

" + }, + "Phase1EncryptionAlgorithmsRequestList":{ + "type":"list", + "member":{ + "shape":"Phase1EncryptionAlgorithmsRequestListValue", + "locationName":"item" + } + }, + "Phase1EncryptionAlgorithmsRequestListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The value for the encryption algorithm.

" + } + }, + "documentation":"

Specifies the encryption algorithm for the VPN tunnel for phase 1 IKE negotiations.

" + }, + "Phase1IntegrityAlgorithmsList":{ + "type":"list", + "member":{ + "shape":"Phase1IntegrityAlgorithmsListValue", + "locationName":"item" + } + }, + "Phase1IntegrityAlgorithmsListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The value for the integrity algorithm.

", + "locationName":"value" + } + }, + "documentation":"

The integrity algorithm for phase 1 IKE negotiations.

" + }, + "Phase1IntegrityAlgorithmsRequestList":{ + "type":"list", + "member":{ + "shape":"Phase1IntegrityAlgorithmsRequestListValue", + "locationName":"item" + } + }, + "Phase1IntegrityAlgorithmsRequestListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The value for the integrity algorithm.

" + } + }, + "documentation":"

Specifies the integrity algorithm for the VPN tunnel for phase 1 IKE negotiations.

" + }, + "Phase2DHGroupNumbersList":{ + "type":"list", + "member":{ + "shape":"Phase2DHGroupNumbersListValue", + "locationName":"item" + } + }, + "Phase2DHGroupNumbersListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Integer", + "documentation":"

The Diffie-Hellman group number.

", + "locationName":"value" + } + }, + "documentation":"

The Diffie-Hellman group number for phase 2 IKE negotiations.

" + }, + "Phase2DHGroupNumbersRequestList":{ + "type":"list", + "member":{ + "shape":"Phase2DHGroupNumbersRequestListValue", + "locationName":"item" + } + }, + "Phase2DHGroupNumbersRequestListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Integer", + "documentation":"

The Diffie-Hellman group number.

" + } + }, + "documentation":"

Specifies a Diffie-Hellman group number for the VPN tunnel for phase 2 IKE negotiations.

" + }, + "Phase2EncryptionAlgorithmsList":{ + "type":"list", + "member":{ + "shape":"Phase2EncryptionAlgorithmsListValue", + "locationName":"item" + } + }, + "Phase2EncryptionAlgorithmsListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The encryption algorithm.

", + "locationName":"value" + } + }, + "documentation":"

The encryption algorithm for phase 2 IKE negotiations.

" + }, + "Phase2EncryptionAlgorithmsRequestList":{ + "type":"list", + "member":{ + "shape":"Phase2EncryptionAlgorithmsRequestListValue", + "locationName":"item" + } + }, + "Phase2EncryptionAlgorithmsRequestListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The encryption algorithm.

" + } + }, + "documentation":"

Specifies the encryption algorithm for the VPN tunnel for phase 2 IKE negotiations.

" + }, + "Phase2IntegrityAlgorithmsList":{ + "type":"list", + "member":{ + "shape":"Phase2IntegrityAlgorithmsListValue", + "locationName":"item" + } + }, + "Phase2IntegrityAlgorithmsListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The integrity algorithm.

", + "locationName":"value" + } + }, + "documentation":"

The integrity algorithm for phase 2 IKE negotiations.

" + }, + "Phase2IntegrityAlgorithmsRequestList":{ + "type":"list", + "member":{ + "shape":"Phase2IntegrityAlgorithmsRequestListValue", + "locationName":"item" + } + }, + "Phase2IntegrityAlgorithmsRequestListValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The integrity algorithm.

" + } + }, + "documentation":"

Specifies the integrity algorithm for the VPN tunnel for phase 2 IKE negotiations.

" + }, "Placement":{ "type":"structure", "members":{ @@ -27705,7 +28377,7 @@ "members":{ "AllocationStrategy":{ "shape":"AllocationStrategy", - "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the Spot Fleet request.

If the allocation strategy is lowestPrice, Spot Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, Spot Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized, Spot Fleet launches instances from Spot Instance pools that are optimally chosen based on the available Spot Instance capacity.

", + "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the Spot Fleet request.

If the allocation strategy is lowestPrice, Spot Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, Spot Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized, Spot Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.
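A rough request_spot_fleet sketch selecting the new strategy; the role ARN, AMI, and instance type are placeholders:

```python
import boto3

ec2 = boto3.client("ec2")

ec2.request_spot_fleet(
    SpotFleetRequestConfig={
        "AllocationStrategy": "capacityOptimized",  # favor pools with the most capacity
        "IamFleetRole": "arn:aws:iam::123456789012:role/example-fleet-role",
        "TargetCapacity": 4,
        "LaunchSpecifications": [
            {"ImageId": "ami-0123456789abcdef0", "InstanceType": "m5.large"},
        ],
    },
)
```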

", "locationName":"allocationStrategy" }, "OnDemandAllocationStrategy":{ @@ -28054,7 +28726,7 @@ "members":{ "AllocationStrategy":{ "shape":"SpotAllocationStrategy", - "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowestPrice, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized, EC2 Fleet launches instances from Spot Instance pools that are optimally chosen based on the available Spot Instance capacity.

", + "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowestPrice, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

", "locationName":"allocationStrategy" }, "InstanceInterruptionBehavior":{ @@ -28095,7 +28767,7 @@ "members":{ "AllocationStrategy":{ "shape":"SpotAllocationStrategy", - "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowestPrice, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized, EC2 Fleet launches instances from Spot Instance pools that are optimally chosen based on the available Spot Instance capacity.

" + "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowestPrice, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

" }, "InstanceInterruptionBehavior":{ "shape":"SpotInstanceInterruptionBehavior", @@ -29936,10 +30608,96 @@ "udp" ] }, + "TunnelOption":{ + "type":"structure", + "members":{ + "OutsideIpAddress":{ + "shape":"String", + "documentation":"

The external IP address of the VPN tunnel.

", + "locationName":"outsideIpAddress" + }, + "TunnelInsideCidr":{ + "shape":"String", + "documentation":"

The range of inside IP addresses for the tunnel.

", + "locationName":"tunnelInsideCidr" + }, + "PreSharedKey":{ + "shape":"String", + "documentation":"

The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and the customer gateway.

", + "locationName":"preSharedKey" + }, + "Phase1LifetimeSeconds":{ + "shape":"Integer", + "documentation":"

The lifetime for phase 1 of the IKE negotiation, in seconds.

", + "locationName":"phase1LifetimeSeconds" + }, + "Phase2LifetimeSeconds":{ + "shape":"Integer", + "documentation":"

The lifetime for phase 2 of the IKE negotiation, in seconds.

", + "locationName":"phase2LifetimeSeconds" + }, + "RekeyMarginTimeSeconds":{ + "shape":"Integer", + "documentation":"

The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the VPN connection performs an IKE rekey.

", + "locationName":"rekeyMarginTimeSeconds" + }, + "RekeyFuzzPercentage":{ + "shape":"Integer", + "documentation":"

The percentage of the rekey window (determined by RekeyMarginTimeSeconds) during which the rekey time is randomly selected.

", + "locationName":"rekeyFuzzPercentage" + }, + "ReplayWindowSize":{ + "shape":"Integer", + "documentation":"

The number of packets in an IKE replay window.

", + "locationName":"replayWindowSize" + }, + "DpdTimeoutSeconds":{ + "shape":"Integer", + "documentation":"

The number of seconds after which a DPD timeout occurs.

", + "locationName":"dpdTimeoutSeconds" + }, + "Phase1EncryptionAlgorithms":{ + "shape":"Phase1EncryptionAlgorithmsList", + "documentation":"

The permitted encryption algorithms for the VPN tunnel for phase 1 IKE negotiations.

", + "locationName":"phase1EncryptionAlgorithmSet" + }, + "Phase2EncryptionAlgorithms":{ + "shape":"Phase2EncryptionAlgorithmsList", + "documentation":"

The permitted encryption algorithms for the VPN tunnel for phase 2 IKE negotiations.

", + "locationName":"phase2EncryptionAlgorithmSet" + }, + "Phase1IntegrityAlgorithms":{ + "shape":"Phase1IntegrityAlgorithmsList", + "documentation":"

The permitted integrity algorithms for the VPN tunnel for phase 1 IKE negotiations.

", + "locationName":"phase1IntegrityAlgorithmSet" + }, + "Phase2IntegrityAlgorithms":{ + "shape":"Phase2IntegrityAlgorithmsList", + "documentation":"

The permitted integrity algorithms for the VPN tunnel for phase 2 IKE negotiations.

", + "locationName":"phase2IntegrityAlgorithmSet" + }, + "Phase1DHGroupNumbers":{ + "shape":"Phase1DHGroupNumbersList", + "documentation":"

The permitted Diffie-Hellman group numbers for the VPN tunnel for phase 1 IKE negotiations.

", + "locationName":"phase1DHGroupNumberSet" + }, + "Phase2DHGroupNumbers":{ + "shape":"Phase2DHGroupNumbersList", + "documentation":"

The permitted Diffie-Hellman group numbers for the VPN tunnel for phase 2 IKE negotiations.

", + "locationName":"phase2DHGroupNumberSet" + }, + "IkeVersions":{ + "shape":"IKEVersionsList", + "documentation":"

The IKE versions that are permitted for the VPN tunnel.

", + "locationName":"ikeVersionSet" + } + }, + "documentation":"

The VPN tunnel options.

" + }, "TunnelOptionsList":{ "type":"list", "member":{ - "shape":"VpnTunnelOptionsSpecification", + "shape":"TunnelOption", "locationName":"item" } }, @@ -30336,6 +31094,11 @@ "shape":"String", "documentation":"

If an error occurs, a description of the error.

", "locationName":"statusMessage" + }, + "CertificateArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the VPN tunnel endpoint certificate.

", + "locationName":"certificateArn" } }, "documentation":"

Describes telemetry for a VPN tunnel.

" @@ -31371,6 +32134,11 @@ "shape":"Boolean", "documentation":"

Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

", "locationName":"staticRoutesOnly" + }, + "TunnelOptions":{ + "shape":"TunnelOptionsList", + "documentation":"

Indicates the VPN tunnel options.

", + "locationName":"tunnelOptionSet" } }, "documentation":"

Describes VPN connection options.

" @@ -31384,7 +32152,7 @@ "locationName":"staticRoutesOnly" }, "TunnelOptions":{ - "shape":"TunnelOptionsList", + "shape":"VpnTunnelOptionsSpecificationsList", "documentation":"

The tunnel options for the VPN connection.

" } }, @@ -31506,11 +32274,74 @@ }, "PreSharedKey":{ "shape":"String", - "documentation":"

The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.

Constraints: Allowed characters are alphanumeric characters and ._. Must be between 8 and 64 characters in length and cannot start with zero (0).

" + "documentation":"

The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.

Constraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0).

" + }, + "Phase1LifetimeSeconds":{ + "shape":"Integer", + "documentation":"

The lifetime for phase 1 of the IKE negotiation, in seconds.

Constraints: A value between 900 and 28,800.

Default: 28800

" + }, + "Phase2LifetimeSeconds":{ + "shape":"Integer", + "documentation":"

The lifetime for phase 2 of the IKE negotiation, in seconds.

Constraints: A value between 900 and 3,600. The value must be less than the value for Phase1LifetimeSeconds.

Default: 3600

" + }, + "RekeyMarginTimeSeconds":{ + "shape":"Integer", + "documentation":"

The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for RekeyFuzzPercentage.

Constraints: A value between 60 and half of Phase2LifetimeSeconds.

Default: 540

" + }, + "RekeyFuzzPercentage":{ + "shape":"Integer", + "documentation":"

The percentage of the rekey window (determined by RekeyMarginTimeSeconds) during which the rekey time is randomly selected.

Constraints: A value between 0 and 100.

Default: 100

" + }, + "ReplayWindowSize":{ + "shape":"Integer", + "documentation":"

The number of packets in an IKE replay window.

Constraints: A value between 64 and 2048.

Default: 1024

" + }, + "DPDTimeoutSeconds":{ + "shape":"Integer", + "documentation":"

The number of seconds after which a DPD timeout occurs.

Constraints: A value between 0 and 30.

Default: 30

" + }, + "Phase1EncryptionAlgorithms":{ + "shape":"Phase1EncryptionAlgorithmsRequestList", + "documentation":"

One or more encryption algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.

Valid values: AES128 | AES256

", + "locationName":"Phase1EncryptionAlgorithm" + }, + "Phase2EncryptionAlgorithms":{ + "shape":"Phase2EncryptionAlgorithmsRequestList", + "documentation":"

One or more encryption algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.

Valid values: AES128 | AES256

", + "locationName":"Phase2EncryptionAlgorithm" + }, + "Phase1IntegrityAlgorithms":{ + "shape":"Phase1IntegrityAlgorithmsRequestList", + "documentation":"

One or more integrity algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.

Valid values: SHA1 | SHA2-256

", + "locationName":"Phase1IntegrityAlgorithm" + }, + "Phase2IntegrityAlgorithms":{ + "shape":"Phase2IntegrityAlgorithmsRequestList", + "documentation":"

One or more integrity algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.

Valid values: SHA1 | SHA2-256

", + "locationName":"Phase2IntegrityAlgorithm" + }, + "Phase1DHGroupNumbers":{ + "shape":"Phase1DHGroupNumbersRequestList", + "documentation":"

One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 1 IKE negotiations.

Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24

", + "locationName":"Phase1DHGroupNumber" + }, + "Phase2DHGroupNumbers":{ + "shape":"Phase2DHGroupNumbersRequestList", + "documentation":"

One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 2 IKE negotiations.

Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24

", + "locationName":"Phase2DHGroupNumber" + }, + "IKEVersions":{ + "shape":"IKEVersionsRequestList", + "documentation":"

The IKE versions that are permitted for the VPN tunnel.

Valid values: ikev1 | ikev2

", + "locationName":"IKEVersion" } }, "documentation":"

The tunnel options for a VPN connection.
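For context, a hedged create_vpn_connection sketch passing per-tunnel options; the gateway IDs, CIDRs, and keys are placeholders chosen to satisfy the constraints above:

```python
import boto3

ec2 = boto3.client("ec2")

ec2.create_vpn_connection(
    Type="ipsec.1",
    CustomerGatewayId="cgw-0123456789abcdef0",
    VpnGatewayId="vgw-0123456789abcdef0",
    Options={
        "StaticRoutesOnly": True,
        "TunnelOptions": [  # one entry per tunnel
            {"TunnelInsideCidr": "169.254.10.0/30", "PreSharedKey": "example.psk_1234"},
            {"TunnelInsideCidr": "169.254.11.0/30", "PreSharedKey": "example.psk_5678"},
        ],
    },
)
```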

" }, + "VpnTunnelOptionsSpecificationsList":{ + "type":"list", + "member":{"shape":"VpnTunnelOptionsSpecification"} + }, "WithdrawByoipCidrRequest":{ "type":"structure", "required":["Cidr"], diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index f8c78267..5ddac9b7 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -46,7 +46,7 @@ {"shape":"PlatformTaskDefinitionIncompatibilityException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS spawns another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and the container instance that they're hosted on is reported as healthy by the load balancer.

There are two service scheduler strategies available:

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

" + "documentation":"

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and the container instance that they're hosted on is reported as healthy by the load balancer.

There are two service scheduler strategies available:

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
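As a sketch of these deployment percentages on the ECS deployment controller; the cluster, service, and task definition names are placeholders:

```python
import boto3

ecs = boto3.client("ecs")

# Four desired tasks; at least two stay RUNNING during a deployment,
# and up to eight may run while new tasks replace old ones.
ecs.create_service(
    cluster="example-cluster",
    serviceName="example-service",
    taskDefinition="example-task:1",
    desiredCount=4,
    deploymentConfiguration={"minimumHealthyPercent": 50, "maximumPercent": 200},
)
```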

If a service is using either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

" }, "CreateTaskSet":{ "name":"CreateTaskSet", @@ -134,7 +134,7 @@ {"shape":"ClusterNotFoundException"}, {"shape":"ServiceNotFoundException"} ], - "documentation":"

Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in the ListServices API operation. After the tasks have stopped, then the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices calls on those services return a ServiceNotFoundException error.

If you attempt to create a new service with the same name as an existing service in either ACTIVE or DRAINING status, you receive an error.

" + "documentation":"

Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in the ListServices API operation. After all tasks have transitioned to either STOPPING or STOPPED status, the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices calls on those services return a ServiceNotFoundException error.

If you attempt to create a new service with the same name as an existing service in either ACTIVE or DRAINING status, you receive an error.

" }, "DeleteTaskSet":{ "name":"DeleteTaskSet", @@ -452,7 +452,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Modifies an account setting. If you change the account setting for the root user, the default settings for all of the IAM users and roles for which no individual account setting has been specified are reset. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource will be defined by the opt-in status of the IAM user or role that created the resource. You must enable this setting to use Amazon ECS features such as resource tagging.

When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is enabled, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

When containerInsights is specified, the default setting indicating whether CloudWatch Container Insights is enabled for your clusters is changed. If containerInsights is enabled, any new clusters that are created will have Container Insights enabled unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Modifies an account setting. Account settings are set on a per-Region basis.

If you change the account setting for the root user, the default settings for all of the IAM users and roles for which no individual account setting has been specified are reset. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource will be defined by the opt-in status of the IAM user or role that created the resource. You must enable this setting to use Amazon ECS features such as resource tagging.

When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is enabled, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

When containerInsights is specified, the default setting indicating whether CloudWatch Container Insights is enabled for your clusters is changed. If containerInsights is enabled, any new clusters that are created will have Container Insights enabled unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

" }, "PutAccountSettingDefault":{ "name":"PutAccountSettingDefault", @@ -467,7 +467,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Modifies an account setting for all IAM users on an account for whom no individual account setting has been specified.

" + "documentation":"

Modifies an account setting for all IAM users on an account for whom no individual account setting has been specified. Account settings are set on a per-Region basis.
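A brief sketch of both setting calls; the names and values here are illustrative:

```python
import boto3

ecs = boto3.client("ecs")

# Opt the calling identity in to long service ARNs in this Region.
ecs.put_account_setting(name="serviceLongArnFormat", value="enabled")

# Make Container Insights the default for all identities with no explicit setting.
ecs.put_account_setting_default(name="containerInsights", value="enabled")
```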

" }, "PutAttributes":{ "name":"PutAttributes", @@ -610,7 +610,8 @@ "errors":[ {"shape":"ServerException"}, {"shape":"ClientException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterException"} ], "documentation":"

This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.

Sent to acknowledge that a task changed states.

" }, @@ -648,6 +649,22 @@ ], "documentation":"

Deletes specified tags from a resource.

" }, + "UpdateClusterSettings":{ + "name":"UpdateClusterSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateClusterSettingsRequest"}, + "output":{"shape":"UpdateClusterSettingsResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Modifies the settings to use for a cluster.
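A one-call sketch (cluster name is a placeholder):

```python
import boto3

ecs = boto3.client("ecs")

# Enable CloudWatch Container Insights on an existing cluster.
ecs.update_cluster_settings(
    cluster="example-cluster",
    settings=[{"name": "containerInsights", "value": "enabled"}],
)
```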

" + }, "UpdateContainerAgent":{ "name":"UpdateContainerAgent", "http":{ @@ -940,7 +957,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to the cluster to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" }, "settings":{ "shape":"ClusterSettings", @@ -1047,6 +1064,18 @@ "shape":"String", "documentation":"

The name of the container.

" }, + "image":{ + "shape":"String", + "documentation":"

The image used for the container.

" + }, + "imageDigest":{ + "shape":"String", + "documentation":"

The container image manifest digest.

The imageDigest is only returned if the container is using an image hosted in Amazon ECR; otherwise, it is omitted.

" + }, + "runtimeId":{ + "shape":"String", + "documentation":"

The ID of the Docker container.

" + }, "lastStatus":{ "shape":"String", "documentation":"

The last known status of the container.

" @@ -1120,11 +1149,11 @@ }, "memory":{ "shape":"BoxedInteger", - "documentation":"

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If your containers are part of a task using the Fargate launch type, this field is optional.

For containers that are part of a task using the EC2 launch type, you must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" + "documentation":"

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If using the Fargate launch type, this parameter is optional.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" }, "memoryReservation":{ "shape":"BoxedInteger", - "documentation":"

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

You must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" + "documentation":"

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.
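Reusing the 128/300 MiB example above, a hedged register_task_definition sketch; the family and image are placeholders:

```python
import boto3

ecs = boto3.client("ecs")

ecs.register_task_definition(
    family="example-task",
    containerDefinitions=[
        {
            "name": "web",
            "image": "nginx:latest",      # placeholder image
            "memoryReservation": 128,     # soft limit
            "memory": 300,                # hard limit; must exceed memoryReservation
        }
    ],
)
```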

" }, "links":{ "shape":"StringList", @@ -1172,11 +1201,11 @@ }, "startTimeout":{ "shape":"BoxedInteger", - "documentation":"

Time duration to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED state.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.

" + "documentation":"

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time, then containerA gives up and does not start. This results in the task transitioning to a STOPPED state.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) Region only, and the task or service requires platform version 1.3.0 or later.

" }, "stopTimeout":{ "shape":"BoxedInteger", - "documentation":"

Time duration to wait before the container is forcefully killed if it doesn't exit normally on its own. For tasks using the Fargate launch type, the max stopTimeout value is 2 minutes. This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.

For tasks using the EC2 launch type, the stop timeout value for the container takes precedence over the ECS_CONTAINER_STOP_TIMEOUT container agent configuration parameter, if used. Container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. For tasks using the Fargate launch type, the maximum stopTimeout value is 2 minutes. This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) Region only, and the task or service requires platform version 1.3.0 or later.

For tasks using the EC2 launch type, the stop timeout value for the container takes precedence over the ECS_CONTAINER_STOP_TIMEOUT container agent configuration parameter, if used. Container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
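
A hedged sketch of the containerA/containerB scenario described for startTimeout, combined with a stopTimeout; the names, images, and values are illustrative (both fields take seconds):

    import boto3

    ecs = boto3.client("ecs")

    # containerA waits for containerB to reach SUCCESS; if containerB does not
    # get there within startTimeout seconds, containerA gives up and the task stops.
    ecs.register_task_definition(
        family="example-dependency",  # illustrative name
        containerDefinitions=[
            {
                "name": "containerB",
                "image": "busybox",
                "essential": False,
                "command": ["sh", "-c", "exit 0"],
                "startTimeout": 120,  # seconds allowed to reach SUCCESS
                "stopTimeout": 60,    # seconds before the container is killed on shutdown
            },
            {
                "name": "containerA",
                "image": "nginx:latest",
                "essential": True,
                "dependsOn": [
                    {"containerName": "containerB", "condition": "SUCCESS"}
                ],
            },
        ],
    )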

" }, "hostname":{ "shape":"String", @@ -1236,7 +1265,7 @@ }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

For tasks using the Fargate launch type, the supported log drivers are awslogs and splunk.

For tasks using the EC2 launch type, the supported log drivers are awslogs, syslog, gelf, fluentd, splunk, journald, and json-file.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
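
For instance, a minimal awslogs configuration for a container definition might look like the fragment below; the log group, Region, and prefix are placeholders:

    # Illustrative logConfiguration fragment sending logs to CloudWatch Logs.
    log_configuration = {
        "logDriver": "awslogs",
        "options": {
            "awslogs-group": "/ecs/example-app",  # placeholder log group
            "awslogs-region": "us-east-1",
            "awslogs-stream-prefix": "web",
        },
    }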

" }, "healthCheck":{ "shape":"HealthCheck", @@ -1249,6 +1278,10 @@ "resourceRequirements":{ "shape":"ResourceRequirements", "documentation":"

The type and amount of a resource to assign to a container. The only supported resource is a GPU.

" + }, + "firelensConfiguration":{ + "shape":"FirelensConfiguration", + "documentation":"

The FireLens configuration for the container. This is used to specify and configure a log router for container logs. For more information, see Custom Log Routing in the Amazon Elastic Container Service Developer Guide.

" } }, "documentation":"

Container definitions are used in task definitions to describe the different containers that are launched as part of a task.

" @@ -1344,7 +1377,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" } }, "documentation":"

An EC2 instance that is running the Amazon ECS agent and has been registered with a cluster.

" @@ -1416,6 +1449,14 @@ "shape":"String", "documentation":"

The name of the container.

" }, + "imageDigest":{ + "shape":"String", + "documentation":"

The container image SHA-256 digest.

" + }, + "runtimeId":{ + "shape":"String", + "documentation":"

The ID of the Docker container.

" + }, "exitCode":{ "shape":"BoxedInteger", "documentation":"

The exit code for the container, if the state change is a result of the container exiting.

" @@ -1452,7 +1493,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" }, "settings":{ "shape":"ClusterSettings", @@ -1487,7 +1528,7 @@ }, "loadBalancers":{ "shape":"LoadBalancers", - "documentation":"

A load balancer object representing the load balancer to use with your service.

If the service is using the ECS deployment controller, you are limited to one load balancer or target group.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" + "documentation":"

A load balancer object representing the load balancers to use with your service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

If the service is using the rolling update (ECS) deployment controller and using either an Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to the service. The service-linked role is required for services that make use of multiple target groups. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you to perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.
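
Sketching the multiple-target-group case above with the boto3 ECS client; the cluster, service, task definition, and target group ARNs are placeholders:

    import boto3

    ecs = boto3.client("ecs")

    # A rolling-update (ECS controller) service attached to two target groups.
    ecs.create_service(
        cluster="example-cluster",
        serviceName="example-service",
        taskDefinition="example-web:1",
        desiredCount=2,
        loadBalancers=[
            {
                "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/tg-one/0123456789abcdef",
                "containerName": "web",
                "containerPort": 80,
            },
            {
                "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/tg-two/fedcba9876543210",
                "containerName": "web",
                "containerPort": 8080,
            },
        ],
    )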

" }, "serviceRegistries":{ "shape":"ServiceRegistries", @@ -1495,7 +1536,7 @@ }, "desiredCount":{ "shape":"BoxedInteger", - "documentation":"

The number of instantiations of the specified task definition to place and keep running on your cluster.

" + "documentation":"

The number of instantiations of the specified task definition to place and keep running on your cluster.

This is required if schedulingStrategy is REPLICA or is not specified. If schedulingStrategy is DAEMON, then this is not required.

" }, "clientToken":{ "shape":"String", @@ -1511,7 +1552,7 @@ }, "role":{ "shape":"String", - "documentation":"

The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition does not use the awsvpc network mode. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.

If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. The service-linked role is required if your task definition uses the awsvpc network mode, in which case you should not specify a role here. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly Names and Paths in the IAM User Guide.

" + "documentation":"

The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition does not use the awsvpc network mode. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.

If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. The service-linked role is required if your task definition uses the awsvpc network mode or if the service is configured to use service discovery, an external deployment controller, or multiple target groups, in which case you should not specify a role here. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly Names and Paths in the IAM User Guide.

" }, "deploymentConfiguration":{ "shape":"DeploymentConfiguration", @@ -1543,7 +1584,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.

The following basic restrictions apply to tags:

" }, "enableECSManagedTags":{ "shape":"Boolean", @@ -1986,7 +2027,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that is applied to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that is applied to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" } } }, @@ -2178,6 +2219,33 @@ "type":"list", "member":{"shape":"Failure"} }, + "FirelensConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"FirelensConfigurationType", + "documentation":"

The log router to use. The valid values are fluentd or fluentbit.

" + }, + "options":{ + "shape":"FirelensConfigurationOptionsMap", + "documentation":"

The options to use when configuring the log router. This field is optional and can be used to add metadata, such as the task, task definition, cluster, and container instance details, to the log event. If specified, the syntax to use is \"options\":{\"enable-ecs-log-metadata\":\"true|false\"}.

" + } + }, + "documentation":"

The FireLens configuration for the container. This is used to specify and configure a log router for container logs. For more information, see Custom Log Routing in the Amazon Elastic Container Service Developer Guide.
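
A hedged sketch of a FireLens setup: a Fluent Bit router sidecar plus an application container that logs through the awsfirelens driver. The image tag, log group, and output options (which follow the Fluent Bit cloudwatch output plugin) are illustrative assumptions:

    # Router sidecar: runs Fluent Bit and receives the other containers' logs.
    log_router = {
        "name": "log_router",
        "image": "amazon/aws-for-fluent-bit:latest",
        "essential": True,
        "firelensConfiguration": {
            "type": "fluentbit",
            "options": {"enable-ecs-log-metadata": "true"},
        },
    }

    # Application container: stdout/stderr is routed through the sidecar.
    app = {
        "name": "app",
        "image": "nginx:latest",
        "essential": True,
        "logConfiguration": {
            "logDriver": "awsfirelens",
            "options": {
                "Name": "cloudwatch",                       # Fluent Bit output plugin
                "region": "us-east-1",
                "log_group_name": "/ecs/firelens-example",  # placeholder
                "log_stream_prefix": "app-",
            },
        },
    }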

" + }, + "FirelensConfigurationOptionsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "FirelensConfigurationType":{ + "type":"string", + "enum":[ + "fluentd", + "fluentbit" + ] + }, "GpuIds":{ "type":"list", "member":{"shape":"String"} @@ -2249,6 +2317,46 @@ }, "documentation":"

Details on a container instance bind mount host volume.

" }, + "InferenceAccelerator":{ + "type":"structure", + "required":[ + "deviceName", + "deviceType" + ], + "members":{ + "deviceName":{ + "shape":"String", + "documentation":"

The Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement.

" + }, + "deviceType":{ + "shape":"String", + "documentation":"

The Elastic Inference accelerator type to use.

" + } + }, + "documentation":"

Details on an Elastic Inference accelerator. For more information, see Working with Amazon Elastic Inference on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" + }, + "InferenceAcceleratorOverride":{ + "type":"structure", + "members":{ + "deviceName":{ + "shape":"String", + "documentation":"

The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName specified in the task definition.

" + }, + "deviceType":{ + "shape":"String", + "documentation":"

The Elastic Inference accelerator type to use.

" + } + }, + "documentation":"

Details on an Elastic Inference accelerator task override. This parameter is used to override the Elastic Inference accelerator specified in the task definition. For more information, see Working with Amazon Elastic Inference on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" + }, + "InferenceAcceleratorOverrides":{ + "type":"list", + "member":{"shape":"InferenceAcceleratorOverride"} + }, + "InferenceAccelerators":{ + "type":"list", + "member":{"shape":"InferenceAccelerator"} + }, "Integer":{"type":"integer"}, "InvalidParameterException":{ "type":"structure", @@ -2322,6 +2430,14 @@ "tmpfs":{ "shape":"TmpfsList", "documentation":"

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

If you are using tasks that use the Fargate launch type, the tmpfs parameter is not supported.

" + }, + "maxSwap":{ + "shape":"BoxedInteger", + "documentation":"

The total amount of swap memory (in MiB) a container can use. This parameter is translated to the --memory-swap option to docker run, where the value is the sum of the container memory plus the maxSwap value.

If a maxSwap value of 0 is specified, the container will not use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container will use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

If you are using tasks that use the Fargate launch type, the maxSwap parameter is not supported.

" + }, + "swappiness":{ + "shape":"BoxedInteger", + "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 causes swapping to not happen unless absolutely necessary. A swappiness value of 100 causes pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter is not specified, a default value of 60 is used. If a value is not specified for maxSwap, then this parameter is ignored. This parameter maps to the --memory-swappiness option to docker run.

If you are using tasks that use the Fargate launch type, the swappiness parameter is not supported.

" } }, "documentation":"

Linux-specific options that are applied to the container, such as Linux KernelCapabilities.
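
An illustrative linuxParameters fragment combining the swap settings above (values are arbitrary; EC2 launch type only):

    # Allow up to 256 MiB of swap on top of the container's memory, and swap
    # conservatively; swappiness is ignored unless maxSwap is set.
    linux_parameters = {
        "maxSwap": 256,    # MiB; 0 would disable swap entirely
        "swappiness": 10,  # 0-100; default is 60 when omitted
    }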

" @@ -2663,11 +2779,11 @@ "members":{ "targetGroupArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.

A target group ARN is only specified when using an application load balancer or a network load balancer. If you are using a classic load balancer this should be omitted.

For services using the ECS deployment controller, you are limited to one target group. For services using the CODE_DEPLOY deployment controller, you are required to define two target groups for the load balancer.

If your service's task definition uses the awsvpc network mode (which is required for the Fargate launch type), you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" + "documentation":"

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.

A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you are using a Classic Load Balancer, this should be omitted.

For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering Multiple Target Groups with a Service in the Amazon Elastic Container Service Developer Guide.

For services using the CODE_DEPLOY deployment controller, you are required to define two target groups for the load balancer. For more information, see Blue/Green Deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.

If your service's task definition uses the awsvpc network mode (which is required for the Fargate launch type), you must choose ip as the target type, not instance, when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" }, "loadBalancerName":{ "shape":"String", - "documentation":"

The name of the load balancer to associate with the Amazon ECS service or task set.

A load balancer name is only specified when using a classic load balancer. If you are using an application load balancer or a network load balancer this should be omitted.

" + "documentation":"

The name of the load balancer to associate with the Amazon ECS service or task set.

A load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer or a Network Load Balancer this should be omitted.

" }, "containerName":{ "shape":"String", @@ -2675,10 +2791,10 @@ }, "containerPort":{ "shape":"BoxedInteger", - "documentation":"

The port on the container to associate with the load balancer. This port must correspond to a containerPort in the service's task definition. Your container instances must allow ingress traffic on the hostPort of the port mapping.

" + "documentation":"

The port on the container to associate with the load balancer. This port must correspond to a containerPort in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they are launched on must allow ingress traffic on the hostPort of the port mapping.

" } }, - "documentation":"

Details on a load balancer to be used with a service or task set.

If the service is using the ECS deployment controller, you are limited to one load balancer or target group.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When you are creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). Each target group binds to a separate task set in the deployment. The load balancer can also have up to two listeners, a required listener for production traffic and an optional listener that allows you to test new revisions of the service before routing production traffic to it.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance. Tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" + "documentation":"

Details on the load balancer or load balancers to use with a service or task set.

" }, "LoadBalancers":{ "type":"list", @@ -2690,7 +2806,7 @@ "members":{ "logDriver":{ "shape":"LogDriver", - "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

For tasks using the Fargate launch type, the supported log drivers are awslogs and splunk.

For tasks using the EC2 launch type, the supported log drivers are awslogs, syslog, gelf, fluentd, splunk, journald, and json-file.

For more information about using the awslogs log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" + "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

For tasks using the Fargate launch type, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks using the EC2 launch type, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Custom Log Routing in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" }, "options":{ "shape":"LogConfigurationOptionsMap", @@ -2698,7 +2814,7 @@ }, "secretOptions":{ "shape":"SecretList", - "documentation":"

The secrets to pass to the log configuration.

" + "documentation":"

The secrets to pass to the log configuration. For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" } }, "documentation":"

Log configuration options to send to a custom log driver for the container.

" @@ -2717,7 +2833,8 @@ "gelf", "fluentd", "awslogs", - "splunk" + "splunk", + "awsfirelens" ] }, "Long":{"type":"long"}, @@ -2836,14 +2953,14 @@ "members":{ "type":{ "shape":"PlacementConstraintType", - "documentation":"

The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict the selection to a group of valid candidates. The value distinctInstance is not supported in task definitions.

" + "documentation":"

The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict the selection to a group of valid candidates.

" }, "expression":{ "shape":"String", "documentation":"

A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

An object representing a constraint on task placement. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

An object representing a constraint on task placement. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

If you are using the Fargate launch type, task placement constraints are not supported.
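
For example, a memberOf constraint with a cluster query language expression could be passed to RunTask as below; the cluster and task definition names are placeholders:

    import boto3

    ecs = boto3.client("ecs")

    # Place tasks only on t2-class container instances.
    ecs.run_task(
        cluster="example-cluster",
        taskDefinition="example-web:1",
        placementConstraints=[
            {
                "type": "memberOf",
                "expression": "attribute:ecs.instance-type =~ t2.*",
            }
        ],
    )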

" }, "PlacementConstraintType":{ "type":"string", @@ -3091,7 +3208,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" } } }, @@ -3153,17 +3270,21 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" }, "pidMode":{ "shape":"PidMode", - "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace expose. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" + "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace exposure. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" }, "ipcMode":{ "shape":"IpcMode", "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" }, - "proxyConfiguration":{"shape":"ProxyConfiguration"} + "proxyConfiguration":{"shape":"ProxyConfiguration"}, + "inferenceAccelerators":{ + "shape":"InferenceAccelerators", + "documentation":"

The Elastic Inference accelerators to use for the containers in the task.

" + } } }, "RegisterTaskDefinitionResponse":{ @@ -3240,14 +3361,14 @@ "members":{ "value":{ "shape":"String", - "documentation":"

The number of physical GPUs the Amazon ECS container agent will reserve for the container. The number of GPUs reserved for all containers in a task should not exceed the number of available GPUs on the container instance the task is launched on.

" + "documentation":"

The value for the specified resource type.

If the GPU type is used, the value is the number of physical GPUs the Amazon ECS container agent will reserve for the container. The number of GPUs reserved for all containers in a task should not exceed the number of available GPUs on the container instance the task is launched on.

If the InferenceAccelerator type is used, the value should match the deviceName for an InferenceAccelerator specified in a task definition.

" }, "type":{ "shape":"ResourceType", - "documentation":"

The type of resource to assign to a container. The only supported value is GPU.

" + "documentation":"

The type of resource to assign to a container. The supported values are GPU or InferenceAccelerator.

" } }, - "documentation":"

The type and amount of a resource to assign to a container. The only supported resource is a GPU. For more information, see Working with GPUs on Amazon ECS in the Amazon Elastic Container Service Developer Guide

" + "documentation":"

The type and amount of a resource to assign to a container. The supported resource types are GPUs and Elastic Inference accelerators. For more information, see Working with GPUs on Amazon ECS or Working with Amazon Elastic Inference on Amazon ECS in the Amazon Elastic Container Service Developer Guide.
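
A hedged sketch tying a task-level Elastic Inference accelerator to a container through resourceRequirements; the family, image, device name, and accelerator type are illustrative:

    import boto3

    ecs = boto3.client("ecs")

    ecs.register_task_definition(
        family="example-inference",  # illustrative name
        # Declare the accelerator at the task level...
        inferenceAccelerators=[
            {"deviceName": "device_1", "deviceType": "eia1.medium"}
        ],
        containerDefinitions=[
            {
                "name": "model-server",
                "image": "example/model:latest",  # placeholder image
                "essential": True,
                "memory": 512,
                # ...and reference it from the container by deviceName.
                "resourceRequirements": [
                    {"type": "InferenceAccelerator", "value": "device_1"}
                ],
            }
        ],
    )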

" }, "ResourceRequirements":{ "type":"list", @@ -3255,7 +3376,10 @@ }, "ResourceType":{ "type":"string", - "enum":["GPU"] + "enum":[ + "GPU", + "InferenceAccelerator" + ] }, "Resources":{ "type":"list", @@ -3311,7 +3435,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" }, "enableECSManagedTags":{ "shape":"Boolean", @@ -3416,7 +3540,7 @@ }, "loadBalancers":{ "shape":"LoadBalancers", - "documentation":"

A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance. Tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" + "documentation":"

A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

" }, "serviceRegistries":{ "shape":"ServiceRegistries", @@ -3496,11 +3620,11 @@ }, "deploymentController":{ "shape":"DeploymentController", - "documentation":"

The deployment controller type the service is using.

" + "documentation":"

The deployment controller type the service is using. When using the DescribeServices API, this field is omitted if the service is using the ECS deployment controller type.

" }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" }, "createdBy":{ "shape":"String", @@ -3674,7 +3798,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" }, "enableECSManagedTags":{ "shape":"Boolean", @@ -3778,6 +3902,10 @@ "shape":"String", "documentation":"

The name of the container.

" }, + "runtimeId":{ + "shape":"String", + "documentation":"

The ID of the Docker container.

" + }, "status":{ "shape":"String", "documentation":"

The status of the state change request.

" @@ -3885,7 +4013,7 @@ "documentation":"

The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

" } }, - "documentation":"

The metadata that you apply to a resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to a resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" }, "TagKey":{ "type":"string", @@ -3910,7 +4038,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs.

The following basic restrictions apply to tags:

" } } }, @@ -3991,7 +4119,7 @@ }, "version":{ "shape":"Long", - "documentation":"

The version counter for the task. Every time a task experiences a change that triggers a CloudWatch event, the version counter is incremented. If you are replicating your Amazon ECS task state with CloudWatch Events, you can compare the version of a task reported by the Amazon ECS API actionss with the version reported in CloudWatch Events for the task (inside the detail object) to verify that the version in your event stream is current.

" + "documentation":"

The version counter for the task. Every time a task experiences a change that triggers a CloudWatch event, the version counter is incremented. If you are replicating your Amazon ECS task state with CloudWatch Events, you can compare the version of a task reported by the Amazon ECS API actions with the version reported in CloudWatch Events for the task (inside the detail object) to verify that the version in your event stream is current.

" }, "stoppedReason":{ "shape":"String", @@ -4059,7 +4187,11 @@ }, "tags":{ "shape":"Tags", - "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" + }, + "inferenceAccelerators":{ + "shape":"InferenceAccelerators", + "documentation":"

The Elastic Inference accelerator associated with the task.

" } }, "documentation":"

Details on a task in a cluster.

" @@ -4081,7 +4213,7 @@ }, "taskRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" }, "executionRoleArn":{ "shape":"String", @@ -4125,11 +4257,11 @@ }, "memory":{ "shape":"String", - "documentation":"

The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

" + "documentation":"

The amount (in MiB) of memory used by the task.

If using the EC2 launch type, this field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:
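
For illustration, one valid Fargate pairing is 256 CPU units (0.25 vCPU) with 512 MiB; the family and image below are placeholders:

    import boto3

    ecs = boto3.client("ecs")

    # Fargate requires task-level cpu and memory, passed as strings.
    ecs.register_task_definition(
        family="example-fargate",  # illustrative name
        requiresCompatibilities=["FARGATE"],
        networkMode="awsvpc",
        cpu="256",
        memory="512",
        containerDefinitions=[
            {"name": "web", "image": "nginx:latest", "essential": True}
        ],
    )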

" }, "pidMode":{ "shape":"PidMode", - "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace expose. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" + "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace exposure. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" }, "ipcMode":{ "shape":"IpcMode", @@ -4163,14 +4295,14 @@ "members":{ "type":{ "shape":"TaskDefinitionPlacementConstraintType", - "documentation":"

The type of constraint. The DistinctInstance constraint ensures that each task in a particular group is running on a different container instance. The MemberOf constraint restricts selection to be from a group of valid candidates.

" + "documentation":"

The type of constraint. The MemberOf constraint restricts selection to be from a group of valid candidates.

" }, "expression":{ "shape":"String", "documentation":"

A cluster query language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

An object representing a constraint on task placement in the task definition.

If you are using the Fargate launch type, task placement constraints are not supported.

For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

An object representing a constraint on task placement in the task definition. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

If you are using the Fargate launch type, task placement constraints are not supported.

" }, "TaskDefinitionPlacementConstraintType":{ "type":"string", @@ -4202,6 +4334,10 @@ "shape":"ContainerOverrides", "documentation":"

One or more container overrides sent to a task.

" }, + "inferenceAcceleratorOverrides":{ + "shape":"InferenceAcceleratorOverrides", + "documentation":"

The Elastic Inference accelerator override for the task.
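
For instance, a RunTask call could swap the accelerator declared in the task definition for a larger type; the names and types here are illustrative:

    import boto3

    ecs = boto3.client("ecs")

    # Override the task definition's device_1 accelerator at run time.
    ecs.run_task(
        cluster="example-cluster",
        taskDefinition="example-inference:1",
        overrides={
            "inferenceAcceleratorOverrides": [
                {"deviceName": "device_1", "deviceType": "eia1.large"}
            ]
        },
    )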

" + }, "taskRoleArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

" @@ -4294,7 +4430,7 @@ }, "stabilityStatus":{ "shape":"StabilityStatus", - "documentation":"

The stability status, which indicates whether the task set has reached a steady state. If the following conditions are met, the task set will be in STEADY_STATE:

If any of those conditions are not met, the stability status returns STABILIZING.

" + "documentation":"

The stability status, which indicates whether the task set has reached a steady state. If the following conditions are met, the task set will be in STEADY_STATE:

If any of those conditions are not met, the stability status returns STABILIZING.

" }, "stabilityStatusAt":{ "shape":"Timestamp", @@ -4436,6 +4572,29 @@ "members":{ } }, + "UpdateClusterSettingsRequest":{ + "type":"structure", + "required":[ + "cluster", + "settings" + ], + "members":{ + "cluster":{ + "shape":"String", + "documentation":"

The name of the cluster to modify the settings for.

" + }, + "settings":{ + "shape":"ClusterSettings", + "documentation":"

The setting to use by default for a cluster. This parameter is used to enable CloudWatch Container Insights for a cluster. If this value is specified, it will override the containerInsights value set with PutAccountSetting or PutAccountSettingDefault.
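
A minimal sketch of the new operation through the boto3 ECS client; the cluster name is a placeholder:

    import boto3

    ecs = boto3.client("ecs")

    # Enable CloudWatch Container Insights for one cluster, overriding the
    # account-level containerInsights default.
    ecs.update_cluster_settings(
        cluster="example-cluster",
        settings=[{"name": "containerInsights", "value": "enabled"}],
    )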

" + } + } + }, + "UpdateClusterSettingsResponse":{ + "type":"structure", + "members":{ + "cluster":{"shape":"Cluster"} + } + }, "UpdateContainerAgentRequest":{ "type":"structure", "required":["containerInstance"], @@ -4566,7 +4725,7 @@ }, "healthCheckGracePeriodSeconds":{ "shape":"BoxedInteger", - "documentation":"

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 1,800 seconds. During that time, the ECS service scheduler ignores the Elastic Load Balancing health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.

" + "documentation":"

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds. During that time, the ECS service scheduler ignores the Elastic Load Balancing health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.
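
For example, a slow-starting service might be given a five-minute grace period; the cluster and service names are illustrative:

    import boto3

    ecs = boto3.client("ecs")

    # Ignore ELB health check results for 300 seconds after a task starts.
    ecs.update_service(
        cluster="example-cluster",
        service="example-service",
        healthCheckGracePeriodSeconds=300,
    )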

" } } }, diff --git a/botocore/data/eks/2017-11-01/service-2.json b/botocore/data/eks/2017-11-01/service-2.json index e3450bf0..02c24a91 100644 --- a/botocore/data/eks/2017-11-01/service-2.json +++ b/botocore/data/eks/2017-11-01/service-2.json @@ -97,6 +97,20 @@ ], "documentation":"

Lists the Amazon EKS clusters in your AWS account in the specified Region.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

List the tags for an Amazon EKS resource.

" + }, "ListUpdates":{ "name":"ListUpdates", "http":{ @@ -113,6 +127,34 @@ ], "documentation":"

Lists the updates associated with an Amazon EKS cluster in your AWS account, in the specified Region.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Associates the specified tags with the resource identified by resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Deletes specified tags from a resource.
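Taken together with the TagMap and TagKeyList shapes defined below, the three new operations round-trip like this in boto3; the cluster name and tag values are placeholders:

import boto3

eks = boto3.client("eks")

# Clusters are currently the only taggable EKS resource; get the ARN first.
arn = eks.describe_cluster(name="my-cluster")["cluster"]["arn"]

eks.tag_resource(resourceArn=arn, tags={"team": "data", "env": "prod"})
print(eks.list_tags_for_resource(resourceArn=arn)["tags"])
eks.untag_resource(resourceArn=arn, tagKeys=["env"])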

" + }, "UpdateClusterConfig":{ "name":"UpdateClusterConfig", "http":{ @@ -151,6 +193,15 @@ } }, "shapes":{ + "BadRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

This exception is thrown if the request contains a semantic error. The precise meaning will depend on the API, and will be documented in the error message.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "Boolean":{"type":"boolean"}, "BoxedBoolean":{ "type":"boolean", @@ -214,6 +265,10 @@ "shape":"Logging", "documentation":"

The logging configuration for your cluster.

" }, + "identity":{ + "shape":"Identity", + "documentation":"

The identity provider information for the cluster.

" + }, "status":{ "shape":"ClusterStatus", "documentation":"

The current status of the cluster.

" @@ -229,6 +284,10 @@ "platformVersion":{ "shape":"String", "documentation":"

The platform version of your Amazon EKS cluster. For more information, see Platform Versions in the Amazon EKS User Guide .

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.

" } }, "documentation":"

An object representing an Amazon EKS cluster.

" @@ -280,6 +339,10 @@ "shape":"String", "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The metadata to apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.

" } } }, @@ -399,6 +462,16 @@ "type":"list", "member":{"shape":"ErrorDetail"} }, + "Identity":{ + "type":"structure", + "members":{ + "oidc":{ + "shape":"OIDC", + "documentation":"

The OpenID Connect identity provider information for the cluster.

" + } + }, + "documentation":"

An object representing an identity provider for authentication credentials.

" + }, "InvalidParameterException":{ "type":"structure", "members":{ @@ -461,6 +534,27 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Amazon EKS clusters.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

The tags for the resource.

" + } + } + }, "ListUpdatesRequest":{ "type":"structure", "required":["name"], @@ -546,6 +640,25 @@ }, "documentation":"

An object representing the logging configuration for resources in your cluster.

" }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

A service resource associated with the request could not be found. Clients should not retry such requests.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "OIDC":{ + "type":"structure", + "members":{ + "issuer":{ + "shape":"String", + "documentation":"

The issuer URL for the OpenID Connect identity provider.

" + } + }, + "documentation":"

An object representing the OpenID Connect identity provider information for the cluster.
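The new identity block surfaces the cluster's OIDC issuer URL, which is what you feed into an IAM OIDC identity provider. A boto3 sketch (the cluster name is a placeholder):

import boto3

eks = boto3.client("eks")

# Read the OpenID Connect issuer URL from the cluster description.
cluster = eks.describe_cluster(name="my-cluster")["cluster"]
issuer = cluster["identity"]["oidc"]["issuer"]
print(issuer)  # e.g. https://oidc.eks.<region>.amazonaws.com/id/<hex>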

" + }, "ResourceInUseException":{ "type":"structure", "members":{ @@ -614,6 +727,52 @@ "type":"list", "member":{"shape":"String"} }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resources are Amazon EKS clusters.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags to add to the resource. Tags are key-value pairs.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, "Timestamp":{"type":"timestamp"}, "UnsupportedAvailabilityZoneException":{ "type":"structure", @@ -632,6 +791,32 @@ "error":{"httpStatusCode":400}, "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource from which to delete tags. Currently, the supported resources are Amazon EKS clusters.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The keys of the tags to be removed.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "Update":{ "type":"structure", "members":{ diff --git a/botocore/data/elasticache/2015-02-02/service-2.json b/botocore/data/elasticache/2015-02-02/service-2.json index 79a9f806..ad54af43 100644 --- a/botocore/data/elasticache/2015-02-02/service-2.json +++ b/botocore/data/elasticache/2015-02-02/service-2.json @@ -643,6 +643,7 @@ {"shape":"NodeGroupsPerReplicationGroupQuotaExceededFault"}, {"shape":"NodeQuotaForCustomerExceededFault"}, {"shape":"NoOperationFault"}, + {"shape":"InvalidKMSKeyFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -665,7 +666,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Lists all available node types that you can scale your Redis cluster's or replication group's current node type up to.

When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale up your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation.

" + "documentation":"

Lists all available node types to which you can scale your Redis cluster's or replication group's current node type.

When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -772,6 +773,7 @@ {"shape":"CacheSecurityGroupNotFoundFault"}, {"shape":"CacheParameterGroupNotFoundFault"}, {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidKMSKeyFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -796,6 +798,7 @@ {"shape":"InsufficientCacheClusterCapacityFault"}, {"shape":"NodeGroupsPerReplicationGroupQuotaExceededFault"}, {"shape":"NodeQuotaForCustomerExceededFault"}, + {"shape":"InvalidKMSKeyFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -914,6 +917,7 @@ {"shape":"NodeGroupNotFoundFault"}, {"shape":"ReplicationGroupNotFoundFault"}, {"shape":"TestFailoverNotAvailableFault"}, + {"shape":"InvalidKMSKeyFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], @@ -970,6 +974,10 @@ "ScaleUpModifications":{ "shape":"NodeTypeList", "documentation":"

A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group.

When scaling up a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.

" + }, + "ScaleDownModifications":{ + "shape":"NodeTypeList", + "documentation":"

A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group.

When scaling down a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.

" } }, "documentation":"

Represents the allowed node types you can use to modify your cluster or replication group.
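With ScaleDownModifications added, the operation now supports moving in either direction. A sketch of picking a smaller node type and applying it; the replication group ID is a placeholder and the choice of the first list element is illustrative:

import boto3

elasticache = boto3.client("elasticache")

mods = elasticache.list_allowed_node_type_modifications(
    ReplicationGroupId="my-redis-group",
)
smaller = mods["ScaleDownModifications"]
if smaller:
    # Any value from the returned list is valid for CacheNodeType.
    elasticache.modify_replication_group(
        ReplicationGroupId="my-redis-group",
        CacheNodeType=smaller[0],
        ApplyImmediately=True,
    )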

" @@ -1805,6 +1813,10 @@ "TargetBucket":{ "shape":"String", "documentation":"

The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.

When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.

For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The ID of the KMS key used to encrypt the target snapshot.
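A sketch of the new KmsKeyId parameter on CopySnapshot in boto3; the snapshot names and key ID are placeholders, and TargetBucket is omitted because this copy stays within ElastiCache:

import boto3

elasticache = boto3.client("elasticache")

# Copy a snapshot, encrypting the target with a customer-managed KMS key.
elasticache.copy_snapshot(
    SourceSnapshotName="my-snapshot",
    TargetSnapshotName="my-snapshot-encrypted",
    KmsKeyId="1234abcd-12ab-34cd-56ef-1234567890ab",
)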

" } }, "documentation":"

Represents the input of a CopySnapshotMessage operation.

" @@ -1821,7 +1833,7 @@ "members":{ "CacheClusterId":{ "shape":"String", - "documentation":"

The node group (shard) identifier. This parameter is stored as a lowercase string.

Constraints:

" + "documentation":"

The node group (shard) identifier. This parameter is stored as a lowercase string.

Constraints:

" }, "ReplicationGroupId":{ "shape":"String", @@ -2011,7 +2023,7 @@ "members":{ "ReplicationGroupId":{ "shape":"String", - "documentation":"

The replication group identifier. This parameter is stored as a lowercase string.

Constraints:

" + "documentation":"

The replication group identifier. This parameter is stored as a lowercase string.

Constraints:

" }, "ReplicationGroupDescription":{ "shape":"String", @@ -2115,11 +2127,15 @@ }, "TransitEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6 or 4.x, and the cluster is being created in an Amazon VPC.

If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup.

" + "documentation":"

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC.

If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup.

" }, "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", "documentation":"

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The ID of the KMS key used to encrypt the disk on the cluster.
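Putting the two encryption flags and the new KmsKeyId together, a sketch of a HIPAA-style CreateReplicationGroup call in boto3; all identifiers, the auth token, and the subnet group are placeholders:

import boto3

elasticache = boto3.client("elasticache")

elasticache.create_replication_group(
    ReplicationGroupId="my-redis-group",
    ReplicationGroupDescription="Encrypted Redis replication group",
    Engine="redis",
    EngineVersion="5.0.6",
    CacheNodeType="cache.r5.large",
    NumCacheClusters=2,
    CacheSubnetGroupName="my-subnet-group",    # required with in-transit encryption
    TransitEncryptionEnabled=True,             # immutable after creation
    AtRestEncryptionEnabled=True,              # immutable after creation
    AuthToken="correct-horse-battery-staple",  # required for HIPAA compliance
    KmsKeyId="1234abcd-12ab-34cd-56ef-1234567890ab",
)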

" } }, "documentation":"

Represents the input of a CreateReplicationGroup operation.

" @@ -2145,6 +2161,10 @@ "SnapshotName":{ "shape":"String", "documentation":"

A name for the snapshot being created.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The ID of the KMS key used to encrypt the snapshot.

" } }, "documentation":"

Represents the input of a CreateSnapshot operation.

" @@ -2873,6 +2893,18 @@ }, "exception":true }, + "InvalidKMSKeyFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The KMS key supplied is not valid.

", + "error":{ + "code":"InvalidKMSKeyFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidParameterCombinationException":{ "type":"structure", "members":{ @@ -3857,6 +3889,10 @@ "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", "documentation":"

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The ID of the KMS key used to encrypt the disk in the cluster.

" } }, "documentation":"

Contains all of the attributes of a specific Redis replication group.

", @@ -4462,6 +4498,10 @@ "NodeSnapshots":{ "shape":"NodeSnapshotList", "documentation":"

A list of the cache nodes in the source cluster.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The ID of the KMS key used to encrypt the snapshot.

" } }, "documentation":"

Represents a copy of an entire Redis cluster as of the time when the snapshot was taken.

", diff --git a/botocore/data/elbv2/2015-12-01/service-2.json b/botocore/data/elbv2/2015-12-01/service-2.json index 26411093..b94c58a7 100644 --- a/botocore/data/elbv2/2015-12-01/service-2.json +++ b/botocore/data/elbv2/2015-12-01/service-2.json @@ -28,7 +28,7 @@ {"shape":"TooManyCertificatesException"}, {"shape":"CertificateNotFoundException"} ], - "documentation":"

Adds the specified SSL server certificate to the certificate list for the specified HTTPS listener.

If the certificate in already in the certificate list, the call is successful but the certificate is not added again.

To get the certificate list for a listener, use DescribeListenerCertificates. To remove certificates from the certificate list for a listener, use RemoveListenerCertificates. To replace the default certificate for a listener, use ModifyListener.

For more information, see SSL Certificates in the Application Load Balancers Guide.

" + "documentation":"

Adds the specified SSL server certificate to the certificate list for the specified HTTPS or TLS listener.

If the certificate is already in the certificate list, the call is successful but the certificate is not added again.

To get the certificate list for a listener, use DescribeListenerCertificates. To remove certificates from the certificate list for a listener, use RemoveListenerCertificates. To replace the default certificate for a listener, use ModifyListener.

For more information, see SSL Certificates in the Application Load Balancers Guide.
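A sketch of managing the certificate list for an HTTPS or TLS listener in boto3; the listener and certificate ARNs are truncated placeholders:

import boto3

elbv2 = boto3.client("elbv2")

listener_arn = "arn:aws:elasticloadbalancing:us-east-1:123456789012:listener/..."
cert_arn = "arn:aws:acm:us-east-1:123456789012:certificate/..."

# Adding a certificate that is already present succeeds without duplication.
elbv2.add_listener_certificates(
    ListenerArn=listener_arn,
    Certificates=[{"CertificateArn": cert_arn}],
)
certs = elbv2.describe_listener_certificates(ListenerArn=listener_arn)
for cert in certs["Certificates"]:
    print(cert["CertificateArn"], cert.get("IsDefault"))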

" }, "AddTags":{ "name":"AddTags", @@ -264,7 +264,7 @@ "errors":[ {"shape":"ListenerNotFoundException"} ], - "documentation":"

Describes the default certificate and the certificate list for the specified HTTPS listener.

If the default certificate is also in the certificate list, it appears twice in the results (once with IsDefault set to true and once with IsDefault set to false).

For more information, see SSL Certificates in the Application Load Balancers Guide.

" + "documentation":"

Describes the default certificate and the certificate list for the specified HTTPS or TLS listener.

If the default certificate is also in the certificate list, it appears twice in the results (once with IsDefault set to true and once with IsDefault set to false).

For more information, see SSL Certificates in the Application Load Balancers Guide.

" }, "DescribeListeners":{ "name":"DescribeListeners", @@ -560,7 +560,7 @@ {"shape":"ListenerNotFoundException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

Removes the specified certificate from the certificate list for the specified HTTPS listener.

You can't remove the default certificate for a listener. To replace the default certificate, call ModifyListener.

To list the certificates for your listener, use DescribeListenerCertificates.

" + "documentation":"

Removes the specified certificate from the certificate list for the specified HTTPS or TLS listener.

You can't remove the default certificate for a listener. To replace the default certificate, call ModifyListener.

To list the certificates for your listener, use DescribeListenerCertificates.

" }, "RemoveTags":{ "name":"RemoveTags", @@ -933,11 +933,11 @@ }, "SubnetId":{ "shape":"SubnetId", - "documentation":"

The ID of the subnet.

" + "documentation":"

The ID of the subnet. You can specify one subnet per Availability Zone.

" }, "LoadBalancerAddresses":{ "shape":"LoadBalancerAddresses", - "documentation":"

[Network Load Balancers] The static IP address.

" + "documentation":"

[Network Load Balancers] If you need static IP addresses for your load balancer, you can specify one Elastic IP address per Availability Zone when you create the load balancer.

" } }, "documentation":"

Information about an Availability Zone.

" @@ -1072,7 +1072,7 @@ }, "SubnetMappings":{ "shape":"SubnetMappings", - "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet.

" + "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your load balancer.
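A sketch of the Network Load Balancer case in boto3: one subnet per Availability Zone, each paired with an Elastic IP allocation to get a static address (all IDs are placeholders):

import boto3

elbv2 = boto3.client("elbv2")

elbv2.create_load_balancer(
    Name="my-nlb",
    Type="network",
    SubnetMappings=[
        {"SubnetId": "subnet-0abc1234", "AllocationId": "eipalloc-0abc1234"},
        {"SubnetId": "subnet-0def5678", "AllocationId": "eipalloc-0def5678"},
    ],
)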

" }, "SecurityGroups":{ "shape":"SecurityGroups", @@ -1159,7 +1159,7 @@ }, "VpcId":{ "shape":"VpcId", - "documentation":"

The identifier of the virtual private cloud (VPC). If the target is a Lambda function, this parameter does not apply.

" + "documentation":"

The identifier of the virtual private cloud (VPC). If the target is a Lambda function, this parameter does not apply. Otherwise, this parameter is required.

" }, "HealthCheckProtocol":{ "shape":"ProtocolEnum", @@ -3024,7 +3024,7 @@ }, "Reason":{ "shape":"TargetHealthReasonEnum", - "documentation":"

The reason code.

If the target state is healthy, a reason code is not provided.

If the target state is initial, the reason code can be one of the following values:

If the target state is unhealthy, the reason code can be one of the following values:

If the target state is unused, the reason code can be one of the following values:

If the target state is draining, the reason code can be the following value:

If the target state is unavailable, the reason code can be the following value:

" + "documentation":"

The reason code.

If the target state is healthy, a reason code is not provided.

If the target state is initial, the reason code can be one of the following values:

If the target state is unhealthy, the reason code can be one of the following values:

If the target state is unused, the reason code can be one of the following values:

If the target state is draining, the reason code can be the following value:

If the target state is unavailable, the reason code can be the following value:
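These state/reason pairs come back from DescribeTargetHealth; a boto3 sketch of reading them (the target group ARN is a truncated placeholder):

import boto3

elbv2 = boto3.client("elbv2")

resp = elbv2.describe_target_health(
    TargetGroupArn="arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/...",
)
for desc in resp["TargetHealthDescriptions"]:
    health = desc["TargetHealth"]
    # Reason and Description are only present for non-healthy states.
    print(desc["Target"]["Id"], health["State"],
          health.get("Reason"), health.get("Description"))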

" }, "Description":{ "shape":"Description", @@ -3215,5 +3215,5 @@ "VpcId":{"type":"string"}, "ZoneName":{"type":"string"} }, - "documentation":"Elastic Load Balancing

A load balancer distributes incoming traffic across targets, such as your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer. You configure a target group with a protocol and port number for connections from the load balancer to the targets, and with health check settings to be used when checking the health status of the targets.

Elastic Load Balancing supports the following types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers.

An Application Load Balancer makes routing and load balancing decisions at the application layer (HTTP/HTTPS). A Network Load Balancer makes routing and load balancing decisions at the transport layer (TCP/TLS). Both Application Load Balancers and Network Load Balancers can route requests to one or more ports on each EC2 instance or container instance in your virtual private cloud (VPC).

A Classic Load Balancer makes routing and load balancing decisions either at the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS), and supports either EC2-Classic or a VPC. For more information, see the Elastic Load Balancing User Guide.

This reference covers the 2015-12-01 API, which supports Application Load Balancers and Network Load Balancers. The 2012-06-01 API supports Classic Load Balancers.

To get started, complete the following tasks:

  1. Create a load balancer using CreateLoadBalancer.

  2. Create a target group using CreateTargetGroup.

  3. Register targets for the target group using RegisterTargets.

  4. Create one or more listeners for your load balancer using CreateListener.

To delete a load balancer and its related resources, complete the following tasks:

  1. Delete the load balancer using DeleteLoadBalancer.

  2. Delete the target group using DeleteTargetGroup.

All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds.

" + "documentation":"Elastic Load Balancing

A load balancer distributes incoming traffic across targets, such as your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer. You configure a target group with a protocol and port number for connections from the load balancer to the targets, and with health check settings to be used when checking the health status of the targets.

Elastic Load Balancing supports the following types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers. This reference covers Application Load Balancers and Network Load Balancers.

An Application Load Balancer makes routing and load balancing decisions at the application layer (HTTP/HTTPS). A Network Load Balancer makes routing and load balancing decisions at the transport layer (TCP/TLS). Both Application Load Balancers and Network Load Balancers can route requests to one or more ports on each EC2 instance or container instance in your virtual private cloud (VPC). For more information, see the Elastic Load Balancing User Guide.

All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds.

" } diff --git a/botocore/data/emr/2009-03-31/service-2.json b/botocore/data/emr/2009-03-31/service-2.json index 16532e48..f31c32c7 100644 --- a/botocore/data/emr/2009-03-31/service-2.json +++ b/botocore/data/emr/2009-03-31/service-2.json @@ -165,6 +165,20 @@ ], "documentation":"

Provides more detail about the cluster step.

" }, + "GetBlockPublicAccessConfiguration":{ + "name":"GetBlockPublicAccessConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBlockPublicAccessConfigurationInput"}, + "output":{"shape":"GetBlockPublicAccessConfigurationOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Returns the Amazon EMR block public access configuration for your AWS account in the current Region. For more information, see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

" + }, "ListBootstrapActions":{ "name":"ListBootstrapActions", "http":{ @@ -298,6 +312,20 @@ "output":{"shape":"PutAutoScalingPolicyOutput"}, "documentation":"

Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric.

" }, + "PutBlockPublicAccessConfiguration":{ + "name":"PutBlockPublicAccessConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutBlockPublicAccessConfigurationInput"}, + "output":{"shape":"PutBlockPublicAccessConfigurationOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Creates or updates an Amazon EMR block public access configuration for your AWS account in the current Region. For more information, see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

" + }, "RemoveAutoScalingPolicy":{ "name":"RemoveAutoScalingPolicy", "http":{ @@ -357,7 +385,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Sets whether all AWS Identity and Access Management (IAM) users under your account can access the specified clusters (job flows). This action works on running clusters. You can also set the visibility of a cluster when you launch it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers action can be called only by an IAM user who created the cluster or the AWS account that owns the cluster.

" + "documentation":"

This member will be deprecated.

Sets whether all AWS Identity and Access Management (IAM) users under your account can access the specified clusters (job flows). This action works on running clusters. You can also set the visibility of a cluster when you launch it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers action can be called only by an IAM user who created the cluster or the AWS account that owns the cluster.

" }, "TerminateJobFlows":{ "name":"TerminateJobFlows", @@ -530,6 +558,11 @@ "type":"list", "member":{"shape":"Application"} }, + "ArnType":{ + "type":"string", + "max":2048, + "min":20 + }, "AutoScalingPolicy":{ "type":"structure", "required":[ @@ -613,6 +646,39 @@ }, "documentation":"

The status of an automatic scaling policy.

" }, + "BlockPublicAccessConfiguration":{ + "type":"structure", + "required":["BlockPublicSecurityGroupRules"], + "members":{ + "BlockPublicSecurityGroupRules":{ + "shape":"Boolean", + "documentation":"

Indicates whether EMR block public access is enabled (true) or disabled (false). By default, the value is false for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true.

" + }, + "PermittedPublicSecurityGroupRuleRanges":{ + "shape":"PortRanges", + "documentation":"

Specifies ports and port ranges that are permitted to have security group rules that allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is specified for PermittedPublicSecurityGroupRuleRanges, Amazon EMR allows cluster creation if a security group associated with the cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 ::/0 as the source.

By default, Port 22, which is used for SSH access to the cluster EC2 instances, is in the list of PermittedPublicSecurityGroupRuleRanges.

" + } + }, + "documentation":"

A configuration for Amazon EMR block public access. When BlockPublicSecurityGroupRules is set to true, Amazon EMR prevents cluster creation if one of the cluster's security groups has a rule that allows inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges.

" + }, + "BlockPublicAccessConfigurationMetadata":{ + "type":"structure", + "required":[ + "CreationDateTime", + "CreatedByArn" + ], + "members":{ + "CreationDateTime":{ + "shape":"Date", + "documentation":"

The date and time that the configuration was created.

" + }, + "CreatedByArn":{ + "shape":"ArnType", + "documentation":"

The Amazon Resource Name (ARN) of the AWS principal that created or last modified the configuration.

" + } + }, + "documentation":"

Properties that describe the AWS principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

" + }, "Boolean":{"type":"boolean"}, "BooleanObject":{"type":"boolean"}, "BootstrapActionConfig":{ @@ -789,7 +855,7 @@ }, "ReleaseLabel":{ "shape":"String", - "documentation":"

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version, for example, emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases versions 4.x and later. Earlier versions use AmiVersion.

" + "documentation":"

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion.

" }, "AutoTerminate":{ "shape":"Boolean", @@ -801,7 +867,7 @@ }, "VisibleToAllUsers":{ "shape":"Boolean", - "documentation":"

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and manage the cluster if they have the proper policy permissions set. If this value is false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

" + "documentation":"

This member will be deprecated.

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and manage the cluster if they have the proper policy permissions set. If this value is false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

" }, "Applications":{ "shape":"ApplicationList", @@ -1261,11 +1327,11 @@ }, "Ec2SubnetId":{ "shape":"String", - "documentation":"

To launch the cluster in Amazon VPC, set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, the cluster is launched in the normal AWS cloud, outside of a VPC.

Amazon VPC currently does not support cluster compute quadruple extra large (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance type for nodes of a cluster launched in a VPC.

" + "documentation":"

Set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, and your account supports EC2-Classic, the cluster launches in EC2-Classic.

" }, "RequestedEc2SubnetIds":{ "shape":"XmlStringMaxLen256List", - "documentation":"

Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified together.

" + "documentation":"

Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, and then launches all cluster instances within that subnet. If this value is not specified, and the account and Region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones instead of this setting. If EC2-Classic is not supported, and no subnet is specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIds and RequestedEc2AvailabilityZones cannot be specified together.

" }, "Ec2AvailabilityZone":{ "shape":"String", @@ -1326,6 +1392,28 @@ }, "documentation":"

The details of the step failure. The service attempts to detect the root cause for many common failures.

" }, + "GetBlockPublicAccessConfigurationInput":{ + "type":"structure", + "members":{ + } + }, + "GetBlockPublicAccessConfigurationOutput":{ + "type":"structure", + "required":[ + "BlockPublicAccessConfiguration", + "BlockPublicAccessConfigurationMetadata" + ], + "members":{ + "BlockPublicAccessConfiguration":{ + "shape":"BlockPublicAccessConfiguration", + "documentation":"

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating the block public access configuration to remove the exception.

" + }, + "BlockPublicAccessConfigurationMetadata":{ + "shape":"BlockPublicAccessConfigurationMetadata", + "documentation":"

Properties that describe the AWS principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

" + } + } + }, "HadoopJarStepConfig":{ "type":"structure", "required":["Jar"], @@ -2220,7 +2308,7 @@ }, "VisibleToAllUsers":{ "shape":"Boolean", - "documentation":"

Specifies whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

" + "documentation":"

This member will be deprecated.

Specifies whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

" }, "JobFlowRole":{ "shape":"XmlString", @@ -2342,7 +2430,7 @@ }, "Ec2SubnetId":{ "shape":"XmlStringMaxLen256", - "documentation":"

Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, the cluster launches in the normal Amazon Web Services cloud, outside of an Amazon VPC, if the account launching the cluster supports EC2 Classic networks in the region where the cluster launches.

Amazon VPC currently does not support cluster compute quadruple extra large (cc1.4xlarge) instances. Thus you cannot specify the cc1.4xlarge instance type for clusters launched in an Amazon VPC.

" + "documentation":"

Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value and your account supports EC2-Classic, the cluster launches in EC2-Classic.

" }, "Ec2SubnetIds":{ "shape":"XmlStringMaxLen256List", @@ -2791,6 +2879,30 @@ }, "documentation":"

The Amazon EC2 Availability Zone configuration of the cluster (job flow).

" }, + "Port":{ + "type":"integer", + "max":65535, + "min":0 + }, + "PortRange":{ + "type":"structure", + "required":["MinRange"], + "members":{ + "MinRange":{ + "shape":"Port", + "documentation":"

The smallest port number in a specified range of port numbers.

" + }, + "MaxRange":{ + "shape":"Port", + "documentation":"

The largest port number in a specified range of port numbers.

" + } + }, + "documentation":"

A range of ports that is permitted to allow inbound traffic from all public IP addresses. To specify a single port, use the same value for MinRange and MaxRange.

" + }, + "PortRanges":{ + "type":"list", + "member":{"shape":"PortRange"} + }, "PutAutoScalingPolicyInput":{ "type":"structure", "required":[ @@ -2830,6 +2942,21 @@ } } }, + "PutBlockPublicAccessConfigurationInput":{ + "type":"structure", + "required":["BlockPublicAccessConfiguration"], + "members":{ + "BlockPublicAccessConfiguration":{ + "shape":"BlockPublicAccessConfiguration", + "documentation":"

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating PermittedPublicSecurityGroupRuleRanges to remove the exception.
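A boto3 sketch of writing and reading the account-level configuration; this example blocks public security group rules while keeping the default SSH exception by listing port 22 explicitly:

import boto3

emr = boto3.client("emr")

# Enable block public access, permitting only port 22 (SSH) from 0.0.0.0/0.
emr.put_block_public_access_configuration(
    BlockPublicAccessConfiguration={
        "BlockPublicSecurityGroupRules": True,
        "PermittedPublicSecurityGroupRuleRanges": [
            {"MinRange": 22, "MaxRange": 22},
        ],
    },
)

out = emr.get_block_public_access_configuration()
print(out["BlockPublicAccessConfigurationMetadata"]["CreatedByArn"])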

" + } + } + }, + "PutBlockPublicAccessConfigurationOutput":{ + "type":"structure", + "members":{ + } + }, "RemoveAutoScalingPolicyInput":{ "type":"structure", "required":[ @@ -2909,7 +3036,7 @@ }, "ReleaseLabel":{ "shape":"XmlStringMaxLen256", - "documentation":"

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version, for example, emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases versions 4.x and later. Earlier versions use AmiVersion.

" + "documentation":"

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion.

" }, "Instances":{ "shape":"JobFlowInstancesConfig", @@ -2941,7 +3068,7 @@ }, "VisibleToAllUsers":{ "shape":"Boolean", - "documentation":"

Whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false, only the IAM user that created the cluster can view and manage it.

" + "documentation":"

This member will be deprecated.

Whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false, only the IAM user that created the cluster can view and manage it.
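A sketch tying the RunJobFlow members discussed here together in boto3; the subnet, instance types, and role names are placeholders, and the release label follows the emr-x.x.x form described above:

import boto3

emr = boto3.client("emr")

resp = emr.run_job_flow(
    Name="example-cluster",
    ReleaseLabel="emr-5.14.0",             # emr-x.x.x form; releases 4.0 and later
    Applications=[{"Name": "Spark"}],
    Instances={
        "MasterInstanceType": "m5.xlarge",
        "SlaveInstanceType": "m5.xlarge",
        "InstanceCount": 3,
        "Ec2SubnetId": "subnet-0abc1234",  # omit only on EC2-Classic accounts
        "KeepJobFlowAliveWhenNoSteps": False,
    },
    JobFlowRole="EMR_EC2_DefaultRole",
    ServiceRole="EMR_DefaultRole",
    VisibleToAllUsers=True,
)
print(resp["JobFlowId"])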

" }, "JobFlowRole":{ "shape":"XmlString", @@ -3146,10 +3273,10 @@ }, "VisibleToAllUsers":{ "shape":"Boolean", - "documentation":"

Whether the specified clusters are visible to all IAM users of the AWS account associated with the cluster. If this value is set to True, all IAM users of that AWS account can view and, if they have the proper IAM policy permissions set, manage the clusters. If it is set to False, only the IAM user that created a cluster can view and manage it.

" + "documentation":"

This member will be deprecated.

Whether the specified clusters are visible to all IAM users of the AWS account associated with the cluster. If this value is set to True, all IAM users of that AWS account can view and, if they have the proper IAM policy permissions set, manage the clusters. If it is set to False, only the IAM user that created a cluster can view and manage it.

" } }, - "documentation":"

The input to the SetVisibleToAllUsers action.

" + "documentation":"

This member will be deprecated.

The input to the SetVisibleToAllUsers action.

" }, "ShrinkPolicy":{ "type":"structure", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index faca8347..5258f013 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -233,6 +233,7 @@ "ap-northeast-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-west-2" : { } @@ -251,6 +252,7 @@ }, "api.sagemaker" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -258,8 +260,11 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-1-fips" : { "credentialScope" : { @@ -393,6 +398,7 @@ }, "athena" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -798,6 +804,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -920,6 +927,15 @@ "us-west-2" : { } } }, + "connect" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "cur" : { "endpoints" : { "us-east-1" : { } @@ -933,6 +949,7 @@ "protocols" : [ "https" ] }, "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -944,6 +961,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1004,6 +1022,7 @@ }, "hostname" : "datasync-fips.us-west-2.amazonaws.com" }, + "me-south-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -1032,6 +1051,7 @@ }, "directconnect" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -1471,6 +1491,7 @@ }, "firehose" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -1513,6 +1534,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, @@ -1578,6 +1600,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1627,11 +1650,36 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "guardduty-fips.us-east-1.amazonaws.com" + }, "us-east-2" : { }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "guardduty-fips.us-east-2.amazonaws.com" + }, "us-west-1" : { }, - "us-west-2" : { } + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "guardduty-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "guardduty-fips.us-west-2.amazonaws.com" + } }, "isRegionalized" : true }, @@ -1689,6 +1737,7 @@ } }, "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -1700,6 +1749,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1831,11 +1881,14 @@ 
"endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -1918,6 +1971,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1983,6 +2037,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2036,6 +2091,7 @@ "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "eu-west-3" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -2322,6 +2378,15 @@ "us-west-2" : { } } }, + "qldb" : { + "endpoints" : { + "ap-northeast-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "ram" : { "endpoints" : { "ap-northeast-1" : { }, @@ -2417,6 +2482,30 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "resource-groups-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "resource-groups-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "resource-groups-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "resource-groups-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -2488,6 +2577,7 @@ }, "runtime.sagemaker" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2495,8 +2585,11 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-1-fips" : { "credentialScope" : { @@ -2918,6 +3011,7 @@ }, "servicediscovery" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2925,9 +3019,11 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2935,6 +3031,15 @@ "us-west-2" : { } } }, + "session.qldb" : { + "endpoints" : { + "ap-northeast-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "shield" : { "defaults" : { "protocols" : [ "https" ], @@ -2959,6 +3064,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -3325,9 +3431,11 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -4319,6 +4427,22 @@ "us-gov-west-1" : { } } }, + "neptune" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "rds.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : 
"rds.us-gov-west-1.amazonaws.com" + } + } + }, "organizations" : { "endpoints" : { "aws-us-gov-global" : { @@ -4338,6 +4462,7 @@ }, "ram" : { "endpoints" : { + "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, @@ -4456,6 +4581,17 @@ } } }, + "servicecatalog" : { + "endpoints" : { + "us-gov-west-1" : { }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "servicecatalog-fips.us-gov-west-1.amazonaws.com" + } + } + }, "sms" : { "endpoints" : { "us-gov-east-1" : { }, @@ -4568,6 +4704,510 @@ } } } + }, { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ] + }, + "dnsSuffix" : "c2s.ic.gov", + "partition" : "aws-iso", + "partitionName" : "AWS ISO (US)", + "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", + "regions" : { + "us-iso-east-1" : { + "description" : "US ISO East" + } + }, + "services" : { + "api.ecr" : { + "endpoints" : { + "us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "api.ecr.us-iso-east-1.c2s.ic.gov" + } + } + }, + "application-autoscaling" : { + "defaults" : { + "credentialScope" : { + "service" : "application-autoscaling" + }, + "hostname" : "autoscaling.{region}.amazonaws.com", + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "autoscaling" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + } + } + }, + "cloudformation" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "cloudtrail" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "codedeploy" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "config" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "datapipeline" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "directconnect" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "dms" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "ds" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "dynamodb" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + } + } + }, + "ec2" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "ecs" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "elasticache" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "elasticloadbalancing" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + } + } + }, + "elasticmapreduce" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "https" ] + } + } + }, + "events" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "glacier" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + } + } + }, + "health" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "iam" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "iam.us-iso-east-1.c2s.ic.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-global" + }, + "kinesis" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "kms" : { + "endpoints" : { + "ProdFips" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "kms-fips.us-iso-east-1.c2s.ic.gov" + }, + "us-iso-east-1" : { } + } + }, + "lambda" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "logs" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "monitoring" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "rds" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + 
"redshift" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "route53" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "route53.c2s.ic.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-global" + }, + "s3" : { + "defaults" : { + "signatureVersions" : [ "s3v4" ] + }, + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ], + "signatureVersions" : [ "s3v4" ] + } + } + }, + "snowball" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "sns" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + } + } + }, + "sqs" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + } + } + }, + "states" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "streams.dynamodb" : { + "defaults" : { + "credentialScope" : { + "service" : "dynamodb" + }, + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + } + } + }, + "sts" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "support" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "support.us-iso-east-1.c2s.ic.gov" + } + }, + "partitionEndpoint" : "aws-iso-global" + }, + "swf" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "workspaces" : { + "endpoints" : { + "us-iso-east-1" : { } + } + } + } + }, { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ] + }, + "dnsSuffix" : "sc2s.sgov.gov", + "partition" : "aws-iso-b", + "partitionName" : "AWS ISOB (US)", + "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", + "regions" : { + "us-isob-east-1" : { + "description" : "US ISOB East (Ohio)" + } + }, + "services" : { + "application-autoscaling" : { + "defaults" : { + "credentialScope" : { + "service" : "application-autoscaling" + }, + "hostname" : "autoscaling.{region}.amazonaws.com", + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "cloudformation" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "cloudtrail" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "config" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "directconnect" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "dms" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "dynamodb" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "ec2" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "elasticache" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "elasticloadbalancing" : { + "endpoints" : { + "us-isob-east-1" : { + "protocols" : [ "https" ] + } + } + }, + "elasticmapreduce" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "events" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "glacier" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "health" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "iam" : { + "endpoints" : { + "aws-iso-b-global" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "iam.us-isob-east-1.sc2s.sgov.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : 
"aws-iso-b-global" + }, + "kinesis" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "kms" : { + "endpoints" : { + "ProdFips" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "kms-fips.us-isob-east-1.sc2s.sgov.gov" + }, + "us-isob-east-1" : { } + } + }, + "logs" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "monitoring" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "rds" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "redshift" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "s3" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "signatureVersions" : [ "s3v4" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "snowball" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "sns" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "sqs" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "sslCommonName" : "{region}.queue.{dnsSuffix}" + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "states" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "streams.dynamodb" : { + "defaults" : { + "credentialScope" : { + "service" : "dynamodb" + }, + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "sts" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "support" : { + "endpoints" : { + "aws-iso-b-global" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "support.us-isob-east-1.sc2s.sgov.gov" + } + }, + "partitionEndpoint" : "aws-iso-b-global" + }, + "swf" : { + "endpoints" : { + "us-isob-east-1" : { } + } + } + } } ], "version" : 3 } \ No newline at end of file diff --git a/botocore/data/forecast/2018-06-26/paginators-1.json b/botocore/data/forecast/2018-06-26/paginators-1.json new file mode 100644 index 00000000..1c63777f --- /dev/null +++ b/botocore/data/forecast/2018-06-26/paginators-1.json @@ -0,0 +1,40 @@ +{ + "pagination": { + "ListDatasetGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DatasetGroups" + }, + "ListDatasetImportJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DatasetImportJobs" + }, + "ListDatasets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Datasets" + }, + "ListForecastExportJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ForecastExportJobs" + }, + "ListForecasts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Forecasts" + }, + "ListPredictors": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Predictors" + } + } +} diff --git a/botocore/data/forecast/2018-06-26/service-2.json b/botocore/data/forecast/2018-06-26/service-2.json new file mode 100644 index 00000000..b1dc0fd3 --- /dev/null +++ b/botocore/data/forecast/2018-06-26/service-2.json @@ -0,0 +1,2149 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-06-26", + "endpointPrefix":"forecast", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Forecast Service", + "serviceId":"forecast", + "signatureVersion":"v4", + "signingName":"forecast", + "targetPrefix":"AmazonForecast", + "uid":"forecast-2018-06-26" + }, + "operations":{ + 
"CreateDataset":{ + "name":"CreateDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDatasetRequest"}, + "output":{"shape":"CreateDatasetResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:

After creating a dataset, you import your training data into the dataset and add the dataset to a dataset group. You then use the dataset group to create a predictor. For more information, see howitworks-datasets-groups.

To get a list of all your datasets, use the ListDatasets operation.

The Status of a dataset must be ACTIVE before you can import training data. Use the DescribeDataset operation to get the status.
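A minimal boto3 sketch of this operation, assuming the RETAIL domain requirements described above; the dataset name is a placeholder, and the Schema "Attributes" key is taken from the full Schema shape defined later in this file:

import boto3

forecast = boto3.client("forecast")

# RETAIL + TARGET_TIME_SERIES requires item_id, timestamp, and demand
# fields; Domain, DatasetType, and AttributeType values come from the
# enums in this model.
response = forecast.create_dataset(
    DatasetName="demo_demand_ds",               # placeholder name
    Domain="RETAIL",
    DatasetType="TARGET_TIME_SERIES",
    DataFrequency="D",                          # daily collection
    Schema={
        "Attributes": [
            {"AttributeName": "item_id", "AttributeType": "string"},
            {"AttributeName": "timestamp", "AttributeType": "timestamp"},
            {"AttributeName": "demand", "AttributeType": "float"},
        ]
    },
)
print(response["DatasetArn"])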

" + }, + "CreateDatasetGroup":{ + "name":"CreateDatasetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDatasetGroupRequest"}, + "output":{"shape":"CreateDatasetGroupResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates an Amazon Forecast dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or you can add datasets later with the UpdateDatasetGroup operation.

After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see howitworks-datasets-groups.

To get a list of all your dataset groups, use the ListDatasetGroups operation.

The Status of a dataset group must be ACTIVE before you can create a predictor using the dataset group. Use the DescribeDatasetGroup operation to get the status.
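A short boto3 sketch; the dataset ARN is a placeholder:

import boto3

forecast = boto3.client("forecast")

# DatasetArns is optional here; datasets can also be attached later with
# UpdateDatasetGroup.
response = forecast.create_dataset_group(
    DatasetGroupName="demo_demand_dsg",
    Domain="RETAIL",
    DatasetArns=["arn:aws:forecast:us-east-1:123456789012:dataset/demo_demand_ds"],
)
print(response["DatasetGroupArn"])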

" + }, + "CreateDatasetImportJob":{ + "name":"CreateDatasetImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDatasetImportJobRequest"}, + "output":{"shape":"CreateDatasetImportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.

You must specify a DataSource object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. For more information, see aws-forecast-iam-roles.

Two properties of the training data are optionally specified:

When Amazon Forecast imports your training data, it verifies that the data was collected at the DataFrequency specified when the target dataset was created. For more information, see CreateDataset and howitworks-datasets-groups. Amazon Forecast also verifies the delimiter and timestamp format.

You can use the ListDatasetImportJobs operation to get a list of all your dataset import jobs, filtered by specified criteria.

To get a list of all your dataset import jobs, filtered by the specified criteria, use the ListDatasetImportJobs operation.
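A hedged boto3 sketch; the S3 path, role ARN, and dataset ARN are placeholders, and the S3Config keys (Path, RoleArn) and the TimestampFormat value are assumed from shapes defined later in this file:

import boto3

forecast = boto3.client("forecast")

response = forecast.create_dataset_import_job(
    DatasetImportJobName="20190721DatasetImport",   # timestamped, per the note above
    DatasetArn="arn:aws:forecast:us-east-1:123456789012:dataset/demo_demand_ds",
    DataSource={
        "S3Config": {
            "Path": "s3://example-bucket/train/demand.csv",
            "RoleArn": "arn:aws:iam::123456789012:role/ForecastS3Access",
        }
    },
    TimestampFormat="yyyy-MM-dd HH:mm:ss",          # assumed format; must match DataFrequency
)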

" + }, + "CreateForecast":{ + "name":"CreateForecast", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateForecastRequest"}, + "output":{"shape":"CreateForecastResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a forecast for each item in the TARGET_TIME_SERIES dataset that was used to train the predictor. This is known as inference. To retrieve the forecast for a single item at low latency, use the QueryForecast operation. To export the complete forecast into your Amazon Simple Storage Service (Amazon S3) bucket, use the CreateForecastExportJob operation.

The range of the forecast is determined by the ForecastHorizon, specified in the CreatePredictor request, multiplied by the DataFrequency, specified in the CreateDataset request. When you query a forecast, you can request a specific date range within the complete forecast.

To get a list of all your forecasts, use the ListForecasts operation.

The forecasts generated by Amazon Forecast are in the same timezone as the dataset that was used to create the predictor.

For more information, see howitworks-forecast.

The Status of the forecast must be ACTIVE before you can query or export the forecast. Use the DescribeForecast operation to get the status.
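A boto3 sketch with a placeholder predictor ARN:

import boto3

forecast = boto3.client("forecast")

response = forecast.create_forecast(
    ForecastName="demo_forecast",
    PredictorArn="arn:aws:forecast:us-east-1:123456789012:predictor/demo_predictor",
)
# Poll until the forecast is ACTIVE before querying or exporting it.
status = forecast.describe_forecast(ForecastArn=response["ForecastArn"])["Status"]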

" + }, + "CreateForecastExportJob":{ + "name":"CreateForecastExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateForecastExportJobRequest"}, + "output":{"shape":"CreateForecastExportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Exports a forecast created by the CreateForecast operation to your Amazon Simple Storage Service (Amazon S3) bucket.

You must specify a DataDestination object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.

For more information, see howitworks-forecast.

To get a list of all your forecast export jobs, use the ListForecastExportJobs operation.

The Status of the forecast export job must be ACTIVE before you can access the forecast in your Amazon S3 bucket. Use the DescribeForecastExportJob operation to get the status.
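A boto3 sketch; the ARNs and bucket path are placeholders, and Destination wraps the same assumed S3Config keys (Path, RoleArn) as DataSource:

import boto3

forecast = boto3.client("forecast")

response = forecast.create_forecast_export_job(
    ForecastExportJobName="demo_export",
    ForecastArn="arn:aws:forecast:us-east-1:123456789012:forecast/demo_forecast",
    Destination={
        "S3Config": {
            "Path": "s3://example-bucket/exports/",
            "RoleArn": "arn:aws:iam::123456789012:role/ForecastS3Access",
        }
    },
)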

" + }, + "CreatePredictor":{ + "name":"CreatePredictor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePredictorRequest"}, + "output":{"shape":"CreatePredictorResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates an Amazon Forecast predictor.

In the request, you provide a dataset group and either specify an algorithm or let Amazon Forecast choose the algorithm for you using AutoML. If you specify an algorithm, you also can override algorithm-specific hyperparameters.

Amazon Forecast uses the chosen algorithm to train a model using the latest version of the datasets in the specified dataset group. The result is called a predictor. You then generate a forecast using the CreateForecast operation.

After training a model, the CreatePredictor operation also evaluates it. To see the evaluation metrics, use the GetAccuracyMetrics operation. Always review the evaluation metrics before deciding to use the predictor to generate a forecast.

Optionally, you can specify a featurization configuration to fill and aggregate the data fields in the TARGET_TIME_SERIES dataset to improve model training. For more information, see FeaturizationConfig.

AutoML

If you set PerformAutoML to true, Amazon Forecast evaluates each algorithm and chooses the one that minimizes the objective function. The objective function is defined as the mean of the weighted p10, p50, and p90 quantile losses. For more information, see EvaluationResult.

When AutoML is enabled, the following properties are disallowed:

To get a list of all your predictors, use the ListPredictors operation.

The Status of the predictor must be ACTIVE, signifying that training has completed, before you can use the predictor to create a forecast. Use the DescribePredictor operation to get the status.
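An AutoML sketch in boto3; AlgorithmArn is omitted because PerformAutoML is true, the dataset group ARN is a placeholder, and the InputDataConfig key (DatasetGroupArn) is assumed from the shape defined later in this file:

import boto3

forecast = boto3.client("forecast")

response = forecast.create_predictor(
    PredictorName="demo_predictor",
    ForecastHorizon=10,        # predict 10 time-steps at the forecast frequency
    PerformAutoML=True,
    InputDataConfig={
        "DatasetGroupArn": "arn:aws:forecast:us-east-1:123456789012:dataset-group/demo_demand_dsg"
    },
    FeaturizationConfig={"ForecastFrequency": "D"},
)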

" + }, + "DeleteDataset":{ + "name":"DeleteDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDatasetRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Deletes an Amazon Forecast dataset created using the CreateDataset operation. To be deleted, the dataset must have a status of ACTIVE or CREATE_FAILED. Use the DescribeDataset operation to get the status.

", + "idempotent":true + }, + "DeleteDatasetGroup":{ + "name":"DeleteDatasetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDatasetGroupRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Deletes a dataset group created using the CreateDatasetGroup operation. To be deleted, the dataset group must have a status of ACTIVE, CREATE_FAILED, or UPDATE_FAILED. Use the DescribeDatasetGroup operation to get the status.

The operation deletes only the dataset group, not the datasets in the group.

", + "idempotent":true + }, + "DeleteDatasetImportJob":{ + "name":"DeleteDatasetImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDatasetImportJobRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Deletes a dataset import job created using the CreateDatasetImportJob operation. To be deleted, the import job must have a status of ACTIVE or CREATE_FAILED. Use the DescribeDatasetImportJob operation to get the status.

", + "idempotent":true + }, + "DeleteForecast":{ + "name":"DeleteForecast", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteForecastRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Deletes a forecast created using the CreateForecast operation. To be deleted, the forecast must have a status of ACTIVE or CREATE_FAILED. Use the DescribeForecast operation to get the status.

You can't delete a forecast while it is being exported.

", + "idempotent":true + }, + "DeleteForecastExportJob":{ + "name":"DeleteForecastExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteForecastExportJobRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Deletes a forecast export job created using the CreateForecastExportJob operation. To be deleted, the export job must have a status of ACTIVE or CREATE_FAILED. Use the DescribeForecastExportJob operation to get the status.

", + "idempotent":true + }, + "DeletePredictor":{ + "name":"DeletePredictor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePredictorRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Deletes a predictor created using the CreatePredictor operation. To be deleted, the predictor must have a status of ACTIVE or CREATE_FAILED. Use the DescribePredictor operation to get the status.

Any forecasts generated by the predictor will no longer be available.

", + "idempotent":true + }, + "DescribeDataset":{ + "name":"DescribeDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDatasetRequest"}, + "output":{"shape":"DescribeDatasetResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes an Amazon Forecast dataset created using the CreateDataset operation.

In addition to listing the properties provided by the user in the CreateDataset request, this operation includes the following properties:

", + "idempotent":true + }, + "DescribeDatasetGroup":{ + "name":"DescribeDatasetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDatasetGroupRequest"}, + "output":{"shape":"DescribeDatasetGroupResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes a dataset group created using the CreateDatasetGroup operation.

In addition to listing the properties provided by the user in the CreateDatasetGroup request, this operation includes the following properties:

", + "idempotent":true + }, + "DescribeDatasetImportJob":{ + "name":"DescribeDatasetImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDatasetImportJobRequest"}, + "output":{"shape":"DescribeDatasetImportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes a dataset import job created using the CreateDatasetImportJob operation.

In addition to listing the properties provided by the user in the CreateDatasetImportJob request, this operation includes the following properties:

", + "idempotent":true + }, + "DescribeForecast":{ + "name":"DescribeForecast", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeForecastRequest"}, + "output":{"shape":"DescribeForecastResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes a forecast created using the CreateForecast operation.

In addition to listing the properties provided by the user in the CreateForecast request, this operation includes the following properties:

", + "idempotent":true + }, + "DescribeForecastExportJob":{ + "name":"DescribeForecastExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeForecastExportJobRequest"}, + "output":{"shape":"DescribeForecastExportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes a forecast export job created using the CreateForecastExportJob operation.

In addition to listing the properties provided by the user in the CreateForecastExportJob request, this operation includes the following properties:

", + "idempotent":true + }, + "DescribePredictor":{ + "name":"DescribePredictor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePredictorRequest"}, + "output":{"shape":"DescribePredictorResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes a predictor created using the CreatePredictor operation.

In addition to listing the properties provided by the user in the CreatePredictor request, this operation includes the following properties:

", + "idempotent":true + }, + "GetAccuracyMetrics":{ + "name":"GetAccuracyMetrics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccuracyMetricsRequest"}, + "output":{"shape":"GetAccuracyMetricsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Provides metrics on the accuracy of the models that were trained by the CreatePredictor operation. Use metrics to see how well the model performed and to decide whether to use the predictor to generate a forecast.

Metrics are generated for each backtest window evaluated. For more information, see EvaluationParameters.

The parameters of the filling method determine which items contribute to the metrics. If zero is specified, all items contribute. If nan is specified, only those items that have complete data in the range being evaluated contribute. For more information, see FeaturizationMethod.

For an example of how to train a model and review metrics, see getting-started.
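A boto3 sketch; the predictor ARN is a placeholder, and the PredictorEvaluationResults response key is assumed from the GetAccuracyMetricsResponse shape:

import boto3

forecast = boto3.client("forecast")

metrics = forecast.get_accuracy_metrics(
    PredictorArn="arn:aws:forecast:us-east-1:123456789012:predictor/demo_predictor"
)
# One EvaluationResult per evaluated algorithm; TestWindows reflects
# NumberOfBacktestWindows (see EvaluationResult below).
for result in metrics["PredictorEvaluationResults"]:
    print(result["AlgorithmArn"], len(result["TestWindows"]))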

", + "idempotent":true + }, + "ListDatasetGroups":{ + "name":"ListDatasetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatasetGroupsRequest"}, + "output":{"shape":"ListDatasetGroupsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Returns a list of dataset groups created using the CreateDatasetGroup operation. For each dataset group, a summary of its properties, including its Amazon Resource Name (ARN), is returned. You can retrieve the complete set of properties by using the ARN with the DescribeDatasetGroup operation.

", + "idempotent":true + }, + "ListDatasetImportJobs":{ + "name":"ListDatasetImportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatasetImportJobsRequest"}, + "output":{"shape":"ListDatasetImportJobsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Returns a list of dataset import jobs created using the CreateDatasetImportJob operation. For each import job, a summary of its properties, including its Amazon Resource Name (ARN), is returned. You can retrieve the complete set of properties by using the ARN with the DescribeDatasetImportJob operation. You can filter the list by providing an array of Filter objects.

", + "idempotent":true + }, + "ListDatasets":{ + "name":"ListDatasets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatasetsRequest"}, + "output":{"shape":"ListDatasetsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Returns a list of datasets created using the CreateDataset operation. For each dataset, a summary of its properties, including its Amazon Resource Name (ARN), is returned. You can retrieve the complete set of properties by using the ARN with the DescribeDataset operation.
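Since this operation is paginated (see the ListDatasets entry in the paginators-1.json file added in this patch), a boto3 paginator sketch:

import boto3

forecast = boto3.client("forecast")

# Pages are chained through NextToken automatically; each page carries a
# Datasets list of DatasetSummary objects.
paginator = forecast.get_paginator("list_datasets")
for page in paginator.paginate():
    for summary in page["Datasets"]:
        print(summary["DatasetArn"], summary["DatasetName"])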

", + "idempotent":true + }, + "ListForecastExportJobs":{ + "name":"ListForecastExportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListForecastExportJobsRequest"}, + "output":{"shape":"ListForecastExportJobsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Returns a list of forecast export jobs created using the CreateForecastExportJob operation. For each forecast export job, a summary of its properties, including its Amazon Resource Name (ARN), is returned. You can retrieve the complete set of properties by using the ARN with the DescribeForecastExportJob operation. The list can be filtered using an array of Filter objects.

", + "idempotent":true + }, + "ListForecasts":{ + "name":"ListForecasts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListForecastsRequest"}, + "output":{"shape":"ListForecastsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Returns a list of forecasts created using the CreateForecast operation. For each forecast, a summary of its properties, including its Amazon Resource Name (ARN), is returned. You can retrieve the complete set of properties by using the ARN with the DescribeForecast operation. The list can be filtered using an array of Filter objects.

", + "idempotent":true + }, + "ListPredictors":{ + "name":"ListPredictors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPredictorsRequest"}, + "output":{"shape":"ListPredictorsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Returns a list of predictors created using the CreatePredictor operation. For each predictor, a summary of its properties, including its Amazon Resource Name (ARN), is returned. You can retrieve the complete set of properties by using the ARN with the DescribePredictor operation. The list can be filtered using an array of Filter objects.

", + "idempotent":true + }, + "UpdateDatasetGroup":{ + "name":"UpdateDatasetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDatasetGroupRequest"}, + "output":{"shape":"UpdateDatasetGroupResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Replaces any existing datasets in the dataset group with the specified datasets.

The Status of the dataset group must be ACTIVE before creating a predictor using the dataset group. Use the DescribeDatasetGroup operation to get the status.
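A boto3 sketch; both ARNs are placeholders, and the request keys (DatasetGroupArn, DatasetArns) are assumed from the UpdateDatasetGroupRequest shape defined later in this file:

import boto3

forecast = boto3.client("forecast")

# The list replaces whatever datasets the group currently contains.
forecast.update_dataset_group(
    DatasetGroupArn="arn:aws:forecast:us-east-1:123456789012:dataset-group/demo_demand_dsg",
    DatasetArns=[
        "arn:aws:forecast:us-east-1:123456789012:dataset/demo_demand_ds",
        "arn:aws:forecast:us-east-1:123456789012:dataset/demo_related_ds",
    ],
)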

", + "idempotent":true + } + }, + "shapes":{ + "Arn":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\-\\_\\.\\/\\:]+$" + }, + "ArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "AttributeType":{ + "type":"string", + "enum":[ + "string", + "integer", + "float", + "timestamp" + ] + }, + "Boolean":{"type":"boolean"}, + "CategoricalParameterRange":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the categorical hyperparameter to tune.

" + }, + "Values":{ + "shape":"Values", + "documentation":"

A list of the tunable categories for the hyperparameter.

" + } + }, + "documentation":"

Specifies a categorical hyperparameter and its range of tunable values. This object is part of the ParameterRanges object.

" + }, + "CategoricalParameterRanges":{ + "type":"list", + "member":{"shape":"CategoricalParameterRange"}, + "max":20, + "min":1 + }, + "ContinuousParameterRange":{ + "type":"structure", + "required":[ + "Name", + "MaxValue", + "MinValue" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the hyperparameter to tune.

" + }, + "MaxValue":{ + "shape":"Double", + "documentation":"

The maximum tunable value of the hyperparameter.

" + }, + "MinValue":{ + "shape":"Double", + "documentation":"

The minimum tunable value of the hyperparameter.

" + }, + "ScalingType":{ + "shape":"ScalingType", + "documentation":"

The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

Auto

Amazon Forecast hyperparameter tuning chooses the best scale for the hyperparameter.

Linear

Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

Logarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

Logarithmic scaling works only for ranges that have only values greater than 0.

ReverseLogarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.

Reverse logarithmic scaling works only for ranges that are entirely within the range 0 <= x < 1.0.

" + } + }, + "documentation":"

Specifies a continuous hyperparameter and its range of tunable values. This object is part of the ParameterRanges object.
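A sketch of one such range inside the ParameterRanges wrapper; the hyperparameter name is hypothetical, and the HPOConfig/ParameterRanges key names are assumed from shapes referenced elsewhere in this file:

# Logarithmic scaling is valid here because the whole range is greater than 0.
hpo_config = {
    "ParameterRanges": {
        "ContinuousParameterRanges": [
            {
                "Name": "learning_rate",        # hypothetical hyperparameter
                "MinValue": 0.0001,
                "MaxValue": 0.1,
                "ScalingType": "Logarithmic",
            }
        ]
    }
}
# Passed as HPOConfig=hpo_config in a CreatePredictor call with PerformHPO=True.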

" + }, + "ContinuousParameterRanges":{ + "type":"list", + "member":{"shape":"ContinuousParameterRange"}, + "max":20, + "min":1 + }, + "CreateDatasetGroupRequest":{ + "type":"structure", + "required":[ + "DatasetGroupName", + "Domain" + ], + "members":{ + "DatasetGroupName":{ + "shape":"Name", + "documentation":"

A name for the dataset group.

" + }, + "Domain":{ + "shape":"Domain", + "documentation":"

The domain associated with the dataset group. The Domain and DatasetType that you choose determine the fields that must be present in the training data that you import to the dataset. For example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires item_id, timestamp, and demand fields to be present in your data. For more information, see howitworks-datasets-groups.

" + }, + "DatasetArns":{ + "shape":"ArnList", + "documentation":"

An array of Amazon Resource Names (ARNs) of the datasets that you want to include in the dataset group.

" + } + } + }, + "CreateDatasetGroupResponse":{ + "type":"structure", + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group.

" + } + } + }, + "CreateDatasetImportJobRequest":{ + "type":"structure", + "required":[ + "DatasetImportJobName", + "DatasetArn", + "DataSource" + ], + "members":{ + "DatasetImportJobName":{ + "shape":"Name", + "documentation":"

The name for the dataset import job. We recommend including the current timestamp in the name to guard against a ResourceAlreadyExistsException, for example, 20190721DatasetImport.

" + }, + "DatasetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want to import data to.

" + }, + "DataSource":{ + "shape":"DataSource", + "documentation":"

The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data.

" + }, + "TimestampFormat":{ + "shape":"TimestampFormat", + "documentation":"

The format of timestamps in the dataset. Two formats are supported, dependent on the DataFrequency specified when the dataset was created.

" + } + } + }, + "CreateDatasetImportJobResponse":{ + "type":"structure", + "members":{ + "DatasetImportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset import job.

" + } + } + }, + "CreateDatasetRequest":{ + "type":"structure", + "required":[ + "DatasetName", + "Domain", + "DatasetType", + "Schema" + ], + "members":{ + "DatasetName":{ + "shape":"Name", + "documentation":"

A name for the dataset.

" + }, + "Domain":{ + "shape":"Domain", + "documentation":"

The domain associated with the dataset. The Domain and DatasetType that you choose determine the fields that must be present in the training data that you import to the dataset. For example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires item_id, timestamp, and demand fields to be present in your data. For more information, see howitworks-datasets-groups.

" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

The dataset type. Valid values depend on the chosen Domain.

" + }, + "DataFrequency":{ + "shape":"Frequency", + "documentation":"

The frequency of data collection.

Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, \"D\" indicates every day and \"15min\" indicates every 15 minutes.

" + }, + "Schema":{ + "shape":"Schema", + "documentation":"

The schema for the dataset. The schema attributes and their order must match the fields in your data. The dataset Domain and DatasetType that you choose determine the minimum required fields in your training data. For information about the required fields for a specific dataset domain and type, see howitworks-domains-ds-types.

" + }, + "EncryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.

" + } + } + }, + "CreateDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset.

" + } + } + }, + "CreateForecastExportJobRequest":{ + "type":"structure", + "required":[ + "ForecastExportJobName", + "ForecastArn", + "Destination" + ], + "members":{ + "ForecastExportJobName":{ + "shape":"Name", + "documentation":"

The name for the forecast export job.

" + }, + "ForecastArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the forecast that you want to export.

" + }, + "Destination":{ + "shape":"DataDestination", + "documentation":"

The path to the Amazon S3 bucket where you want to save the forecast and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the bucket.

" + } + } + }, + "CreateForecastExportJobResponse":{ + "type":"structure", + "members":{ + "ForecastExportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the export job.

" + } + } + }, + "CreateForecastRequest":{ + "type":"structure", + "required":[ + "ForecastName", + "PredictorArn" + ], + "members":{ + "ForecastName":{ + "shape":"Name", + "documentation":"

The name for the forecast.

" + }, + "PredictorArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor to use to generate the forecast.

" + } + } + }, + "CreateForecastResponse":{ + "type":"structure", + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the forecast.

" + } + } + }, + "CreatePredictorRequest":{ + "type":"structure", + "required":[ + "PredictorName", + "ForecastHorizon", + "InputDataConfig", + "FeaturizationConfig" + ], + "members":{ + "PredictorName":{ + "shape":"Name", + "documentation":"

A name for the predictor.

" + }, + "AlgorithmArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the algorithm to use for model training. Required if PerformAutoML is not set to true.

Supported algorithms

" + }, + "ForecastHorizon":{ + "shape":"Integer", + "documentation":"

Specifies the number of time-steps that the model is trained to predict. The forecast horizon is also called the prediction length.

For example, if you configure a dataset for daily data collection (using the DataFrequency parameter of the CreateDataset operation) and set the forecast horizon to 10, the model returns predictions for 10 days.

" + }, + "PerformAutoML":{ + "shape":"Boolean", + "documentation":"

Whether to perform AutoML. The default value is false. In this case, you are required to specify an algorithm.

If you want Amazon Forecast to evaluate the algorithms it provides and choose the best algorithm and configuration for your training dataset, set PerformAutoML to true. This is a good option if you aren't sure which algorithm is suitable for your application.

" + }, + "PerformHPO":{ + "shape":"Boolean", + "documentation":"

Whether to perform hyperparameter optimization (HPO). HPO finds optimal hyperparameter values for your training data. The process of performing HPO is known as a hyperparameter tuning job.

The default value is false. In this case, Amazon Forecast uses default hyperparameter values from the chosen algorithm.

To override the default values, set PerformHPO to true and supply the HyperParameterTuningJobConfig object. The tuning job specifies an objective metric, the hyperparameters to optimize, and the valid range for each hyperparameter.

The following algorithms support HPO:

" + }, + "TrainingParameters":{ + "shape":"TrainingParameters", + "documentation":"

The training parameters to override for model training. The parameters that you can override are listed in the individual algorithms in aws-forecast-choosing-recipes.

" + }, + "EvaluationParameters":{ + "shape":"EvaluationParameters", + "documentation":"

Used to override the default evaluation parameters of the specified algorithm. Amazon Forecast evaluates a predictor by splitting a dataset into training data and testing data. The evaluation parameters define how to perform the split and the number of iterations.

" + }, + "HPOConfig":{ + "shape":"HyperParameterTuningJobConfig", + "documentation":"

Provides hyperparameter override values for the algorithm. If you don't provide this parameter, Amazon Forecast uses default values. The individual algorithms specify which hyperparameters support hyperparameter optimization (HPO). For more information, see aws-forecast-choosing-recipes.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Describes the dataset group that contains the data to use to train the predictor.

" + }, + "FeaturizationConfig":{ + "shape":"FeaturizationConfig", + "documentation":"

The featurization configuration.

" + }, + "EncryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.

" + } + } + }, + "CreatePredictorResponse":{ + "type":"structure", + "members":{ + "PredictorArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor.

" + } + } + }, + "DataDestination":{ + "type":"structure", + "required":["S3Config"], + "members":{ + "S3Config":{ + "shape":"S3Config", + "documentation":"

The path to an Amazon Simple Storage Service (Amazon S3) bucket along with the credentials to access the bucket.

" + } + }, + "documentation":"

The destination of an exported forecast and credentials to access the location. This object is submitted in the CreateForecastExportJob request.

" + }, + "DataSource":{ + "type":"structure", + "required":["S3Config"], + "members":{ + "S3Config":{ + "shape":"S3Config", + "documentation":"

The path to the training data stored in an Amazon Simple Storage Service (Amazon S3) bucket along with the credentials to access the data.

" + } + }, + "documentation":"

The source of your training data and credentials to access the data. This object is submitted in the CreateDatasetImportJob request.

" + }, + "DatasetGroupSummary":{ + "type":"structure", + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group.

" + }, + "DatasetGroupName":{ + "shape":"Name", + "documentation":"

The name of the dataset group.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the dataset group was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime is the current query time.

" + } + }, + "documentation":"

Provides a summary of the dataset group properties used in the ListDatasetGroups operation. To get the complete set of properties, call the DescribeDatasetGroup operation, and provide the listed DatasetGroupArn.

" + }, + "DatasetGroups":{ + "type":"list", + "member":{"shape":"DatasetGroupSummary"} + }, + "DatasetImportJobSummary":{ + "type":"structure", + "members":{ + "DatasetImportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset import job.

" + }, + "DatasetImportJobName":{ + "shape":"Name", + "documentation":"

The name of the dataset import job.

" + }, + "DataSource":{ + "shape":"DataSource", + "documentation":"

The location of the Amazon S3 bucket that contains the training data.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the dataset import job. The status is reflected in the status of the dataset. For example, when the import job status is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS. States include:

" + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

If an error occurred, an informational message about the error.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the dataset import job was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

Dependent on the status as follows:

" + } + }, + "documentation":"

Provides a summary of the dataset import job properties used in the ListDatasetImportJobs operation. To get the complete set of properties, call the DescribeDatasetImportJob operation, and provide the listed DatasetImportJobArn.

" + }, + "DatasetImportJobs":{ + "type":"list", + "member":{"shape":"DatasetImportJobSummary"} + }, + "DatasetSummary":{ + "type":"structure", + "members":{ + "DatasetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset.

" + }, + "DatasetName":{ + "shape":"Name", + "documentation":"

The name of the dataset.

" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

The dataset type.

" + }, + "Domain":{ + "shape":"Domain", + "documentation":"

The domain associated with the dataset.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the dataset was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

When the dataset is created, LastModificationTime is the same as CreationTime. After a CreateDatasetImportJob operation is called, LastModificationTime is when the import job finished or failed. While data is being imported to the dataset, LastModificationTime is the current query time.

" + } + }, + "documentation":"

Provides a summary of the dataset properties used in the ListDatasets operation. To get the complete set of properties, call the DescribeDataset operation, and provide the listed DatasetArn.

" + }, + "DatasetType":{ + "type":"string", + "enum":[ + "TARGET_TIME_SERIES", + "RELATED_TIME_SERIES", + "ITEM_METADATA" + ] + }, + "Datasets":{ + "type":"list", + "member":{"shape":"DatasetSummary"} + }, + "DeleteDatasetGroupRequest":{ + "type":"structure", + "required":["DatasetGroupArn"], + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group to delete.

" + } + } + }, + "DeleteDatasetImportJobRequest":{ + "type":"structure", + "required":["DatasetImportJobArn"], + "members":{ + "DatasetImportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset import job to delete.

" + } + } + }, + "DeleteDatasetRequest":{ + "type":"structure", + "required":["DatasetArn"], + "members":{ + "DatasetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset to delete.

" + } + } + }, + "DeleteForecastExportJobRequest":{ + "type":"structure", + "required":["ForecastExportJobArn"], + "members":{ + "ForecastExportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the forecast export job to delete.

" + } + } + }, + "DeleteForecastRequest":{ + "type":"structure", + "required":["ForecastArn"], + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the forecast to delete.

" + } + } + }, + "DeletePredictorRequest":{ + "type":"structure", + "required":["PredictorArn"], + "members":{ + "PredictorArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor to delete.

" + } + } + }, + "DescribeDatasetGroupRequest":{ + "type":"structure", + "required":["DatasetGroupArn"], + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group.

" + } + } + }, + "DescribeDatasetGroupResponse":{ + "type":"structure", + "members":{ + "DatasetGroupName":{ + "shape":"Name", + "documentation":"

The name of the dataset group.

" + }, + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group.

" + }, + "DatasetArns":{ + "shape":"ArnList", + "documentation":"

An array of Amazon Resource Names (ARNs) of the datasets contained in the dataset group.

" + }, + "Domain":{ + "shape":"Domain", + "documentation":"

The domain associated with the dataset group. The Domain and DatasetType that you choose determine the fields that must be present in the training data that you import to the dataset. For example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires item_id, timestamp, and demand fields to be present in your data. For more information, see howitworks-datasets-groups.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the dataset group. States include:

The UPDATE states apply when the UpdateDatasetGroup operation is called.

The Status of the dataset group must be ACTIVE before creating a predictor using the dataset group.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the dataset group was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime is the current query time.

" + } + } + }, + "DescribeDatasetImportJobRequest":{ + "type":"structure", + "required":["DatasetImportJobArn"], + "members":{ + "DatasetImportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset import job.

" + } + } + }, + "DescribeDatasetImportJobResponse":{ + "type":"structure", + "members":{ + "DatasetImportJobName":{ + "shape":"Name", + "documentation":"

The name of the dataset import job.

" + }, + "DatasetImportJobArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset import job.

" + }, + "DatasetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset that the training data was imported to.

" + }, + "TimestampFormat":{ + "shape":"TimestampFormat", + "documentation":"

The format of timestamps in the dataset. Two formats are supported, dependent on the DataFrequency specified when the dataset was created.

" + }, + "DataSource":{ + "shape":"DataSource", + "documentation":"

The location of the training data to import. The training data must be stored in an Amazon S3 bucket.

" + }, + "FieldStatistics":{ + "shape":"FieldStatistics", + "documentation":"

Statistical information about each field in the input data.

" + }, + "DataSize":{ + "shape":"Double", + "documentation":"

The size of the dataset in gigabytes (GB) after completion of the import job.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the dataset import job. The status is reflected in the status of the dataset. For example, when the import job status is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS. States include:

" + }, + "Message":{ + "shape":"Message", + "documentation":"

If an error occurred, an informational message about the error.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the dataset import job was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

Dependent on the status as follows:

" + } + } + }, + "DescribeDatasetRequest":{ + "type":"structure", + "required":["DatasetArn"], + "members":{ + "DatasetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset.

" + } + } + }, + "DescribeDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset.

" + }, + "DatasetName":{ + "shape":"Name", + "documentation":"

The name of the dataset.

" + }, + "Domain":{ + "shape":"Domain", + "documentation":"

The dataset domain.

" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

The dataset type.

" + }, + "DataFrequency":{ + "shape":"Frequency", + "documentation":"

The frequency of data collection.

Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, \"M\" indicates every month and \"30min\" indicates every 30 minutes.

" + }, + "Schema":{ + "shape":"Schema", + "documentation":"

An array of SchemaAttribute objects that specify the dataset fields. Each SchemaAttribute specifies the name and data type of a field.

" + }, + "EncryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the dataset. States include:

The UPDATE states apply while data is imported to the dataset from a call to the CreateDatasetImportJob operation. During this time, the status reflects the status of the dataset import job. For example, when the import job status is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS.

The Status of the dataset must be ACTIVE before you can import training data.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the dataset was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

When the dataset is created, LastModificationTime is the same as CreationTime. After a CreateDatasetImportJob operation is called, LastModificationTime is when the import job finished or failed. While data is being imported to the dataset, LastModificationTime is the current query time.

" + } + } + }, + "DescribeForecastExportJobRequest":{ + "type":"structure", + "required":["ForecastExportJobArn"], + "members":{ + "ForecastExportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the forecast export job.

" + } + } + }, + "DescribeForecastExportJobResponse":{ + "type":"structure", + "members":{ + "ForecastExportJobArn":{ + "shape":"Arn", + "documentation":"

The ARN of the forecast export job.

" + }, + "ForecastExportJobName":{ + "shape":"Name", + "documentation":"

The name of the forecast export job.

" + }, + "ForecastArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the exported forecast.

" + }, + "Destination":{ + "shape":"DataDestination", + "documentation":"

The path to the Amazon S3 bucket where the forecast is exported.

" + }, + "Message":{ + "shape":"Message", + "documentation":"

If an error occurred, an informational message about the error.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the forecast export job. One of the following states:

The Status of the forecast export job must be ACTIVE before you can access the forecast in your Amazon S3 bucket.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the forecast export job was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

When the last successful export job finished.

" + } + } + }, + "DescribeForecastRequest":{ + "type":"structure", + "required":["ForecastArn"], + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the forecast.

" + } + } + }, + "DescribeForecastResponse":{ + "type":"structure", + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

The same forecast ARN as given in the request.

" + }, + "ForecastName":{ + "shape":"Name", + "documentation":"

The name of the forecast.

" + }, + "PredictorArn":{ + "shape":"Arn", + "documentation":"

The ARN of the predictor used to generate the forecast.

" + }, + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group that provided the data used to train the predictor.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the forecast. States include:

The Status of the forecast must be ACTIVE before you can query or export the forecast.

" + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

If an error occurred, an informational message about the error.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the forecast creation task was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

Initially, the same as CreationTime (status is CREATE_PENDING). Updated when inference (creating the forecast) starts (status changed to CREATE_IN_PROGRESS), and when inference is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED).

" + } + } + }, + "DescribePredictorRequest":{ + "type":"structure", + "required":["PredictorArn"], + "members":{ + "PredictorArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor that you want information about.

" + } + } + }, + "DescribePredictorResponse":{ + "type":"structure", + "members":{ + "PredictorArn":{ + "shape":"Name", + "documentation":"

The ARN of the predictor.

" + }, + "PredictorName":{ + "shape":"Name", + "documentation":"

The name of the predictor.

" + }, + "AlgorithmArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the algorithm used for model training.

" + }, + "ForecastHorizon":{ + "shape":"Integer", + "documentation":"

The number of time-steps of the forecast. The forecast horizon is also called the prediction length.

" + }, + "PerformAutoML":{ + "shape":"Boolean", + "documentation":"

Whether the predictor is set to perform AutoML.

" + }, + "PerformHPO":{ + "shape":"Boolean", + "documentation":"

Whether the predictor is set to perform HPO.

" + }, + "TrainingParameters":{ + "shape":"TrainingParameters", + "documentation":"

The training parameters to override for model training. The parameters that you can override are listed in the individual algorithms in aws-forecast-choosing-recipes.

" + }, + "EvaluationParameters":{ + "shape":"EvaluationParameters", + "documentation":"

Used to override the default evaluation parameters of the specified algorithm. Amazon Forecast evaluates a predictor by splitting a dataset into training data and testing data. The evaluation parameters define how to perform the split and the number of iterations.

" + }, + "HPOConfig":{ + "shape":"HyperParameterTuningJobConfig", + "documentation":"

The hyperparameter override values for the algorithm.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Describes the dataset group that contains the data to use to train the predictor.

" + }, + "FeaturizationConfig":{ + "shape":"FeaturizationConfig", + "documentation":"

The featurization configuration.

" + }, + "EncryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.

" + }, + "DatasetImportJobArns":{ + "shape":"ArnList", + "documentation":"

An array of ARNs of the dataset import jobs used to import training data for the predictor.

" + }, + "AutoMLAlgorithmArns":{ + "shape":"ArnList", + "documentation":"

When PerformAutoML is specified, the ARN of the chosen algorithm.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the predictor. States include:

The Status of the predictor must be ACTIVE before using the predictor to create a forecast.

" + }, + "Message":{ + "shape":"Message", + "documentation":"

If an error occurred, an informational message about the error.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the model training task was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

Initially, the same as CreationTime (status is CREATE_PENDING). Updated when training starts (status changed to CREATE_IN_PROGRESS), and when training is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED).

" + } + } + }, + "Domain":{ + "type":"string", + "enum":[ + "RETAIL", + "CUSTOM", + "INVENTORY_PLANNING", + "EC2_CAPACITY", + "WORK_FORCE", + "WEB_TRAFFIC", + "METRICS" + ] + }, + "Double":{"type":"double"}, + "EncryptionConfig":{ + "type":"structure", + "required":[ + "RoleArn", + "KMSKeyArn" + ], + "members":{ + "RoleArn":{ + "shape":"Arn", + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the AWS KMS key.

Cross-account pass role is not allowed. If you pass a role that doesn't belong to your account, an InvalidInputException is thrown.

" + }, + "KMSKeyArn":{ + "shape":"KMSKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key.

" + } + }, + "documentation":"

An AWS Key Management Service (KMS) key and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key. This object is optionally submitted in the CreateDataset and CreatePredictor requests.

" + }, + "ErrorMessage":{"type":"string"}, + "EvaluationParameters":{ + "type":"structure", + "members":{ + "NumberOfBacktestWindows":{ + "shape":"Integer", + "documentation":"

The number of times to split the input data. The default is 1. The range is 1 through 5.

" + }, + "BackTestWindowOffset":{ + "shape":"Integer", + "documentation":"

The point from the end of the dataset where you want to split the data for model training and evaluation. The value is specified as the number of data points.

" + } + }, + "documentation":"

Parameters that define how to split a dataset into training data and testing data, and the number of iterations to perform. These parameters are specified in the predefined algorithms and can be overridden in the CreatePredictor request.

For example, suppose that you have a dataset with data collection frequency set to every day and you have 200 days worth of data (that is, 200 data points). Now suppose that you set the NumberOfBacktestWindows to 2 and the BackTestWindowOffset parameter to 20. The algorithm splits the data twice. The first time, the algorithm trains the model using the first 180 data points and uses the last 20 data points for evaluation. The second time, the algorithm trains the model using the first 160 data points and uses the last 40 data points for evaluation.
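The worked example above, expressed as the request parameter it overrides (a sketch):

# 200 daily data points, split twice, offset 20 from the end of the dataset.
evaluation_parameters = {
    "NumberOfBacktestWindows": 2,
    "BackTestWindowOffset": 20,
}
# Passed as EvaluationParameters=evaluation_parameters in a CreatePredictor call.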

" + }, + "EvaluationResult":{ + "type":"structure", + "members":{ + "AlgorithmArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the algorithm that was evaluated.

" + }, + "TestWindows":{ + "shape":"TestWindows", + "documentation":"

The array of test windows used for evaluating the algorithm. The NumberOfBacktestWindows from the EvaluationParameters object determines the number of windows in the array.

" + } + }, + "documentation":"

The results of evaluating an algorithm. Returned as part of the GetAccuracyMetrics response.

" + }, + "EvaluationType":{ + "type":"string", + "enum":[ + "SUMMARY", + "COMPUTED" + ] + }, + "Featurization":{ + "type":"structure", + "required":["AttributeName"], + "members":{ + "AttributeName":{ + "shape":"Name", + "documentation":"

The name of the schema attribute specifying the data field to be featurized. In this release, only the target field of the TARGET_TIME_SERIES dataset type is supported. For example, for the RETAIL domain, the target is demand, and for the CUSTOM domain, the target is target_value.

" + }, + "FeaturizationPipeline":{ + "shape":"FeaturizationPipeline", + "documentation":"

An array of FeaturizationMethod objects that specifies the feature transformation methods. For this release, the number of methods is limited to one.

" + } + }, + "documentation":"

Provides featurization (transformation) information for a dataset field. This object is part of the FeaturizationConfig object.

For example:

{

\"AttributeName\": \"demand\",

\"FeaturizationPipeline\": [ {

\"FeaturizationMethodName\": \"filling\",

\"FeaturizationMethodParameters\": {\"aggregation\": \"avg\", \"backfill\": \"nan\"}

} ]

}

" + }, + "FeaturizationConfig":{ + "type":"structure", + "required":["ForecastFrequency"], + "members":{ + "ForecastFrequency":{ + "shape":"Frequency", + "documentation":"

The frequency of predictions in a forecast.

Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, \"Y\" indicates every year and \"5min\" indicates every five minutes.

" + }, + "ForecastDimensions":{ + "shape":"ForecastDimensions", + "documentation":"

An array of dimension (field) names that specify how to group the generated forecast.

For example, suppose that you are generating a forecast for item sales across all of your stores, and your dataset contains a store_id field. If you want the sales forecast for each item by store, you would specify store_id as the dimension.

" + }, + "Featurizations":{ + "shape":"Featurizations", + "documentation":"

An array of featurization (transformation) information for the fields of a dataset. In this release, only a single featurization is supported.

" + } + }, + "documentation":"

In a CreatePredictor operation, the specified algorithm trains a model using the specified dataset group. You can optionally tell the operation to modify data fields prior to training a model. These modifications are referred to as featurization.

You define featurization using the FeaturizationConfig object. You specify an array of transformations, one for each field that you want to featurize. You then include the FeaturizationConfig in your CreatePredictor request. Amazon Forecast applies the featurization to the TARGET_TIME_SERIES dataset before model training.

You can create multiple featurization configurations. For example, you might call the CreatePredictor operation twice by specifying different featurization configurations.
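
A minimal boto3 sketch of a CreatePredictor call carrying a FeaturizationConfig, reusing the \"filling\" example above (the ARN, predictor name, and forecast horizon are assumed placeholders):

    import boto3

    forecast = boto3.client("forecast")
    forecast.create_predictor(
        PredictorName="my_predictor",
        PerformAutoML=True,
        ForecastHorizon=10,  # assumed value
        InputDataConfig={
            "DatasetGroupArn": "arn:aws:forecast:us-west-2:123456789012:dataset-group/my_group"
        },
        FeaturizationConfig={
            "ForecastFrequency": "D",
            "Featurizations": [{
                "AttributeName": "demand",
                "FeaturizationPipeline": [{
                    "FeaturizationMethodName": "filling",
                    "FeaturizationMethodParameters": {"aggregation": "avg", "backfill": "nan"},
                }],
            }],
        },
    )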

" + }, + "FeaturizationMethod":{ + "type":"structure", + "required":["FeaturizationMethodName"], + "members":{ + "FeaturizationMethodName":{ + "shape":"FeaturizationMethodName", + "documentation":"

The name of the method. In this release, \"filling\" is the only supported method.

" + }, + "FeaturizationMethodParameters":{ + "shape":"FeaturizationMethodParameters", + "documentation":"

The method parameters (key-value pairs). Specify these to override the default values. The following list shows the parameters and their valid values. Bold signifies the default value.

" + } + }, + "documentation":"

Provides information about a method that featurizes (transforms) a dataset field. The method is part of the FeaturizationPipeline of the Featurization object. If FeaturizationMethodParameters isn't specified, Amazon Forecast uses default parameters.

For example:

{

\"FeaturizationMethodName\": \"filling\",

\"FeaturizationMethodParameters\": {\"aggregation\": \"avg\", \"backfill\": \"nan\"}

}

" + }, + "FeaturizationMethodName":{ + "type":"string", + "enum":["filling"] + }, + "FeaturizationMethodParameters":{ + "type":"map", + "key":{"shape":"ParameterKey"}, + "value":{"shape":"ParameterValue"}, + "max":20, + "min":1 + }, + "FeaturizationPipeline":{ + "type":"list", + "member":{"shape":"FeaturizationMethod"}, + "max":1, + "min":1 + }, + "Featurizations":{ + "type":"list", + "member":{"shape":"Featurization"}, + "max":1, + "min":1 + }, + "FieldStatistics":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Statistics"} + }, + "Filter":{ + "type":"structure", + "required":[ + "Key", + "Value", + "Condition" + ], + "members":{ + "Key":{ + "shape":"String", + "documentation":"

The name of the parameter to filter on.

" + }, + "Value":{ + "shape":"Arn", + "documentation":"

A valid value for Key.

" + }, + "Condition":{ + "shape":"FilterConditionString", + "documentation":"

The condition to apply.

" + } + }, + "documentation":"

Describes a filter for choosing a subset of objects. Each filter consists of a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude, respectively, the objects that match the statement. The match statement consists of a key and a value.

" + }, + "FilterConditionString":{ + "type":"string", + "enum":[ + "IS", + "IS_NOT" + ] + }, + "Filters":{ + "type":"list", + "member":{"shape":"Filter"} + }, + "ForecastDimensions":{ + "type":"list", + "member":{"shape":"Name"}, + "max":5, + "min":1 + }, + "ForecastExportJobSummary":{ + "type":"structure", + "members":{ + "ForecastExportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the forecast export job.

" + }, + "ForecastExportJobName":{ + "shape":"Name", + "documentation":"

The name of the forecast export job.

" + }, + "Destination":{ + "shape":"DataDestination", + "documentation":"

The path to the S3 bucket where the forecast is stored.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the forecast export job. States include CREATE_PENDING, CREATE_IN_PROGRESS, ACTIVE, and CREATE_FAILED.

The Status of the forecast export job must be ACTIVE before you can access the forecast in your Amazon S3 bucket.

" + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

If an error occurred, an informational message about the error.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the forecast export job was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

When the last successful export job finished.

" + } + }, + "documentation":"

Provides a summary of the forecast export job properties used in the ListForecastExportJobs operation. To get the complete set of properties, call the DescribeForecastExportJob operation, and provide the listed ForecastExportJobArn.

" + }, + "ForecastExportJobs":{ + "type":"list", + "member":{"shape":"ForecastExportJobSummary"} + }, + "ForecastSummary":{ + "type":"structure", + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

The ARN of the forecast.

" + }, + "ForecastName":{ + "shape":"Name", + "documentation":"

The name of the forecast.

" + }, + "PredictorArn":{ + "shape":"String", + "documentation":"

The ARN of the predictor used to generate the forecast.

" + }, + "DatasetGroupArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group that provided the data used to train the predictor.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the forecast. States include CREATE_PENDING, CREATE_IN_PROGRESS, ACTIVE, and CREATE_FAILED.

The Status of the forecast must be ACTIVE before you can query or export the forecast.

" + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

If an error occurred, an informational message about the error.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the forecast creation task was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

Initially, the same as CreationTime (status is CREATE_PENDING). Updated when inference (creating the forecast) starts (status changed to CREATE_IN_PROGRESS), and when inference is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED).

" + } + }, + "documentation":"

Provides a summary of the forecast properties used in the ListForecasts operation. To get the complete set of properties, call the DescribeForecast operation, and provide the listed ForecastArn.

" + }, + "Forecasts":{ + "type":"list", + "member":{"shape":"ForecastSummary"} + }, + "Frequency":{ + "type":"string", + "pattern":"^Y|M|W|D|H|30min|15min|10min|5min|1min$" + }, + "GetAccuracyMetricsRequest":{ + "type":"structure", + "required":["PredictorArn"], + "members":{ + "PredictorArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor to get metrics for.

" + } + } + }, + "GetAccuracyMetricsResponse":{ + "type":"structure", + "members":{ + "PredictorEvaluationResults":{ + "shape":"PredictorEvaluationResults", + "documentation":"

An array of results from evaluating the predictor.

" + } + } + }, + "HyperParameterTuningJobConfig":{ + "type":"structure", + "members":{ + "ParameterRanges":{ + "shape":"ParameterRanges", + "documentation":"

Specifies the ranges of valid values for the hyperparameters.

" + } + }, + "documentation":"

Configuration information for a hyperparameter tuning job. This object is specified in the CreatePredictor request.

A hyperparameter is a parameter that governs the model training process and is set before training starts. This is as opposed to a model parameter that is determined during training. The values of the hyperparameters have an effect on the chosen model parameters.

A hyperparameter tuning job is the process of choosing the optimum set of hyperparameter values that optimize a specified metric. This is accomplished by running many training jobs over a range of hyperparameter values. The optimum set of values is dependent on the algorithm, the training data, and the given metric objective.
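
A sketch of the corresponding configuration object (the hyperparameter name context_length is a hypothetical example; the shape names follow the ParameterRanges and IntegerParameterRange structures defined below):

    # Passed as the hyperparameter tuning configuration of a CreatePredictor
    # request (assumed parameter name: HPOConfig).
    hpo_config = {
        "ParameterRanges": {
            "IntegerParameterRanges": [{
                "Name": "context_length",  # hypothetical hyperparameter
                "MinValue": 1,
                "MaxValue": 64,
                "ScalingType": "Auto",
            }],
        },
    }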

" + }, + "InputDataConfig":{ + "type":"structure", + "required":["DatasetGroupArn"], + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group.

" + }, + "SupplementaryFeatures":{ + "shape":"SupplementaryFeatures", + "documentation":"

An array of supplementary features. For this release, the only supported feature is a holiday calendar.

" + } + }, + "documentation":"

The data used to train a predictor. The data includes a dataset group and any supplementary features. This object is specified in the CreatePredictor request.

" + }, + "Integer":{"type":"integer"}, + "IntegerParameterRange":{ + "type":"structure", + "required":[ + "Name", + "MaxValue", + "MinValue" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the hyperparameter to tune.

" + }, + "MaxValue":{ + "shape":"Integer", + "documentation":"

The maximum tunable value of the hyperparameter.

" + }, + "MinValue":{ + "shape":"Integer", + "documentation":"

The minimum tunable value of the hyperparameter.

" + }, + "ScalingType":{ + "shape":"ScalingType", + "documentation":"

The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

Auto

Amazon Forecast hyperparameter tuning chooses the best scale for the hyperparameter.

Linear

Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

Logarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

Logarithmic scaling works only for ranges that have only values greater than 0.

ReverseLogarithmic

Not supported for IntegerParameterRange.

Reverse logarithmic scaling works only for ranges that are entirely within the range 0 <= x < 1.0.

" + } + }, + "documentation":"

Specifies an integer hyperparameter and its range of tunable values. This object is part of the ParameterRanges object.

" + }, + "IntegerParameterRanges":{ + "type":"list", + "member":{"shape":"IntegerParameterRange"}, + "max":20, + "min":1 + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

We can't process the request because it includes an invalid value or a value that exceeds the valid range.

", + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The token is not valid. Tokens expire after 24 hours.

", + "exception":true + }, + "KMSKeyArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws:kms:.*:key/.*" + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The limit on the number of requests per second has been exceeded.

", + "exception":true + }, + "ListDatasetGroupsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The number of items to return in the response.

" + } + } + }, + "ListDatasetGroupsResponse":{ + "type":"structure", + "members":{ + "DatasetGroups":{ + "shape":"DatasetGroups", + "documentation":"

An array of objects that summarize each dataset group's properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

" + } + } + }, + "ListDatasetImportJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The number of items to return in the response.

" + }, + "Filters":{ + "shape":"Filters", + "documentation":"

An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude, respectively, from the list, the dataset import jobs that match the statement. The match statement consists of a key and a value. In this release, Name is the only valid key, which filters on the DatasetImportJobName property.

For example, to list all dataset import jobs named my_dataset_import_job, you would specify:

\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Name\", \"Value\": \"my_dataset_import_job\" } ]

" + } + } + }, + "ListDatasetImportJobsResponse":{ + "type":"structure", + "members":{ + "DatasetImportJobs":{ + "shape":"DatasetImportJobs", + "documentation":"

An array of objects that summarize each dataset import job's properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

" + } + } + }, + "ListDatasetsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The number of items to return in the response.

" + } + } + }, + "ListDatasetsResponse":{ + "type":"structure", + "members":{ + "Datasets":{ + "shape":"Datasets", + "documentation":"

An array of objects that summarize each dataset's properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

" + } + } + }, + "ListForecastExportJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The number of items to return in the response.

" + }, + "Filters":{ + "shape":"Filters", + "documentation":"

An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude, respectively, from the list, the forecast export jobs that match the statement. The match statement consists of a key and a value. In this release, Name is the only valid key, which filters on the ForecastExportJobName property.

For example, to list all forecast export jobs named my_forecast_export_job, you would specify:

\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Name\", \"Value\": \"my_forecast_export_job\" } ]

" + } + } + }, + "ListForecastExportJobsResponse":{ + "type":"structure", + "members":{ + "ForecastExportJobs":{ + "shape":"ForecastExportJobs", + "documentation":"

An array of objects that summarize each export job's properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

" + } + } + }, + "ListForecastsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The number of items to return in the response.

" + }, + "Filters":{ + "shape":"Filters", + "documentation":"

An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude, respectively, from the list, the forecasts that match the statement. The match statement consists of a key and a value. In this release, Name is the only valid key, which filters on the ForecastName property.

For example, to list all forecasts named my_forecast, you would specify:

\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Name\", \"Value\": \"my_forecast\" } ]

" + } + } + }, + "ListForecastsResponse":{ + "type":"structure", + "members":{ + "Forecasts":{ + "shape":"Forecasts", + "documentation":"

An array of objects that summarize each forecast's properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

" + } + } + }, + "ListPredictorsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The number of items to return in the response.

" + }, + "Filters":{ + "shape":"Filters", + "documentation":"

An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude, respectively, from the list, the predictors that match the statement. The match statement consists of a key and a value. In this release, Name is the only valid key, which filters on the PredictorName property.

For example, to list all predictors named my_predictor, you would specify:

\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Name\", \"Value\": \"my_predictor\" } ]

" + } + } + }, + "ListPredictorsResponse":{ + "type":"structure", + "members":{ + "Predictors":{ + "shape":"Predictors", + "documentation":"

An array of objects that summarize each predictor's properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "Message":{"type":"string"}, + "Metrics":{ + "type":"structure", + "members":{ + "RMSE":{ + "shape":"Double", + "documentation":"

The root mean square error (RMSE).

" + }, + "WeightedQuantileLosses":{ + "shape":"WeightedQuantileLosses", + "documentation":"

An array of weighted quantile losses. Quantiles divide a probability distribution into regions of equal probability. The distribution in this case is the loss function.

" + } + }, + "documentation":"

Provides metrics used to evaluate the performance of a predictor. This object is part of the WindowSummary object.

" + }, + "Name":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_]*" + }, + "NextToken":{ + "type":"string", + "max":3000, + "min":1 + }, + "ParameterKey":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\-\\_\\.\\/\\[\\]\\,\\\\]+$" + }, + "ParameterRanges":{ + "type":"structure", + "members":{ + "CategoricalParameterRanges":{ + "shape":"CategoricalParameterRanges", + "documentation":"

Specifies the tunable range for each categorical hyperparameter.

" + }, + "ContinuousParameterRanges":{ + "shape":"ContinuousParameterRanges", + "documentation":"

Specifies the tunable range for each continuous hyperparameter.

" + }, + "IntegerParameterRanges":{ + "shape":"IntegerParameterRanges", + "documentation":"

Specifies the tunable range for each integer hyperparameter.

" + } + }, + "documentation":"

Specifies the categorical, continuous, and integer hyperparameters, and their ranges of tunable values. The range of tunable values determines which values that a hyperparameter tuning job can choose for the specified hyperparameter. This object is part of the HyperParameterTuningJobConfig object.

" + }, + "ParameterValue":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\-\\_\\.\\/\\[\\]\\,\\\"\\\\\\s]+$" + }, + "PredictorEvaluationResults":{ + "type":"list", + "member":{"shape":"EvaluationResult"} + }, + "PredictorSummary":{ + "type":"structure", + "members":{ + "PredictorArn":{ + "shape":"Arn", + "documentation":"

The ARN of the predictor.

" + }, + "PredictorName":{ + "shape":"Name", + "documentation":"

The name of the predictor.

" + }, + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group that contains the data used to train the predictor.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the predictor. States include CREATE_PENDING, CREATE_IN_PROGRESS, ACTIVE, and CREATE_FAILED.

The Status of the predictor must be ACTIVE before using the predictor to create a forecast.

" + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

If an error occurred, an informational message about the error.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the model training task was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

Initially, the same as CreationTime (status is CREATE_PENDING). Updated when training starts (status changed to CREATE_IN_PROGRESS), and when training is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED).

" + } + }, + "documentation":"

Provides a summary of the predictor properties used in the ListPredictors operation. To get the complete set of properties, call the DescribePredictor operation, and provide the listed PredictorArn.

" + }, + "Predictors":{ + "type":"list", + "member":{"shape":"PredictorSummary"} + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

There is already a resource with that Amazon Resource Name (ARN). Try again with a different ARN.

", + "exception":true + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The specified resource is in use.

", + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.

", + "exception":true + }, + "S3Config":{ + "type":"structure", + "required":[ + "Path", + "RoleArn" + ], + "members":{ + "Path":{ + "shape":"S3Path", + "documentation":"

The path to an Amazon Simple Storage Service (Amazon S3) bucket or file(s) in an Amazon S3 bucket.

" + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket or file(s).

Cross-account pass role is not allowed. If you pass a role that doesn't belong to your account, an InvalidInputException is thrown.

" + }, + "KMSKeyArn":{ + "shape":"KMSKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key.

" + } + }, + "documentation":"

The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the file(s). Optionally, includes an AWS Key Management Service (KMS) key. This object is submitted in the CreateDatasetImportJob and CreateForecastExportJob requests.
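
A minimal boto3 sketch of a CreateDatasetImportJob request carrying this object (the DataSource wrapper and TimestampFormat are defined elsewhere in this service model; all ARNs and the S3 path are placeholders):

    import boto3

    forecast = boto3.client("forecast")
    forecast.create_dataset_import_job(
        DatasetImportJobName="my_dataset_import_job",
        DatasetArn="arn:aws:forecast:us-west-2:123456789012:dataset/my_dataset",
        DataSource={
            "S3Config": {
                "Path": "s3://my-bucket/training-data.csv",
                "RoleArn": "arn:aws:iam::123456789012:role/ForecastAccessRole",
            }
        },
        TimestampFormat="yyyy-MM-dd",
    )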

" + }, + "S3Path":{ + "type":"string", + "pattern":"^s3://.+$" + }, + "ScalingType":{ + "type":"string", + "enum":[ + "Auto", + "Linear", + "Logarithmic", + "ReverseLogarithmic" + ] + }, + "Schema":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"SchemaAttributes", + "documentation":"

An array of attributes specifying the name and type of each field in a dataset.

" + } + }, + "documentation":"

Defines the fields of a dataset. This object is specified in the CreateDataset request.

" + }, + "SchemaAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"Name", + "documentation":"

The name of the dataset field.

" + }, + "AttributeType":{ + "shape":"AttributeType", + "documentation":"

The data type of the field.

" + } + }, + "documentation":"

An attribute of a schema, which defines a field of a dataset. A schema attribute is required for every field in a dataset. The Schema object contains an array of SchemaAttribute objects.
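
A sketch of a Schema for a TARGET_TIME_SERIES dataset whose target field is demand (see the Featurization notes above); the attribute types shown are assumptions, since the AttributeType enum is defined elsewhere in this model:

    schema = {
        "Attributes": [
            {"AttributeName": "timestamp", "AttributeType": "timestamp"},
            {"AttributeName": "item_id", "AttributeType": "string"},
            {"AttributeName": "demand", "AttributeType": "float"},
        ]
    }
    # Supplied as the Schema parameter of the CreateDataset request.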

" + }, + "SchemaAttributes":{ + "type":"list", + "member":{"shape":"SchemaAttribute"} + }, + "Statistics":{ + "type":"structure", + "members":{ + "Count":{ + "shape":"Integer", + "documentation":"

The number of values in the field.

" + }, + "CountDistinct":{ + "shape":"Integer", + "documentation":"

The number of distinct values in the field.

" + }, + "CountNull":{ + "shape":"Integer", + "documentation":"

The number of null values in the field.

" + }, + "CountNan":{ + "shape":"Integer", + "documentation":"

The number of NAN (not a number) values in the field.

" + }, + "Min":{ + "shape":"String", + "documentation":"

For a numeric field, the minimum value in the field.

" + }, + "Max":{ + "shape":"String", + "documentation":"

For a numeric field, the maximum value in the field.

" + }, + "Avg":{ + "shape":"Double", + "documentation":"

For a numeric field, the average value in the field.

" + }, + "Stddev":{ + "shape":"Double", + "documentation":"

For a numeric field, the standard deviation.

" + } + }, + "documentation":"

Provides statistics for each data field imported to an Amazon Forecast dataset with the CreateDatasetImportJob operation.

" + }, + "Status":{ + "type":"string", + "max":256 + }, + "String":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\_]+$" + }, + "SupplementaryFeature":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the feature. This must be \"holiday\".

" + }, + "Value":{ + "shape":"Value", + "documentation":"

One of the following two-letter country codes:

" + } + }, + "documentation":"

Describes a supplementary feature of a dataset group. This object is part of the InputDataConfig object.

For this release, the only supported feature is a holiday calendar. If the calendar is used, all data should belong to the same country as the calendar. For the calendar data, see http://jollyday.sourceforge.net/data.html.
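
A sketch of the object (the country code shown is an assumed example):

    # Name must be "holiday" per the description above.
    supplementary_features = [{"Name": "holiday", "Value": "US"}]
    # Supplied via the SupplementaryFeatures member of InputDataConfig
    # in a CreatePredictor request.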

" + }, + "SupplementaryFeatures":{ + "type":"list", + "member":{"shape":"SupplementaryFeature"}, + "max":1, + "min":1 + }, + "TestWindows":{ + "type":"list", + "member":{"shape":"WindowSummary"} + }, + "Timestamp":{"type":"timestamp"}, + "TimestampFormat":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\-\\:\\.\\,\\'\\s]+$" + }, + "TrainingParameters":{ + "type":"map", + "key":{"shape":"ParameterKey"}, + "value":{"shape":"ParameterValue"}, + "max":100, + "min":0 + }, + "UpdateDatasetGroupRequest":{ + "type":"structure", + "required":[ + "DatasetGroupArn", + "DatasetArns" + ], + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group.

" + }, + "DatasetArns":{ + "shape":"ArnList", + "documentation":"

An array of Amazon Resource Names (ARNs) of the datasets to add to the dataset group.

" + } + } + }, + "UpdateDatasetGroupResponse":{ + "type":"structure", + "members":{ + } + }, + "Value":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\_\\-]+$" + }, + "Values":{ + "type":"list", + "member":{"shape":"Value"}, + "max":20, + "min":1 + }, + "WeightedQuantileLoss":{ + "type":"structure", + "members":{ + "Quantile":{ + "shape":"Double", + "documentation":"

The quantile. Quantiles divide a probability distribution into regions of equal probability. For example, if the distribution was divided into 5 regions of equal probability, the quantiles would be 0.2, 0.4, 0.6, and 0.8.
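
The boundary arithmetic generalizes to any number of equal-probability regions, as this small sketch shows:

    def quantile_boundaries(regions):
        # Boundaries that divide a distribution into `regions` equal parts.
        return [i / regions for i in range(1, regions)]

    print(quantile_boundaries(5))  # [0.2, 0.4, 0.6, 0.8]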

" + }, + "LossValue":{ + "shape":"Double", + "documentation":"

The difference between the predicted value and actual value over the quantile, weighted (normalized) by dividing by the sum over all quantiles.

" + } + }, + "documentation":"

The weighted loss value for a quantile. This object is part of the Metrics object.

" + }, + "WeightedQuantileLosses":{ + "type":"list", + "member":{"shape":"WeightedQuantileLoss"} + }, + "WindowSummary":{ + "type":"structure", + "members":{ + "TestWindowStart":{ + "shape":"Timestamp", + "documentation":"

The timestamp that defines the start of the window.

" + }, + "TestWindowEnd":{ + "shape":"Timestamp", + "documentation":"

The timestamp that defines the end of the window.

" + }, + "ItemCount":{ + "shape":"Integer", + "documentation":"

The number of data points within the window.

" + }, + "EvaluationType":{ + "shape":"EvaluationType", + "documentation":"

The type of evaluation.

" + }, + "Metrics":{"shape":"Metrics"} + }, + "documentation":"

The metrics for a time range within the evaluation portion of a dataset. This object is part of the EvaluationResult object.

The TestWindowStart and TestWindowEnd parameters are determined by the BackTestWindowOffset parameter of the EvaluationParameters object.

" + } + }, + "documentation":"

Provides APIs for creating and managing Amazon Forecast resources.

" +} diff --git a/botocore/data/forecastquery/2018-06-26/paginators-1.json b/botocore/data/forecastquery/2018-06-26/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/forecastquery/2018-06-26/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/forecastquery/2018-06-26/service-2.json b/botocore/data/forecastquery/2018-06-26/service-2.json new file mode 100644 index 00000000..94aac8e5 --- /dev/null +++ b/botocore/data/forecastquery/2018-06-26/service-2.json @@ -0,0 +1,190 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-06-26", + "endpointPrefix":"forecastquery", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Forecast Query Service", + "serviceId":"forecastquery", + "signatureVersion":"v4", + "signingName":"forecast", + "targetPrefix":"AmazonForecastRuntime", + "uid":"forecastquery-2018-06-26" + }, + "operations":{ + "QueryForecast":{ + "name":"QueryForecast", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"QueryForecastRequest"}, + "output":{"shape":"QueryForecastResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Retrieves a forecast filtered by the supplied criteria.

The criterion is a key-value pair. The key is either item_id (or the equivalent non-timestamp, non-target field) from the TARGET_TIME_SERIES dataset, or one of the forecast dimensions specified as part of the FeaturizationConfig object.

By default, the complete date range of the filtered forecast is returned. Optionally, you can request a specific date range within the forecast.

The forecasts generated by Amazon Forecast are in the same timezone as the dataset that was used to create the predictor.
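
A minimal boto3 sketch of such a query (the forecast ARN, item ID, and date range are placeholders; item_id is the TARGET_TIME_SERIES key described above):

    import boto3

    forecastquery = boto3.client("forecastquery")
    response = forecastquery.query_forecast(
        ForecastArn="arn:aws:forecast:us-west-2:123456789012:forecast/my_forecast",
        StartDate="2019-01-01T00:00:00Z",
        EndDate="2019-01-08T00:00:00Z",
        Filters={"item_id": "item_001"},
    )
    predictions = response["Forecast"]["Predictions"]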

" + } + }, + "shapes":{ + "Arn":{ + "type":"string", + "max":256, + "pattern":"arn:([a-z\\d-]+):forecast:.*:.*:.+" + }, + "AttributeName":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\_\\-]+$" + }, + "AttributeValue":{ + "type":"string", + "max":256 + }, + "DataPoint":{ + "type":"structure", + "members":{ + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of the specific forecast.

" + }, + "Value":{ + "shape":"Double", + "documentation":"

The forecast value.

" + } + }, + "documentation":"

The forecast value for a specific date. Part of the Forecast object.

" + }, + "DateTime":{ + "type":"string", + "max":20, + "pattern":"^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z$" + }, + "Double":{"type":"double"}, + "ErrorMessage":{"type":"string"}, + "Filters":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"}, + "max":50, + "min":1 + }, + "Forecast":{ + "type":"structure", + "members":{ + "Predictions":{ + "shape":"Predictions", + "documentation":"

The forecast.

The string of the string-to-array map is one of the following values: p10, p50, or p90.

" + } + }, + "documentation":"

Provides information about a forecast. Returned as part of the QueryForecast response.

" + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The value that you provided was invalid or too long.

", + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The token is not valid. Tokens expire after 24 hours.

", + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The limit on the number of requests per second has been exceeded.

", + "exception":true + }, + "NextToken":{ + "type":"string", + "max":3000, + "min":1 + }, + "Predictions":{ + "type":"map", + "key":{"shape":"Statistic"}, + "value":{"shape":"TimeSeries"} + }, + "QueryForecastRequest":{ + "type":"structure", + "required":[ + "ForecastArn", + "Filters" + ], + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the forecast to query.

" + }, + "StartDate":{ + "shape":"DateTime", + "documentation":"

The start date for the forecast. Specify the date using this format: yyyy-MM-dd'T'HH:mm:ss'Z' (ISO 8601 format). For example, \"1970-01-01T00:00:00Z.\"

" + }, + "EndDate":{ + "shape":"DateTime", + "documentation":"

The end date for the forecast. Specify the date using this format: yyyy-MM-dd'T'HH:mm:ss'Z' (ISO 8601 format). For example, \"1970-01-01T00:00:00Z.\"

" + }, + "Filters":{ + "shape":"Filters", + "documentation":"

The filtering criteria to apply when retrieving the forecast. For example:

To get the full forecast, use the CreateForecastExportJob operation.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

" + } + } + }, + "QueryForecastResponse":{ + "type":"structure", + "members":{ + "Forecast":{ + "shape":"Forecast", + "documentation":"

The forecast.

" + } + } + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The specified resource is in use.

", + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

We can't find that resource. Check the information that you've provided and try again.

", + "exception":true + }, + "Statistic":{ + "type":"string", + "max":4 + }, + "TimeSeries":{ + "type":"list", + "member":{"shape":"DataPoint"} + }, + "Timestamp":{ + "type":"string", + "max":20, + "pattern":"^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z$" + } + }, + "documentation":"

Provides APIs for creating and managing Amazon Forecast resources.

" +} diff --git a/botocore/data/gamelift/2015-10-01/service-2.json b/botocore/data/gamelift/2015-10-01/service-2.json index 1c86d919..22a5c94d 100644 --- a/botocore/data/gamelift/2015-10-01/service-2.json +++ b/botocore/data/gamelift/2015-10-01/service-2.json @@ -1383,6 +1383,20 @@ "FAILED" ] }, + "CertificateConfiguration":{ + "type":"structure", + "required":["CertificateType"], + "members":{ + "CertificateType":{"shape":"CertificateType"} + } + }, + "CertificateType":{ + "type":"string", + "enum":[ + "DISABLED", + "GENERATED" + ] + }, "ComparisonOperatorType":{ "type":"string", "enum":[ @@ -1546,7 +1560,8 @@ "InstanceRoleArn":{ "shape":"NonEmptyString", "documentation":"

Unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN using the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

" - } + }, + "CertificateConfiguration":{"shape":"CertificateConfiguration"} }, "documentation":"

Represents the input for a request action.

" }, @@ -2722,6 +2737,7 @@ "type":"list", "member":{"shape":"DesiredPlayerSession"} }, + "DnsName":{"type":"string"}, "Double":{"type":"double"}, "DoubleObject":{"type":"double"}, "EC2InstanceCounts":{ @@ -2983,7 +2999,8 @@ "InstanceRoleArn":{ "shape":"NonEmptyString", "documentation":"

Unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN using the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

" - } + }, + "CertificateConfiguration":{"shape":"CertificateConfiguration"} }, "documentation":"

General properties describing a fleet.

" }, @@ -3161,6 +3178,7 @@ "shape":"IpAddress", "documentation":"

IP address of the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number.

" }, + "DnsName":{"shape":"DnsName"}, "Port":{ "shape":"PortNumber", "documentation":"

Port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number.

" @@ -3200,6 +3218,7 @@ "shape":"StringModel", "documentation":"

IP address of the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number.

" }, + "DnsName":{"shape":"DnsName"}, "Port":{ "shape":"PositiveInteger", "documentation":"

Port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number.

" @@ -3301,6 +3320,7 @@ "shape":"IpAddress", "documentation":"

IP address of the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" }, + "DnsName":{"shape":"DnsName"}, "Port":{ "shape":"PortNumber", "documentation":"

Port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" @@ -3326,7 +3346,8 @@ "PENDING", "FULFILLED", "CANCELLED", - "TIMED_OUT" + "TIMED_OUT", + "FAILED" ] }, "GameSessionQueue":{ @@ -3475,6 +3496,7 @@ "shape":"IpAddress", "documentation":"

IP address assigned to the instance.

" }, + "DnsName":{"shape":"DnsName"}, "OperatingSystem":{ "shape":"OperatingSystem", "documentation":"

Operating system that is running on this instance.

" @@ -4174,6 +4196,7 @@ "shape":"IpAddress", "documentation":"

IP address of the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number.

" }, + "DnsName":{"shape":"DnsName"}, "Port":{ "shape":"PortNumber", "documentation":"

Port number for the game session. To connect to an Amazon GameLift server process, an app needs both the IP address and port number.

" diff --git a/botocore/data/globalaccelerator/2018-08-08/service-2.json b/botocore/data/globalaccelerator/2018-08-08/service-2.json index c1b523e6..2ebe607c 100644 --- a/botocore/data/globalaccelerator/2018-08-08/service-2.json +++ b/botocore/data/globalaccelerator/2018-08-08/service-2.json @@ -26,7 +26,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers. To see an AWS CLI example of creating an accelerator, scroll down to Example.

" + "documentation":"

Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers. To see an AWS CLI example of creating an accelerator, scroll down to Example.

You must specify the US-West-2 (Oregon) Region to create or update accelerators.
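
A minimal boto3 sketch of creating an accelerator under these constraints (the accelerator name is a placeholder; IdempotencyToken guards against duplicate creation on retries):

    import uuid
    import boto3

    # Accelerators must be created in us-west-2, per the note above.
    ga = boto3.client("globalaccelerator", region_name="us-west-2")
    response = ga.create_accelerator(
        Name="my-accelerator",  # alphanumerics and hyphens only
        IpAddressType="IPV4",
        Enabled=True,
        IdempotencyToken=str(uuid.uuid4()),
    )
    print(response["Accelerator"]["DnsName"])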

" }, "CreateEndpointGroup":{ "name":"CreateEndpointGroup", @@ -42,7 +42,8 @@ {"shape":"ListenerNotFoundException"}, {"shape":"InternalServiceErrorException"}, {"shape":"InvalidArgumentException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one AWS Region. To see an AWS CLI example of creating an endpoint group, scroll down to Example.

" }, @@ -87,6 +88,7 @@ }, "input":{"shape":"DeleteEndpointGroupRequest"}, "errors":[ + {"shape":"InvalidArgumentException"}, {"shape":"EndpointGroupNotFoundException"}, {"shape":"InternalServiceErrorException"} ], @@ -100,6 +102,7 @@ }, "input":{"shape":"DeleteListenerRequest"}, "errors":[ + {"shape":"InvalidArgumentException"}, {"shape":"ListenerNotFoundException"}, {"shape":"AssociatedEndpointGroupFoundException"}, {"shape":"InternalServiceErrorException"} @@ -145,6 +148,7 @@ "input":{"shape":"DescribeEndpointGroupRequest"}, "output":{"shape":"DescribeEndpointGroupResponse"}, "errors":[ + {"shape":"InvalidArgumentException"}, {"shape":"EndpointGroupNotFoundException"}, {"shape":"InternalServiceErrorException"} ], @@ -174,6 +178,7 @@ "input":{"shape":"ListAcceleratorsRequest"}, "output":{"shape":"ListAcceleratorsResponse"}, "errors":[ + {"shape":"InvalidArgumentException"}, {"shape":"InvalidNextTokenException"}, {"shape":"InternalServiceErrorException"} ], @@ -204,6 +209,7 @@ "input":{"shape":"ListListenersRequest"}, "output":{"shape":"ListListenersResponse"}, "errors":[ + {"shape":"InvalidArgumentException"}, {"shape":"AcceleratorNotFoundException"}, {"shape":"InvalidNextTokenException"}, {"shape":"InternalServiceErrorException"} @@ -223,7 +229,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"InvalidArgumentException"} ], - "documentation":"

Update an accelerator.

" + "documentation":"

Update an accelerator. To see an AWS CLI example of updating an accelerator, scroll down to Example.

You must specify the US-West-2 (Oregon) Region to create or update accelerators.

" }, "UpdateAcceleratorAttributes":{ "name":"UpdateAcceleratorAttributes", @@ -236,7 +242,8 @@ "errors":[ {"shape":"AcceleratorNotFoundException"}, {"shape":"InternalServiceErrorException"}, - {"shape":"InvalidArgumentException"} + {"shape":"InvalidArgumentException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

Update the attributes for an accelerator. To see an AWS CLI example of updating an accelerator to enable flow logs, scroll down to Example.

" }, @@ -252,7 +259,8 @@ {"shape":"InvalidArgumentException"}, {"shape":"EndpointGroupNotFoundException"}, {"shape":"InternalServiceErrorException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

Update an endpoint group. To see an AWS CLI example of updating an endpoint group, scroll down to Example.

" }, @@ -284,7 +292,7 @@ }, "Name":{ "shape":"GenericString", - "documentation":"

The name of the accelerator. The name can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens (-), and must not begin or end with a hyphen.

" + "documentation":"

The name of the accelerator. The name must contain only alphanumeric characters or hyphens (-), and must not begin or end with a hyphen.

" }, "IpAddressType":{ "shape":"IpAddressType", @@ -292,11 +300,15 @@ }, "Enabled":{ "shape":"GenericBoolean", - "documentation":"

Indicates whether theaccelerator is enabled. The value is true or false. The default value is true.

If the value is set to true, the accelerator cannot be deleted. If set to false, accelerator can be deleted.

" + "documentation":"

Indicates whether the accelerator is enabled. The value is true or false. The default value is true.

If the value is set to true, the accelerator cannot be deleted. If set to false, the accelerator can be deleted.

" }, "IpSets":{ "shape":"IpSets", - "documentation":"

IP address set associated with the accelerator.

" + "documentation":"

The static IP addresses that Global Accelerator associates with the accelerator.

" + }, + "DnsName":{ + "shape":"GenericString", + "documentation":"

The Domain Name System (DNS) name that Global Accelerator creates that points to your accelerator's static IP addresses.

The naming convention for the DNS name is: a lower case letter a, followed by a 16-bit random hex string, followed by .awsglobalaccelerator.com. For example: a1234567890abcdef.awsglobalaccelerator.com.

For more information about the default DNS name, see Support for DNS Addressing in Global Accelerator in the AWS Global Accelerator Developer Guide.

" }, "Status":{ "shape":"AcceleratorStatus", @@ -358,6 +370,14 @@ "type":"list", "member":{"shape":"Accelerator"} }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

You don't have access permission.

", + "exception":true + }, "AssociatedEndpointGroupFoundException":{ "type":"structure", "members":{ @@ -546,10 +566,11 @@ }, "DescribeAcceleratorAttributesRequest":{ "type":"structure", + "required":["AcceleratorArn"], "members":{ "AcceleratorArn":{ "shape":"GenericString", - "documentation":"

The Amazon Resource Name (ARN) of the accelerator with the attributes that you want to describe. Value is required.

" + "documentation":"

The Amazon Resource Name (ARN) of the accelerator with the attributes that you want to describe.

" } } }, @@ -629,6 +650,10 @@ "Weight":{ "shape":"EndpointWeight", "documentation":"

The weight associated with the endpoint. When you add weights to endpoints, you configure AWS Global Accelerator to route traffic based on proportions that you specify. For example, you might specify endpoint weights of 4, 5, 5, and 6 (sum=20). The result is that 4/20 of your traffic, on average, is routed to the first endpoint, 5/20 is routed both to the second and third endpoints, and 6/20 is routed to the last endpoint. For more information, see Endpoint Weights in the AWS Global Accelerator Developer Guide.
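
The proportion arithmetic from this example, as a small sketch:

    def traffic_shares(weights):
        # Average share of traffic per endpoint, proportional to its weight.
        total = sum(weights)
        return [w / total for w in weights]

    print(traffic_shares([4, 5, 5, 6]))  # [0.2, 0.25, 0.25, 0.3]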

" + }, + "ClientIPPreservationEnabled":{ + "shape":"GenericBoolean", + "documentation":"

Indicates whether client IP address preservation is enabled for an Application Load Balancer endpoint. The value is true or false. The default value is true for new accelerators.

If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the Application Load Balancer endpoint fronted by the accelerator.

For more information, see Viewing Client IP Addresses in AWS Global Accelerator in the AWS Global Accelerator Developer Guide.

" } }, "documentation":"

A complex type for endpoints.

" @@ -644,7 +669,7 @@ "members":{ "EndpointId":{ "shape":"GenericString", - "documentation":"

An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID.

" + "documentation":"

An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID. An Application Load Balancer can be either internal or internet-facing.

" }, "Weight":{ "shape":"EndpointWeight", @@ -657,6 +682,10 @@ "HealthReason":{ "shape":"GenericString", "documentation":"

The reason code associated with why the endpoint is not healthy. If the endpoint state is healthy, a reason code is not provided.

If the endpoint state is unhealthy, the reason code can be one of the following values:

If the endpoint state is initial, the reason code can be one of the following values:

" + }, + "ClientIPPreservationEnabled":{ + "shape":"GenericBoolean", + "documentation":"

Indicates whether client IP address preservation is enabled for an Application Load Balancer endpoint. The value is true or false. The default value is true for new accelerators.

If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the Application Load Balancer endpoint fronted by the accelerator.

For more information, see Viewing Client IP Addresses in AWS Global Accelerator in the AWS Global Accelerator Developer Guide.

" } }, "documentation":"

A complex type for an endpoint. Each endpoint group can include one or more endpoints, such as load balancers.

" @@ -1009,10 +1038,11 @@ }, "UpdateAcceleratorAttributesRequest":{ "type":"structure", + "required":["AcceleratorArn"], "members":{ "AcceleratorArn":{ "shape":"GenericString", - "documentation":"

The Amazon Resource Name (ARN) of the accelerator that you want to update. Attribute is required.

" + "documentation":"

The Amazon Resource Name (ARN) of the accelerator that you want to update.

" }, "FlowLogsEnabled":{ "shape":"GenericBoolean", @@ -1147,5 +1177,5 @@ } } }, - "documentation":"AWS Global Accelerator

This is the AWS Global Accelerator API Reference. This guide is for developers who need detailed information about AWS Global Accelerator API actions, data types, and errors. For more information about Global Accelerator features, see the AWS Global Accelerator Developer Guide.

AWS Global Accelerator is a network layer service in which you create accelerators to improve availability and performance for internet applications used by a global audience.

Global Accelerator provides you with static IP addresses that you associate with your accelerator. These IP addresses are anycast from the AWS edge network and distribute incoming application traffic across multiple endpoint resources in multiple AWS Regions, which increases the availability of your applications. Endpoints can be Elastic IP addresses, Network Load Balancers, and Application Load Balancers that are located in one AWS Region or multiple Regions.

Global Accelerator uses the AWS global network to route traffic to the optimal regional endpoint based on health, client location, and policies that you configure. The service reacts instantly to changes in health or configuration to ensure that internet traffic from clients is directed to only healthy endpoints.

Global Accelerator includes components that work together to help you improve performance and availability for your applications:

Static IP address

AWS Global Accelerator provides you with a set of static IP addresses which are anycast from the AWS edge network and serve as the single fixed entry points for your clients. If you already have Elastic Load Balancing or Elastic IP address resources set up for your applications, you can easily add those to Global Accelerator to allow the resources to be accessed by a Global Accelerator static IP address.

Accelerator

An accelerator directs traffic to optimal endpoints over the AWS global network to improve availability and performance for your internet applications that have a global audience. Each accelerator includes one or more listeners.

Network zone

A network zone services the static IP addresses for your accelerator from a unique IP subnet. Similar to an AWS Availability Zone, a network zone is an isolated unit with its own set of physical infrastructure. When you configure an accelerator, Global Accelerator allocates two IPv4 addresses for it. If one IP address from a network zone becomes unavailable due to IP address blocking by certain client networks, or network disruptions, then client applications can retry on the healthy static IP address from the other isolated network zone.

Listener

A listener processes inbound connections from clients to Global Accelerator, based on the protocol and port that you configure. Each listener has one or more endpoint groups associated with it, and traffic is forwarded to endpoints in one of the groups. You associate endpoint groups with listeners by specifying the Regions that you want to distribute traffic to. Traffic is distributed to optimal endpoints within the endpoint groups associated with a listener.

Endpoint group

Each endpoint group is associated with a specific AWS Region. Endpoint groups include one or more endpoints in the Region. You can increase or reduce the percentage of traffic that would be otherwise directed to an endpoint group by adjusting a setting called a traffic dial. The traffic dial lets you easily do performance testing or blue/green deployment testing for new releases across different AWS Regions, for example.

Endpoint

An endpoint is an Elastic IP address, Network Load Balancer, or Application Load Balancer. Traffic is routed to endpoints based on several factors, including the geo-proximity to the user, the health of the endpoint, and the configuration options that you choose, such as endpoint weights. For each endpoint, you can configure weights, which are numbers that you can use to specify the proportion of traffic to route to each one. This can be useful, for example, to do performance testing within a Region.

" + "documentation":"AWS Global Accelerator

This is the AWS Global Accelerator API Reference. This guide is for developers who need detailed information about AWS Global Accelerator API actions, data types, and errors. For more information about Global Accelerator features, see the AWS Global Accelerator Developer Guide.

AWS Global Accelerator is a network layer service in which you create accelerators to improve availability and performance for internet applications used by a global audience.

You must specify the US West (Oregon) Region (us-west-2) to create or update accelerators.

Global Accelerator provides you with static IP addresses that you associate with your accelerator. These IP addresses are anycast from the AWS edge network and distribute incoming application traffic across multiple endpoint resources in multiple AWS Regions, which increases the availability of your applications. Endpoints can be Elastic IP addresses, Network Load Balancers, and Application Load Balancers that are located in one AWS Region or multiple Regions.

Global Accelerator uses the AWS global network to route traffic to the optimal regional endpoint based on health, client location, and policies that you configure. The service reacts instantly to changes in health or configuration to ensure that internet traffic from clients is directed to only healthy endpoints.

Global Accelerator includes components that work together to help you improve performance and availability for your applications:

Static IP address

AWS Global Accelerator provides you with a set of static IP addresses that are anycast from the AWS edge network and serve as single fixed entry points for your clients. If you already have Elastic Load Balancing or Elastic IP address resources set up for your applications, you can easily add those to Global Accelerator to allow the resources to be accessed by a Global Accelerator static IP address.

Accelerator

An accelerator directs traffic to optimal endpoints over the AWS global network to improve availability and performance for your internet applications that have a global audience. Each accelerator includes one or more listeners.

Network zone

A network zone services the static IP addresses for your accelerator from a unique IP subnet. Similar to an AWS Availability Zone, a network zone is an isolated unit with its own set of physical infrastructure. When you configure an accelerator, Global Accelerator allocates two IPv4 addresses for it. If one IP address from a network zone becomes unavailable due to IP address blocking by certain client networks or network disruptions, then client applications can retry on the healthy static IP address from the other isolated network zone.

Listener

A listener processes inbound connections from clients to Global Accelerator, based on the protocol and port that you configure. Each listener has one or more endpoint groups associated with it, and traffic is forwarded to endpoints in one of the groups. You associate endpoint groups with listeners by specifying the Regions that you want to distribute traffic to. Traffic is distributed to optimal endpoints within the endpoint groups associated with a listener.

Endpoint group

Each endpoint group is associated with a specific AWS Region. Endpoint groups include one or more endpoints in the Region. You can increase or reduce the percentage of traffic that would otherwise be directed to an endpoint group by adjusting a setting called a traffic dial. The traffic dial lets you easily do performance testing or blue/green deployment testing for new releases across different AWS Regions, for example.

Endpoint

An endpoint is an Elastic IP address, Network Load Balancer, or Application Load Balancer. Traffic is routed to endpoints based on several factors, including the geo-proximity to the user, the health of the endpoint, and the configuration options that you choose, such as endpoint weights. For each endpoint, you can configure weights, which are numbers that you can use to specify the proportion of traffic to route to each one. This can be useful, for example, to do performance testing within a Region.
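
For orientation, here is a minimal boto3 sketch of the accelerator, listener, and endpoint group flow described above. The accelerator name and the load balancer ARN are placeholders, and the calls assume a botocore build that includes this service model.

    import uuid
    import boto3

    # Global Accelerator is a global service, but its control-plane API
    # must be called in us-west-2 (see the note above).
    ga = boto3.client("globalaccelerator", region_name="us-west-2")

    accelerator = ga.create_accelerator(
        Name="example-accelerator",              # placeholder name
        Enabled=True,
        IdempotencyToken=str(uuid.uuid4()),
    )["Accelerator"]

    # A listener accepts client connections on the configured protocol/ports.
    listener = ga.create_listener(
        AcceleratorArn=accelerator["AcceleratorArn"],
        Protocol="TCP",
        PortRanges=[{"FromPort": 80, "ToPort": 80}],
        IdempotencyToken=str(uuid.uuid4()),
    )["Listener"]

    # An endpoint group targets one Region; the traffic dial scales traffic
    # to the whole group, while per-endpoint weights split traffic inside it.
    ga.create_endpoint_group(
        ListenerArn=listener["ListenerArn"],
        EndpointGroupRegion="us-east-1",
        TrafficDialPercentage=100.0,
        EndpointConfigurations=[
            # placeholder ALB ARN
            {"EndpointId": "arn:aws:elasticloadbalancing:...", "Weight": 128},
        ],
        IdempotencyToken=str(uuid.uuid4()),
    )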

" } diff --git a/botocore/data/glue/2017-03-31/paginators-1.json b/botocore/data/glue/2017-03-31/paginators-1.json index 094e5c86..9ef561c9 100644 --- a/botocore/data/glue/2017-03-31/paginators-1.json +++ b/botocore/data/glue/2017-03-31/paginators-1.json @@ -83,12 +83,6 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "SecurityConfigurations" - }, - "GetJobBookmarks": { - "input_token": "NextToken", - "limit_key": "MaxResults", - "output_token": "NextToken", - "result_key": "JobBookmarkEntries" } } } diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index c041a5b9..3f037288 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -944,22 +944,6 @@ ], "documentation":"

Returns information on a job bookmark entry.

" }, - "GetJobBookmarks":{ - "name":"GetJobBookmarks", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetJobBookmarksRequest"}, - "output":{"shape":"GetJobBookmarksResponse"}, - "errors":[ - {"shape":"InvalidInputException"}, - {"shape":"EntityNotFoundException"}, - {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} - ], - "documentation":"

Returns information on the job bookmark entries. The list is ordered on decreasing version numbers.

" - }, "GetJobRun":{ "name":"GetJobRun", "http":{ @@ -3501,6 +3485,10 @@ "shape":"WorkerType", "documentation":"

The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.

Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.

" }, + "GlueVersion":{ + "shape":"GlueVersionString", + "documentation":"

Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.

For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

Development endpoints that are created without specifying a Glue version default to Glue 0.9.

You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
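
A hedged sketch of the behavior described above: pinning the Glue version at creation time and selecting Python 3 through the Arguments map. The endpoint name and role ARN are placeholders.

    import boto3

    glue = boto3.client("glue")

    glue.create_dev_endpoint(
        EndpointName="example-dev-endpoint",                   # placeholder
        RoleArn="arn:aws:iam::123456789012:role/GlueDevRole",  # placeholder
        GlueVersion="1.0",                       # determines the Spark/Python versions
        Arguments={"GLUE_PYTHON_VERSION": "3"},  # omit to get the Python 2 default
        WorkerType="G.1X",
        NumberOfWorkers=2,
    )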

" + }, "NumberOfWorkers":{ "shape":"NullableInteger", "documentation":"

The number of workers of a defined workerType that are allocated to the development endpoint.

The maximum number of workers you can define is 299 for G.1X and 149 for G.2X.

" @@ -3566,6 +3554,10 @@ "shape":"WorkerType", "documentation":"

The type of predefined worker that is allocated to the development endpoint. May be a value of Standard, G.1X, or G.2X.

" }, + "GlueVersion":{ + "shape":"GlueVersionString", + "documentation":"

Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.

" + }, "NumberOfWorkers":{ "shape":"NullableInteger", "documentation":"

The number of workers of a defined workerType that are allocated to the development endpoint.

" @@ -3600,7 +3592,7 @@ }, "Arguments":{ "shape":"MapValue", - "documentation":"

The map of arguments used to configure this DevEndpoint.

" + "documentation":"

The map of arguments used to configure this DevEndpoint.

Valid arguments are:

"--enable-glue-datacatalog": ""

"GLUE_PYTHON_VERSION": "3"

"GLUE_PYTHON_VERSION": "2"

You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.

" } } }, @@ -4602,6 +4594,10 @@ "shape":"WorkerType", "documentation":"

The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.

Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.

" }, + "GlueVersion":{ + "shape":"GlueVersionString", + "documentation":"

Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.

For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

Development endpoints that are created without specifying a Glue version default to Glue 0.9.

You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.

" + }, "NumberOfWorkers":{ "shape":"NullableInteger", "documentation":"

The number of workers of a defined workerType that are allocated to the development endpoint.

The maximum number of workers you can define is 299 for G.1X and 149 for G.2X.

" @@ -4656,7 +4652,7 @@ }, "Arguments":{ "shape":"MapValue", - "documentation":"

A map of arguments used to configure the DevEndpoint.

Currently, only \"--enable-glue-datacatalog\": \"\" is supported as a valid argument.

" + "documentation":"

A map of arguments used to configure the DevEndpoint.

Valid arguments are:

"--enable-glue-datacatalog": ""

"GLUE_PYTHON_VERSION": "3"

"GLUE_PYTHON_VERSION": "2"

You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.

" } }, "documentation":"

A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.

" @@ -5298,37 +5294,6 @@ } } }, - "GetJobBookmarksRequest":{ - "type":"structure", - "required":["JobName"], - "members":{ - "JobName":{ - "shape":"JobName", - "documentation":"

The name of the job in question.

" - }, - "MaxResults":{ - "shape":"IntegerValue", - "documentation":"

The maximum size of the response.

" - }, - "NextToken":{ - "shape":"IntegerValue", - "documentation":"

A continuation token, if this is a continuation call.

" - } - } - }, - "GetJobBookmarksResponse":{ - "type":"structure", - "members":{ - "JobBookmarkEntries":{ - "shape":"JobBookmarkEntryList", - "documentation":"

A list of job bookmark entries that defines a point that a job can resume processing.

" - }, - "NextToken":{ - "shape":"IntegerValue", - "documentation":"

A continuation token, which has a value of 1 if all the entries are returned, or > 1 if not all requested job runs have been returned.

" - } - } - }, "GetJobRequest":{ "type":"structure", "required":["JobName"], @@ -6604,10 +6569,6 @@ }, "documentation":"

Defines a point that a job can resume processing.

" }, - "JobBookmarkEntryList":{ - "type":"list", - "member":{"shape":"JobBookmarkEntry"} - }, "JobBookmarksEncryption":{ "type":"structure", "members":{ @@ -9316,7 +9277,7 @@ }, "AddArguments":{ "shape":"MapValue", - "documentation":"

The map of arguments to add the map of arguments used to configure the DevEndpoint.

" + "documentation":"

The map of arguments to add to the map of arguments used to configure the DevEndpoint.

Valid arguments are:

"--enable-glue-datacatalog": ""

"GLUE_PYTHON_VERSION": "3"

"GLUE_PYTHON_VERSION": "2"

You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
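
For example, a short sketch of switching an existing development endpoint (placeholder name) to Python 3 through AddArguments:

    import boto3

    glue = boto3.client("glue")
    glue.update_dev_endpoint(
        EndpointName="example-dev-endpoint",        # placeholder
        AddArguments={"GLUE_PYTHON_VERSION": "3"},
    )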

" } } }, diff --git a/botocore/data/greengrass/2017-06-07/service-2.json b/botocore/data/greengrass/2017-06-07/service-2.json index 2edbd044..3411e863 100644 --- a/botocore/data/greengrass/2017-06-07/service-2.json +++ b/botocore/data/greengrass/2017-06-07/service-2.json @@ -1558,7 +1558,7 @@ "shape" : "BadRequestException", "documentation" : "invalid request" } ], - "documentation" : "Add resource tags to a Greengrass Resource. Valid resources are Group, Connector, Core, Device, Function, Logger, Subscription, and Resource Defintions, and also BulkDeploymentIds." + "documentation" : "Adds tags to a Greengrass resource. Valid resources are 'Group', 'ConnectorDefinition', 'CoreDefinition', 'DeviceDefinition', 'FunctionDefinition', 'LoggerDefinition', 'SubscriptionDefinition', 'ResourceDefinition', and 'BulkDeployment'." }, "UntagResource" : { "name" : "UntagResource", @@ -2854,6 +2854,10 @@ "IotJobId" : { "shape" : "__string", "documentation" : "The IoT Job Id corresponding to this update." + }, + "PlatformSoftwareVersion" : { + "shape" : "__string", + "documentation" : "The software version installed on the device or devices after the update." } } }, @@ -4097,11 +4101,11 @@ }, "Id" : { "shape" : "__string", - "documentation" : "The ID of the group version." + "documentation" : "The ID of the group that the version is associated with." }, "Version" : { "shape" : "__string", - "documentation" : "The unique ID for the version of the group." + "documentation" : "The ID of the group version." } } }, @@ -5887,11 +5891,11 @@ }, "Id" : { "shape" : "__string", - "documentation" : "The ID of the version." + "documentation" : "The ID of the parent definition that the version is associated with." }, "Version" : { "shape" : "__string", - "documentation" : "The unique ID of the version." + "documentation" : "The ID of the version." } }, "documentation" : "Information about a version." diff --git a/botocore/data/iam/2010-05-08/service-2.json b/botocore/data/iam/2010-05-08/service-2.json index 98104080..34eaaf22 100644 --- a/botocore/data/iam/2010-05-08/service-2.json +++ b/botocore/data/iam/2010-05-08/service-2.json @@ -2598,7 +2598,7 @@ "documentation":"

The data type of the value (or values) specified in the ContextKeyValues parameter.

" } }, - "documentation":"

Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.

This data type is used as an input parameter to SimulateCustomPolicy and SimulateCustomPolicy .

" + "documentation":"

Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.

This data type is used as an input parameter to SimulateCustomPolicy and SimulatePrincipalPolicy.
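
A minimal sketch of supplying a condition context key to a simulation; the policy document and key values here are purely illustrative.

    import json
    import boto3

    iam = boto3.client("iam")

    policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Action": "s3:ListBucket",
            "Resource": "*",
            "Condition": {"IpAddress": {"aws:SourceIp": "203.0.113.0/24"}},
        }],
    })

    # The ContextEntries value feeds the Condition evaluation described above.
    resp = iam.simulate_custom_policy(
        PolicyInputList=[policy],
        ActionNames=["s3:ListBucket"],
        ContextEntries=[{
            "ContextKeyName": "aws:SourceIp",
            "ContextKeyValues": ["203.0.113.10"],
            "ContextKeyType": "ip",
        }],
    )
    print(resp["EvaluationResults"][0]["EvalDecision"])  # e.g. "allowed"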

" }, "ContextEntryListType":{ "type":"list", @@ -2861,7 +2861,7 @@ }, "AssumeRolePolicyDocument":{ "shape":"policyDocumentType", - "documentation":"

The trust relationship policy document that grants an entity permission to assume the role.

In IAM, you must provide a JSON policy that has been converted to a string. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

Upon success, the response includes the same trust policy as a URL-encoded JSON string.

" + "documentation":"

The trust relationship policy document that grants an entity permission to assume the role.

In IAM, you must provide a JSON policy that has been converted to a string. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

Upon success, the response includes the same trust policy in JSON format.

" }, "Description":{ "shape":"roleDescriptionType", diff --git a/botocore/data/kinesisanalytics/2015-08-14/service-2.json b/botocore/data/kinesisanalytics/2015-08-14/service-2.json index bceb851f..b808a99b 100644 --- a/botocore/data/kinesisanalytics/2015-08-14/service-2.json +++ b/botocore/data/kinesisanalytics/2015-08-14/service-2.json @@ -254,7 +254,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Retrieves the list of key-value tags assigned to the application.

" + "documentation":"

Retrieves the list of key-value tags assigned to the application. For more information, see Using Tagging.

" }, "StartApplication":{ "name":"StartApplication", @@ -303,7 +303,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Adds one or more key-value tags to a Kinesis Analytics application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50.

" + "documentation":"

Adds one or more key-value tags to a Kinesis Analytics application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Tagging.
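
A brief sketch of the tagging calls, assuming an existing application; the application ARN is a placeholder.

    import boto3

    ka = boto3.client("kinesisanalytics")
    # placeholder ARN
    arn = "arn:aws:kinesisanalytics:us-east-1:123456789012:application/example"

    ka.tag_resource(ResourceARN=arn, Tags=[{"Key": "team", "Value": "analytics"}])
    print(ka.list_tags_for_resource(ResourceARN=arn)["Tags"])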

" }, "UntagResource":{ "name":"UntagResource", @@ -320,7 +320,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Removes one or more tags from a Kinesis Analytics application.

" + "documentation":"

Removes one or more tags from a Kinesis Analytics application. For more information, see Using Tagging.

" }, "UpdateApplication":{ "name":"UpdateApplication", @@ -781,7 +781,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

A list of one or more tags to assign to the application. A tag is a key-value pair that identifies an application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management Guide.

" + "documentation":"

A list of one or more tags to assign to the application. A tag is a key-value pair that identifies an application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Tagging.

" } }, "documentation":"

TBD

" @@ -1176,7 +1176,7 @@ "members":{ "ResourceARNUpdate":{ "shape":"ResourceARN", - "documentation":"

The Amazon Resource Name (ARN) of the new AWS Lambda function that is used to preprocess the records in the stream.

To specify an earlier version of the Lambda function than the latest, include the Lambda function version in the Lambda function ARN. For more information about Lambda ARNs, see Example ARNs: AWS Lambda

" + "documentation":"

The Amazon Resource Name (ARN) of the new AWS Lambda function that is used to preprocess the records in the stream.

To specify an earlier version of the Lambda function than the latest, include the Lambda function version in the Lambda function ARN. For more information about Lambda ARNs, see Example ARNs: AWS Lambda.

" }, "RoleARNUpdate":{ "shape":"RoleARN", @@ -1984,7 +1984,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

Discovery failed to get a record from the streaming source because of the Amazon Kinesis Streams ProvisionedThroughputExceededException. For more information, see GetRecords in the Amazon Kinesis Streams API Reference.

", + "documentation":"

Discovery failed to get a record from the streaming source because of the Amazon Kinesis Streams ProvisionedThroughputExceededException. For more information, see GetRecords in the Amazon Kinesis Streams API Reference.

", "exception":true }, "RoleARN":{ @@ -2165,7 +2165,7 @@ "documentation":"

The value of the key-value tag. The value is optional.

" } }, - "documentation":"

A key-value pair (the value is optional) that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management Guide.

" + "documentation":"

A key-value pair (the value is optional) that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Tagging.

" }, "TagKey":{ "type":"string", diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index e5289b83..8301e2d9 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -1023,6 +1023,7 @@ "shape":"BatchSize", "documentation":"

The maximum number of items to retrieve in a single batch.

" }, + "MaximumBatchingWindowInSeconds":{"shape":"MaximumBatchingWindowInSeconds"}, "StartingPosition":{ "shape":"EventSourcePosition", "documentation":"

The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Streams sources. AT_TIMESTAMP is only supported for Amazon Kinesis streams.
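
The MaximumBatchingWindowInSeconds member added in this hunk (0 to 300 seconds) lets the poller buffer stream records before invoking the function. A hedged sketch, with placeholder ARN and function name:

    import boto3

    lam = boto3.client("lambda")
    lam.create_event_source_mapping(
        # placeholder stream ARN
        EventSourceArn="arn:aws:kinesis:us-east-1:123456789012:stream/example",
        FunctionName="example-function",     # placeholder
        StartingPosition="LATEST",
        BatchSize=100,
        MaximumBatchingWindowInSeconds=5,    # wait up to 5s to fill a batch
    )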

" @@ -1315,6 +1316,7 @@ "shape":"BatchSize", "documentation":"

The maximum number of items to retrieve in a single batch.

" }, + "MaximumBatchingWindowInSeconds":{"shape":"MaximumBatchingWindowInSeconds"}, "EventSourceArn":{ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the event source.

" @@ -2417,6 +2419,11 @@ "max":10000, "min":1 }, + "MaximumBatchingWindowInSeconds":{ + "type":"integer", + "max":300, + "min":0 + }, "MemorySize":{ "type":"integer", "max":3008, @@ -2980,7 +2987,8 @@ "BatchSize":{ "shape":"BatchSize", "documentation":"

The maximum number of items to retrieve in a single batch.

" - } + }, + "MaximumBatchingWindowInSeconds":{"shape":"MaximumBatchingWindowInSeconds"} } }, "UpdateFunctionCodeRequest":{ diff --git a/botocore/data/lightsail/2016-11-28/service-2.json b/botocore/data/lightsail/2016-11-28/service-2.json index 6bbad6a5..b0fa5749 100644 --- a/botocore/data/lightsail/2016-11-28/service-2.json +++ b/botocore/data/lightsail/2016-11-28/service-2.json @@ -48,7 +48,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Attaches a block storage disk to a running or stopped Lightsail instance and exposes it to the instance with the specified disk name.

The attach disk operation supports tag-based access control via resource tags applied to the resource identified by diskName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Attaches a block storage disk to a running or stopped Lightsail instance and exposes it to the instance with the specified disk name.

The attach disk operation supports tag-based access control via resource tags applied to the resource identified by disk name. For more information, see the Lightsail Dev Guide.

" }, "AttachInstancesToLoadBalancer":{ "name":"AttachInstancesToLoadBalancer", @@ -67,7 +67,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Attaches one or more Lightsail instances to a load balancer.

After some time, the instances are attached to the load balancer and the health check status is available.

The attach instances to load balancer operation supports tag-based access control via resource tags applied to the resource identified by loadBalancerName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Attaches one or more Lightsail instances to a load balancer.

After some time, the instances are attached to the load balancer and the health check status is available.

The attach instances to load balancer operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" }, "AttachLoadBalancerTlsCertificate":{ "name":"AttachLoadBalancerTlsCertificate", @@ -86,7 +86,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Attaches a Transport Layer Security (TLS) certificate to your load balancer. TLS is just an updated, more secure version of Secure Socket Layer (SSL).

Once you create and validate your certificate, you can attach it to your load balancer. You can also use this API to rotate the certificates on your account. Use the AttachLoadBalancerTlsCertificate operation with the non-attached certificate, and it will replace the existing one and become the attached certificate.

The attach load balancer tls certificate operation supports tag-based access control via resource tags applied to the resource identified by loadBalancerName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Attaches a Transport Layer Security (TLS) certificate to your load balancer. TLS is just an updated, more secure version of Secure Socket Layer (SSL).

Once you create and validate your certificate, you can attach it to your load balancer. You can also use this API to rotate the certificates on your account. Use the attach load balancer tls certificate operation with the non-attached certificate, and it will replace the existing one and become the attached certificate.

The attach load balancer tls certificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" }, "AttachStaticIp":{ "name":"AttachStaticIp", @@ -124,7 +124,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Closes the public ports on a specific Amazon Lightsail instance.

The close instance public ports operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Closes the public ports on a specific Amazon Lightsail instance.

The close instance public ports operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" }, "CopySnapshot":{ "name":"CopySnapshot", @@ -143,7 +143,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Copies an instance or disk snapshot from one AWS Region to another in Amazon Lightsail.

" + "documentation":"

Copies a manual instance or disk snapshot as another manual snapshot, or copies an automatic instance or disk snapshot as a manual snapshot. This operation can also be used to copy a manual or automatic snapshot of an instance or a disk from one AWS Region to another in Amazon Lightsail.

When copying a manual snapshot, be sure to define the source region, source snapshot name, and target snapshot name parameters.

When copying an automatic snapshot, be sure to define the source region, source resource name, target snapshot name, and either the restore date or the use latest restorable auto snapshot parameters.

Database snapshots cannot be copied at this time.
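
A sketch of both copy modes described above; resource and snapshot names are placeholders.

    import boto3

    # The client's Region is the destination of the copy.
    ls = boto3.client("lightsail", region_name="us-east-1")

    # Copy a manual snapshot from another Region.
    ls.copy_snapshot(
        sourceRegion="us-west-2",
        sourceSnapshotName="web-1-snap",        # placeholder
        targetSnapshotName="web-1-snap-copy",
    )

    # Copy the latest automatic snapshot of an instance as a manual snapshot.
    ls.copy_snapshot(
        sourceRegion="us-west-2",
        sourceResourceName="web-1",             # placeholder instance name
        useLatestRestorableAutoSnapshot=True,
        targetSnapshotName="web-1-auto-copy",
    )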

" }, "CreateCloudFormationStack":{ "name":"CreateCloudFormationStack", @@ -181,7 +181,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a block storage disk that can be attached to a Lightsail instance in the same Availability Zone (e.g., us-east-2a). The disk is created in the regional endpoint that you send the HTTP request to. For more information, see Regions and Availability Zones in Lightsail.

The create disk operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a block storage disk that can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., us-east-2a).

The create disk operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" }, "CreateDiskFromSnapshot":{ "name":"CreateDiskFromSnapshot", @@ -200,7 +200,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a block storage disk from a disk snapshot that can be attached to a Lightsail instance in the same Availability Zone (e.g., us-east-2a). The disk is created in the regional endpoint that you send the HTTP request to. For more information, see Regions and Availability Zones in Lightsail.

The create disk from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by diskSnapshotName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a block storage disk from a manual or automatic snapshot of a disk. The resulting disk can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., us-east-2a).

The create disk from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by disk snapshot name. For more information, see the Lightsail Dev Guide.
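
For instance, a hedged sketch of restoring a disk from its most recent automatic snapshot; the disk names are placeholders.

    import boto3

    ls = boto3.client("lightsail")
    ls.create_disk_from_snapshot(
        diskName="data-disk-restored",      # placeholder
        availabilityZone="us-east-2a",
        sizeInGb=32,
        sourceDiskName="data-disk",         # placeholder; restore from its auto snapshots
        useLatestRestorableAutoSnapshot=True,
    )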

" }, "CreateDiskSnapshot":{ "name":"CreateDiskSnapshot", @@ -257,7 +257,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates one of the following entry records associated with the domain: Address (A), canonical name (CNAME), mail exchanger (MX), name server (NS), start of authority (SOA), service locator (SRV), or text (TXT).

The create domain entry operation supports tag-based access control via resource tags applied to the resource identified by domainName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates one of the following entry records associated with the domain: Address (A), canonical name (CNAME), mail exchanger (MX), name server (NS), start of authority (SOA), service locator (SRV), or text (TXT).

The create domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Lightsail Dev Guide.

" }, "CreateInstanceSnapshot":{ "name":"CreateInstanceSnapshot", @@ -295,7 +295,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates one or more Amazon Lightsail virtual private servers, or instances. Create instances using active blueprints. Inactive blueprints are listed to support customers with existing instances but are not necessarily available for launch of new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases. Use the get blueprints operation to return a list of available blueprints.

The create instances operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates one or more Amazon Lightsail instances.

The create instances operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" }, "CreateInstancesFromSnapshot":{ "name":"CreateInstancesFromSnapshot", @@ -314,7 +314,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Uses a specific snapshot as a blueprint for creating one or more new instances that are based on that identical configuration.

The create instances from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by instanceSnapshotName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates one or more new instances from a manual or automatic snapshot of an instance.

The create instances from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by instance snapshot name. For more information, see the Lightsail Dev Guide.

" }, "CreateKeyPair":{ "name":"CreateKeyPair", @@ -371,7 +371,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a Lightsail load balancer TLS certificate.

TLS is just an updated, more secure version of Secure Socket Layer (SSL).

The create load balancer tls certificate operation supports tag-based access control via resource tags applied to the resource identified by loadBalancerName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates a Lightsail load balancer TLS certificate.

TLS is just an updated, more secure version of Secure Socket Layer (SSL).

The create load balancer tls certificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" }, "CreateRelationalDatabase":{ "name":"CreateRelationalDatabase", @@ -430,6 +430,24 @@ ], "documentation":"

Creates a snapshot of your database in Amazon Lightsail. You can use snapshots for backups, to make copies of a database, and to save data before deleting a database.

The create relational database snapshot operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.

" }, + "DeleteAutoSnapshot":{ + "name":"DeleteAutoSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAutoSnapshotRequest"}, + "output":{"shape":"DeleteAutoSnapshotResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"OperationFailureException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Deletes an automatic snapshot for an instance or disk.
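
A minimal sketch; the resource name is a placeholder, and the date (YYYY-MM-DD) identifies which automatic snapshot to delete.

    import boto3

    ls = boto3.client("lightsail")
    ls.delete_auto_snapshot(resourceName="web-1", date="2019-10-01")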

" + }, "DeleteDisk":{ "name":"DeleteDisk", "http":{ @@ -447,7 +465,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes the specified block storage disk. The disk must be in the available state (not attached to a Lightsail instance).

The disk may remain in the deleting state for several minutes.

The delete disk operation supports tag-based access control via resource tags applied to the resource identified by diskName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes the specified block storage disk. The disk must be in the available state (not attached to a Lightsail instance).

The disk may remain in the deleting state for several minutes.

The delete disk operation supports tag-based access control via resource tags applied to the resource identified by disk name. For more information, see the Lightsail Dev Guide.

" }, "DeleteDiskSnapshot":{ "name":"DeleteDiskSnapshot", @@ -466,7 +484,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes the specified disk snapshot.

When you make periodic snapshots of a disk, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the disk.

The delete disk snapshot operation supports tag-based access control via resource tags applied to the resource identified by diskSnapshotName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes the specified disk snapshot.

When you make periodic snapshots of a disk, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the disk.

The delete disk snapshot operation supports tag-based access control via resource tags applied to the resource identified by disk snapshot name. For more information, see the Lightsail Dev Guide.

" }, "DeleteDomain":{ "name":"DeleteDomain", @@ -485,7 +503,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes the specified domain recordset and all of its domain records.

The delete domain operation supports tag-based access control via resource tags applied to the resource identified by domainName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes the specified domain recordset and all of its domain records.

The delete domain operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Lightsail Dev Guide.

" }, "DeleteDomainEntry":{ "name":"DeleteDomainEntry", @@ -504,7 +522,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a specific domain entry.

The delete domain entry operation supports tag-based access control via resource tags applied to the resource identified by domainName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes a specific domain entry.

The delete domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Lightsail Dev Guide.

" }, "DeleteInstance":{ "name":"DeleteInstance", @@ -523,7 +541,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a specific Amazon Lightsail virtual private server, or instance.

The delete instance operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes an Amazon Lightsail instance.

The delete instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" }, "DeleteInstanceSnapshot":{ "name":"DeleteInstanceSnapshot", @@ -542,7 +560,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a specific snapshot of a virtual private server (or instance).

The delete instance snapshot operation supports tag-based access control via resource tags applied to the resource identified by instanceSnapshotName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes a specific snapshot of a virtual private server (or instance).

The delete instance snapshot operation supports tag-based access control via resource tags applied to the resource identified by instance snapshot name. For more information, see the Lightsail Dev Guide.

" }, "DeleteKeyPair":{ "name":"DeleteKeyPair", @@ -561,7 +579,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a specific SSH key pair.

The delete key pair operation supports tag-based access control via resource tags applied to the resource identified by keyPairName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes a specific SSH key pair.

The delete key pair operation supports tag-based access control via resource tags applied to the resource identified by key pair name. For more information, see the Lightsail Dev Guide.

" }, "DeleteKnownHostKeys":{ "name":"DeleteKnownHostKeys", @@ -599,7 +617,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes a Lightsail load balancer and all its associated SSL/TLS certificates. Once the load balancer is deleted, you will need to create a new load balancer, create a new certificate, and verify domain ownership again.

The delete load balancer operation supports tag-based access control via resource tags applied to the resource identified by loadBalancerName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes a Lightsail load balancer and all its associated SSL/TLS certificates. Once the load balancer is deleted, you will need to create a new load balancer, create a new certificate, and verify domain ownership again.

The delete load balancer operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" }, "DeleteLoadBalancerTlsCertificate":{ "name":"DeleteLoadBalancerTlsCertificate", @@ -618,7 +636,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes an SSL/TLS certificate associated with a Lightsail load balancer.

The delete load balancer tls certificate operation supports tag-based access control via resource tags applied to the resource identified by loadBalancerName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes an SSL/TLS certificate associated with a Lightsail load balancer.

The delete load balancer tls certificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" }, "DeleteRelationalDatabase":{ "name":"DeleteRelationalDatabase", @@ -675,7 +693,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Detaches a stopped block storage disk from a Lightsail instance. Make sure to unmount any file systems on the device within your operating system before stopping the instance and detaching the disk.

The detach disk operation supports tag-based access control via resource tags applied to the resource identified by diskName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Detaches a stopped block storage disk from a Lightsail instance. Make sure to unmount any file systems on the device within your operating system before stopping the instance and detaching the disk.

The detach disk operation supports tag-based access control via resource tags applied to the resource identified by disk name. For more information, see the Lightsail Dev Guide.

" }, "DetachInstancesFromLoadBalancer":{ "name":"DetachInstancesFromLoadBalancer", @@ -694,7 +712,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Detaches the specified instances from a Lightsail load balancer.

This operation waits until the instances are no longer needed before they are detached from the load balancer.

The detach instances from load balancer operation supports tag-based access control via resource tags applied to the resource identified by loadBalancerName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Detaches the specified instances from a Lightsail load balancer.

This operation waits until the instances are no longer needed before they are detached from the load balancer.

The detach instances from load balancer operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" }, "DetachStaticIp":{ "name":"DetachStaticIp", @@ -715,6 +733,24 @@ ], "documentation":"

Detaches a static IP from the Amazon Lightsail instance to which it is attached.

" }, + "DisableAddOn":{ + "name":"DisableAddOn", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableAddOnRequest"}, + "output":{"shape":"DisableAddOnResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"OperationFailureException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Disables an add-on for an Amazon Lightsail resource. For more information, see the Lightsail Dev Guide.

" + }, "DownloadDefaultKeyPair":{ "name":"DownloadDefaultKeyPair", "http":{ @@ -734,6 +770,24 @@ ], "documentation":"

Downloads the default SSH key pair from the user's account.

" }, + "EnableAddOn":{ + "name":"EnableAddOn", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableAddOnRequest"}, + "output":{"shape":"EnableAddOnResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"OperationFailureException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Enables or modifies an add-on for an Amazon Lightsail resource. For more information, see the Lightsail Dev Guide.
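
A sketch of enabling the automatic snapshot add-on with a daily snapshot window, then disabling it again; the instance name is a placeholder.

    import boto3

    ls = boto3.client("lightsail")

    # Enable (or modify) daily automatic snapshots starting at 06:00 UTC.
    ls.enable_add_on(
        resourceName="web-1",               # placeholder
        addOnRequest={
            "addOnType": "AutoSnapshot",
            "autoSnapshotAddOnRequest": {"snapshotTimeOfDay": "06:00"},
        },
    )

    # Turn the add-on off again.
    ls.disable_add_on(addOnType="AutoSnapshot", resourceName="web-1")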

" + }, "ExportSnapshot":{ "name":"ExportSnapshot", "http":{ @@ -751,7 +805,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the create cloud formation stack operation to create new Amazon EC2 instances.

Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.

The export snapshot operation supports tag-based access control via resource tags applied to the resource identified by sourceSnapshotName. For more information, see the Lightsail Dev Guide.

Use the get instance snapshots or get disk snapshots operations to get a list of snapshots that you can export to Amazon EC2.

" + "documentation":"

Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the create cloud formation stack operation to create new Amazon EC2 instances.

Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.

The export snapshot operation supports tag-based access control via resource tags applied to the resource identified by source snapshot name. For more information, see the Lightsail Dev Guide.

Use the get instance snapshots or get disk snapshots operations to get a list of snapshots that you can export to Amazon EC2.
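
A one-call sketch with a placeholder snapshot name; the resulting export record can then be fed to the create cloud formation stack operation.

    import boto3

    ls = boto3.client("lightsail")
    ls.export_snapshot(sourceSnapshotName="web-1-snap")  # placeholder
    print(ls.get_export_snapshot_records()["exportSnapshotRecords"])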

" }, "GetActiveNames":{ "name":"GetActiveNames", @@ -772,6 +826,24 @@ ], "documentation":"

Returns the names of all active (not deleted) resources.

" }, + "GetAutoSnapshots":{ + "name":"GetAutoSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAutoSnapshotsRequest"}, + "output":{"shape":"GetAutoSnapshotsResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"OperationFailureException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns the available automatic snapshots for the specified resource name. For more information, see the Lightsail Dev Guide.
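
A short sketch of listing the automatic snapshots for a resource (placeholder name):

    import boto3

    ls = boto3.client("lightsail")
    for snap in ls.get_auto_snapshots(resourceName="web-1")["autoSnapshots"]:
        print(snap["date"], snap["status"])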

" + }, "GetBlueprints":{ "name":"GetBlueprints", "http":{ @@ -789,7 +861,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the list of available instance images, or blueprints. You can use a blueprint to create a new virtual private server already running a specific operating system, as well as a preinstalled app or development stack. The software each instance is running depends on the blueprint image you choose.

" + "documentation":"

Returns the list of available instance images, or blueprints. You can use a blueprint to create a new instance already running a specific operating system, as well as a preinstalled app or development stack. The software each instance is running depends on the blueprint image you choose.

Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.

" }, "GetBundles":{ "name":"GetBundles", @@ -998,7 +1070,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns temporary SSH keys you can use to connect to a specific virtual private server, or instance.

The get instance access details operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Returns temporary SSH keys you can use to connect to a specific virtual private server, or instance.

The get instance access details operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" }, "GetInstanceMetricData":{ "name":"GetInstanceMetricData", @@ -1435,7 +1507,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the current, previous, or pending versions of the master user password for a Lightsail database.

The asdf operation GetRelationalDatabaseMasterUserPassword supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName.

" + "documentation":"

Returns the current, previous, or pending versions of the master user password for a Lightsail database.

The GetRelationalDatabaseMasterUserPassword operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName.

" }, "GetRelationalDatabaseMetricData":{ "name":"GetRelationalDatabaseMetricData", @@ -1625,7 +1697,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Adds public ports to an Amazon Lightsail instance.

The open instance public ports operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Adds public ports to an Amazon Lightsail instance.

The open instance public ports operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" }, "PeerVpc":{ "name":"PeerVpc", @@ -1663,7 +1735,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Sets the specified open ports for an Amazon Lightsail instance, and closes all ports for every protocol not included in the current request.

The put instance public ports operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Sets the specified open ports for an Amazon Lightsail instance, and closes all ports for every protocol not included in the current request.

The put instance public ports operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" }, "RebootInstance":{ "name":"RebootInstance", @@ -1682,7 +1754,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Restarts a specific instance.

The reboot instance operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Restarts a specific instance.

The reboot instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" }, "RebootRelationalDatabase":{ "name":"RebootRelationalDatabase", @@ -1739,7 +1811,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Starts a specific Amazon Lightsail instance from a stopped state. To restart an instance, use the reboot instance operation.

When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Lightsail Dev Guide.

The start instance operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Starts a specific Amazon Lightsail instance from a stopped state. To restart an instance, use the reboot instance operation.

When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Lightsail Dev Guide.

The start instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" }, "StartRelationalDatabase":{ "name":"StartRelationalDatabase", @@ -1777,7 +1849,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Stops a specific Amazon Lightsail instance that is currently running.

When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Lightsail Dev Guide.

The stop instance operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Stops a specific Amazon Lightsail instance that is currently running.

When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Lightsail Dev Guide.

The stop instance operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" }, "StopRelationalDatabase":{ "name":"StopRelationalDatabase", @@ -1815,7 +1887,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Adds one or more tags to the specified Amazon Lightsail resource. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see the Lightsail Dev Guide.

The tag resource operation supports tag-based access control via request tags and resource tags applied to the resource identified by resourceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Adds one or more tags to the specified Amazon Lightsail resource. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see the Lightsail Dev Guide.

The tag resource operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name. For more information, see the Lightsail Dev Guide.

" }, "UnpeerVpc":{ "name":"UnpeerVpc", @@ -1853,7 +1925,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes the specified set of tag keys and their values from the specified Amazon Lightsail resource.

The untag resource operation supports tag-based access control via request tags and resource tags applied to the resource identified by resourceName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Deletes the specified set of tag keys and their values from the specified Amazon Lightsail resource.

The untag resource operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name. For more information, see the Lightsail Dev Guide.

" }, "UpdateDomainEntry":{ "name":"UpdateDomainEntry", @@ -1872,7 +1944,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Updates a domain recordset after it is created.

The update domain entry operation supports tag-based access control via resource tags applied to the resource identified by domainName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Updates a domain recordset after it is created.

The update domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Lightsail Dev Guide.

" }, "UpdateLoadBalancerAttribute":{ "name":"UpdateLoadBalancerAttribute", @@ -1891,7 +1963,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Updates the specified attribute for a load balancer. You can only update one attribute at a time.

The update load balancer attribute operation supports tag-based access control via resource tags applied to the resource identified by loadBalancerName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Updates the specified attribute for a load balancer. You can only update one attribute at a time.

The update load balancer attribute operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" }, "UpdateRelationalDatabase":{ "name":"UpdateRelationalDatabase", @@ -1962,6 +2034,55 @@ "documentation":"

Lightsail throws this exception when an account is still in the setup in progress state.

", "exception":true }, + "AddOn":{ + "type":"structure", + "members":{ + "name":{ + "shape":"string", + "documentation":"

The name of the add-on.

" + }, + "status":{ + "shape":"string", + "documentation":"

The status of the add-on.

" + }, + "snapshotTimeOfDay":{ + "shape":"TimeOfDay", + "documentation":"

The daily time when an automatic snapshot is created.

The time shown is in HH:00 format, and in Coordinated Universal Time (UTC).

The snapshot is automatically created between the time shown and up to 45 minutes after.

" + }, + "nextSnapshotTimeOfDay":{ + "shape":"TimeOfDay", + "documentation":"

The next daily time an automatic snapshot will be created.

The time shown is in HH:00 format, and in Coordinated Universal Time (UTC).

The snapshot is automatically created between the time shown and up to 45 minutes after.

" + } + }, + "documentation":"

Describes an add-on that is enabled for an Amazon Lightsail resource.

" + }, + "AddOnList":{ + "type":"list", + "member":{"shape":"AddOn"} + }, + "AddOnRequest":{ + "type":"structure", + "required":["addOnType"], + "members":{ + "addOnType":{ + "shape":"AddOnType", + "documentation":"

The add-on type.

" + }, + "autoSnapshotAddOnRequest":{ + "shape":"AutoSnapshotAddOnRequest", + "documentation":"

An object that represents additional parameters when enabling or modifying the automatic snapshot add-on.

" + } + }, + "documentation":"

Describes a request to enable, modify, or disable an add-on for an Amazon Lightsail resource.

An additional cost may be associated with enabling add-ons. For more information, see the Lightsail pricing page.
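For illustration, the AddOnRequest shape above maps onto a plain Python dict when used through botocore/boto3, and the snapshot time must satisfy the TimeOfDay pattern declared later in this model (a minimal sketch; the 06:00 value is an arbitrary example):

import re

# TimeOfDay pattern as declared in this model (24-hour HH:MM).
TIME_OF_DAY = re.compile(r"^(0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$")

# An AddOnRequest payload; AutoSnapshot is the only AddOnType enum value defined here.
add_on_request = {
    "addOnType": "AutoSnapshot",
    "autoSnapshotAddOnRequest": {"snapshotTimeOfDay": "06:00"},
}
assert TIME_OF_DAY.match(add_on_request["autoSnapshotAddOnRequest"]["snapshotTimeOfDay"])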

" + }, + "AddOnRequestList":{ + "type":"list", + "member":{"shape":"AddOnRequest"} + }, + "AddOnType":{ + "type":"string", + "enum":["AutoSnapshot"] + }, "AllocateStaticIpRequest":{ "type":"structure", "required":["staticIpName"], @@ -2090,11 +2211,78 @@ } } }, + "AttachedDisk":{ + "type":"structure", + "members":{ + "path":{ + "shape":"string", + "documentation":"

The path of the disk (e.g., /dev/xvdf).

" + }, + "sizeInGb":{ + "shape":"integer", + "documentation":"

The size of the disk in GB.

" + } + }, + "documentation":"

Describes a block storage disk that is attached to an instance, and is included in an automatic snapshot.

" + }, + "AttachedDiskList":{ + "type":"list", + "member":{"shape":"AttachedDisk"} + }, "AttachedDiskMap":{ "type":"map", "key":{"shape":"ResourceName"}, "value":{"shape":"DiskMapList"} }, + "AutoSnapshotAddOnRequest":{ + "type":"structure", + "members":{ + "snapshotTimeOfDay":{ + "shape":"TimeOfDay", + "documentation":"

The daily time when an automatic snapshot will be created.

Constraints:

" + } + }, + "documentation":"

Describes a request to enable or modify the automatic snapshot add-on for an Amazon Lightsail instance or disk.

When you modify the automatic snapshot time for a resource, it is typically effective immediately except under the following conditions:

" + }, + "AutoSnapshotDate":{ + "type":"string", + "pattern":"^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + }, + "AutoSnapshotDetails":{ + "type":"structure", + "members":{ + "date":{ + "shape":"string", + "documentation":"

The date of the automatic snapshot in YYYY-MM-DD format.

" + }, + "createdAt":{ + "shape":"IsoDate", + "documentation":"

The timestamp when the automatic snapshot was created.

" + }, + "status":{ + "shape":"AutoSnapshotStatus", + "documentation":"

The status of the automatic snapshot.

" + }, + "fromAttachedDisks":{ + "shape":"AttachedDiskList", + "documentation":"

An array of objects that describe the block storage disks attached to the instance when the automatic snapshot was created.

" + } + }, + "documentation":"

Describes an automatic snapshot.

" + }, + "AutoSnapshotDetailsList":{ + "type":"list", + "member":{"shape":"AutoSnapshotDetails"} + }, + "AutoSnapshotStatus":{ + "type":"string", + "enum":[ + "Success", + "Failed", + "InProgress", + "NotFound" + ] + }, "AvailabilityZone":{ "type":"structure", "members":{ @@ -2330,14 +2518,25 @@ "CopySnapshotRequest":{ "type":"structure", "required":[ - "sourceSnapshotName", "targetSnapshotName", "sourceRegion" ], "members":{ "sourceSnapshotName":{ "shape":"ResourceName", - "documentation":"

The name of the source instance or disk snapshot to be copied.

" + "documentation":"

The name of the source instance or disk snapshot to be copied.

Define this parameter only when copying a manual snapshot as another manual snapshot.

" + }, + "sourceResourceName":{ + "shape":"string", + "documentation":"

The name of the source resource from which the automatic snapshot was created.

Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" + }, + "restoreDate":{ + "shape":"string", + "documentation":"

The date of the automatic snapshot to copy for the new manual snapshot.

Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" + }, + "useLatestRestorableAutoSnapshot":{ + "shape":"boolean", + "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" }, "targetSnapshotName":{ "shape":"ResourceName", @@ -2345,7 +2544,7 @@ }, "sourceRegion":{ "shape":"RegionName", - "documentation":"

The AWS Region where the source snapshot is located.

" + "documentation":"

The AWS Region where the source manual or automatic snapshot is located.
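Taken together, these members support two call patterns: copying a manual snapshot (sourceSnapshotName) or promoting an automatic snapshot to a manual one. A minimal boto3 sketch of the new automatic-snapshot path, with hypothetical names and region:

import boto3

lightsail = boto3.client("lightsail", region_name="us-east-2")

# Copy the latest automatic snapshot of an instance into a manual snapshot.
lightsail.copy_snapshot(
    sourceResourceName="my-instance",       # set this instead of sourceSnapshotName
    useLatestRestorableAutoSnapshot=True,   # mutually exclusive with restoreDate
    targetSnapshotName="my-instance-manual",
    sourceRegion="us-east-2",               # still required, per this model
)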

" } } }, @@ -2381,7 +2580,6 @@ "type":"structure", "required":[ "diskName", - "diskSnapshotName", "availabilityZone", "sizeInGb" ], @@ -2392,7 +2590,7 @@ }, "diskSnapshotName":{ "shape":"ResourceName", - "documentation":"

The name of the disk snapshot (e.g., my-snapshot) from which to create the new storage disk.

" + "documentation":"

The name of the disk snapshot (e.g., my-snapshot) from which to create the new storage disk.

This parameter cannot be defined together with the source disk name parameter. The disk snapshot name and source disk name parameters are mutually exclusive.

" }, "availabilityZone":{ "shape":"NonEmptyString", @@ -2405,6 +2603,22 @@ "tags":{ "shape":"TagList", "documentation":"

The tag keys and optional values to add to the resource during create.

To tag a resource after it has been created, see the tag resource operation.

" + }, + "addOns":{ + "shape":"AddOnRequestList", + "documentation":"

An array of objects that represent the add-ons to enable for the new disk.

" + }, + "sourceDiskName":{ + "shape":"string", + "documentation":"

The name of the source disk from which the source automatic snapshot was created.

This parameter cannot be defined together with the disk snapshot name parameter. The source disk name and disk snapshot name parameters are mutually exclusive.

Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + }, + "restoreDate":{ + "shape":"string", + "documentation":"

The date of the automatic snapshot to use for the new disk.

Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + }, + "useLatestRestorableAutoSnapshot":{ + "shape":"boolean", + "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.
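A minimal boto3 sketch of creating a disk from an automatic snapshot rather than a manual one (all names and the date are hypothetical):

import boto3

lightsail = boto3.client("lightsail", region_name="us-east-2")

lightsail.create_disk_from_snapshot(
    diskName="restored-disk",
    availabilityZone="us-east-2a",   # same zone as the instance you will attach to
    sizeInGb=32,
    sourceDiskName="my-disk",        # mutually exclusive with diskSnapshotName
    restoreDate="2019-09-30",        # a date listed by get_auto_snapshots
)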

" } } }, @@ -2431,7 +2645,7 @@ }, "availabilityZone":{ "shape":"NonEmptyString", - "documentation":"

The Availability Zone where you want to create the disk (e.g., us-east-2a). Choose the same Availability Zone as the Lightsail instance where you want to create the disk.

Use the GetRegions operation to list the Availability Zones where Lightsail is currently available.

" + "documentation":"

The Availability Zone where you want to create the disk (e.g., us-east-2a). Use the same Availability Zone as the Lightsail instance to which you want to attach the disk.

Use the get regions operation to list the Availability Zones where Lightsail is currently available.

" }, "sizeInGb":{ "shape":"integer", @@ -2440,6 +2654,10 @@ "tags":{ "shape":"TagList", "documentation":"

The tag keys and optional values to add to the resource during create.

To tag a resource after it has been created, see the tag resource operation.

" + }, + "addOns":{ + "shape":"AddOnRequestList", + "documentation":"

An array of objects that represent the add-ons to enable for the new disk.

" } } }, @@ -2567,7 +2785,6 @@ "required":[ "instanceNames", "availabilityZone", - "instanceSnapshotName", "bundleId" ], "members":{ @@ -2585,7 +2802,7 @@ }, "instanceSnapshotName":{ "shape":"ResourceName", - "documentation":"

The name of the instance snapshot on which you are basing your new instances. Use the get instance snapshots operation to return information about your existing snapshots.

" + "documentation":"

The name of the instance snapshot on which you are basing your new instances. Use the get instance snapshots operation to return information about your existing snapshots.

This parameter cannot be defined together with the source instance name parameter. The instance snapshot name and source instance name parameters are mutually exclusive.

" }, "bundleId":{ "shape":"NonEmptyString", @@ -2602,6 +2819,22 @@ "tags":{ "shape":"TagList", "documentation":"

The tag keys and optional values to add to the resource during create.

To tag a resource after it has been created, see the tag resource operation.

" + }, + "addOns":{ + "shape":"AddOnRequestList", + "documentation":"

An array of objects representing the add-ons to enable for the new instance.

" + }, + "sourceInstanceName":{ + "shape":"string", + "documentation":"

The name of the source instance from which the source automatic snapshot was created.

This parameter cannot be defined together with the instance snapshot name parameter. The source instance name and instance snapshot name parameters are mutually exclusive.

Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + }, + "restoreDate":{ + "shape":"string", + "documentation":"

The date of the automatic snapshot to use for the new instance.

Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + }, + "useLatestRestorableAutoSnapshot":{ + "shape":"boolean", + "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.
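The instance path mirrors the disk path above; a minimal boto3 sketch, with a hypothetical bundle ID and instance names:

import boto3

lightsail = boto3.client("lightsail", region_name="us-east-2")

lightsail.create_instances_from_snapshot(
    instanceNames=["restored-instance"],
    availabilityZone="us-east-2a",
    bundleId="micro_2_0",                   # hypothetical bundle ID
    sourceInstanceName="my-instance",       # mutually exclusive with instanceSnapshotName
    useLatestRestorableAutoSnapshot=True,   # mutually exclusive with restoreDate
)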

" } } }, @@ -2638,7 +2871,7 @@ }, "blueprintId":{ "shape":"NonEmptyString", - "documentation":"

The ID for a virtual private server image (e.g., app_wordpress_4_4 or app_lamp_7_0). Use the get blueprints operation to return a list of available images (or blueprints).

" + "documentation":"

The ID for a virtual private server image (e.g., app_wordpress_4_4 or app_lamp_7_0). Use the get blueprints operation to return a list of available images (or blueprints).

Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.

" }, "bundleId":{ "shape":"NonEmptyString", @@ -2655,6 +2888,10 @@ "tags":{ "shape":"TagList", "documentation":"

The tag keys and optional values to add to the resource during create.

To tag a resource after it has been created, see the tag resource operation.

" + }, + "addOns":{ + "shape":"AddOnRequestList", + "documentation":"

An array of objects representing the add-ons to enable for the new instance.

" } } }, @@ -2817,7 +3054,7 @@ }, "restoreTime":{ "shape":"IsoDate", - "documentation":"

The date and time to restore your database from.

Constraints:

" + "documentation":"

The date and time to restore your database from.

Constraints:

" }, "useLatestRestorableTime":{ "shape":"boolean", @@ -2878,11 +3115,11 @@ }, "preferredBackupWindow":{ "shape":"string", - "documentation":"

The daily time range during which automated backups are created for your new database if automated backups are enabled.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. For more information about the preferred backup window time blocks for each region, see the Working With Backups guide in the Amazon Relational Database Service (Amazon RDS) documentation.

Constraints:

" + "documentation":"

The daily time range during which automated backups are created for your new database if automated backups are enabled.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. For more information about the preferred backup window time blocks for each region, see the Working With Backups guide in the Amazon Relational Database Service (Amazon RDS) documentation.

Constraints:

" }, "preferredMaintenanceWindow":{ "shape":"string", - "documentation":"

The weekly time range during which system maintenance can occur on your new database.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Constraints:

" + "documentation":"

The weekly time range during which system maintenance can occur on your new database.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Constraints:

" }, "publiclyAccessible":{ "shape":"boolean", @@ -2933,6 +3170,32 @@ } } }, + "DeleteAutoSnapshotRequest":{ + "type":"structure", + "required":[ + "resourceName", + "date" + ], + "members":{ + "resourceName":{ + "shape":"ResourceName", + "documentation":"

The name of the source resource from which to delete the automatic snapshot.

" + }, + "date":{ + "shape":"AutoSnapshotDate", + "documentation":"

The date of the automatic snapshot to delete in YYYY-MM-DD format.

Use the get auto snapshots operation to get the available automatic snapshots for a resource.
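A minimal boto3 sketch of deleting one automatic snapshot by date (resource name and date are hypothetical):

import boto3

lightsail = boto3.client("lightsail", region_name="us-east-2")

lightsail.delete_auto_snapshot(
    resourceName="my-instance",
    date="2019-09-30",   # YYYY-MM-DD, must match the AutoSnapshotDate pattern
)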

" + } + } + }, + "DeleteAutoSnapshotResult":{ + "type":"structure", + "members":{ + "operations":{ + "shape":"OperationList", + "documentation":"

An array of objects that describe the result of your request.

" + } + } + }, "DeleteDiskRequest":{ "type":"structure", "required":["diskName"], @@ -2940,6 +3203,10 @@ "diskName":{ "shape":"ResourceName", "documentation":"

The unique name of the disk you want to delete (e.g., my-disk).

" + }, + "forceDeleteAddOns":{ + "shape":"boolean", + "documentation":"

A Boolean value to indicate whether to delete the enabled add-ons for the disk.

" } } }, @@ -2948,7 +3215,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An object describing the API operations.

" + "documentation":"

An array of objects that describe the result of your request.

" } } }, @@ -3023,6 +3290,10 @@ "instanceName":{ "shape":"ResourceName", "documentation":"

The name of the instance to delete.

" + }, + "forceDeleteAddOns":{ + "shape":"boolean", + "documentation":"

A Boolean value to indicate whether to delete the enabled add-ons for the instance.

" } } }, @@ -3265,6 +3536,32 @@ } } }, + "DisableAddOnRequest":{ + "type":"structure", + "required":[ + "addOnType", + "resourceName" + ], + "members":{ + "addOnType":{ + "shape":"AddOnType", + "documentation":"

The add-on type to disable.

" + }, + "resourceName":{ + "shape":"ResourceName", + "documentation":"

The name of the source resource from which to disable the add-on.
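A minimal boto3 sketch of disabling the add-on on a resource (resource name hypothetical):

import boto3

lightsail = boto3.client("lightsail", region_name="us-east-2")

response = lightsail.disable_add_on(
    addOnType="AutoSnapshot",     # the only add-on type defined in this model
    resourceName="my-instance",   # an instance or disk name
)
print(response["operations"])     # OperationList describing the result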

" + } + } + }, + "DisableAddOnResult":{ + "type":"structure", + "members":{ + "operations":{ + "shape":"OperationList", + "documentation":"

An array of objects that describe the result of your request.

" + } + } + }, "Disk":{ "type":"structure", "members":{ @@ -3296,6 +3593,10 @@ "shape":"TagList", "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" }, + "addOns":{ + "shape":"AddOnList", + "documentation":"

An array of objects representing the add-ons enabled on the disk.

" + }, "sizeInGb":{ "shape":"integer", "documentation":"

The size of the disk in GB.

" @@ -3335,7 +3636,7 @@ "deprecated":true } }, - "documentation":"

Describes a system disk or an block storage disk.

" + "documentation":"

Describes a system disk or a block storage disk.

" }, "DiskInfo":{ "type":"structure", @@ -3443,6 +3744,10 @@ "fromInstanceArn":{ "shape":"NonEmptyString", "documentation":"

The Amazon Resource Name (ARN) of the source instance from which the disk (system volume) snapshot was created.

" + }, + "isFromAutoSnapshot":{ + "shape":"boolean", + "documentation":"

A Boolean value indicating whether the snapshot was created from an automatic snapshot.

" } }, "documentation":"

Describes a block storage disk snapshot.

" @@ -3587,6 +3892,32 @@ } } }, + "EnableAddOnRequest":{ + "type":"structure", + "required":[ + "resourceName", + "addOnRequest" + ], + "members":{ + "resourceName":{ + "shape":"ResourceName", + "documentation":"

The name of the source resource for which to enable or modify the add-on.

" + }, + "addOnRequest":{ + "shape":"AddOnRequest", + "documentation":"

An object that represents the add-on to enable or modify.
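A minimal boto3 sketch of enabling the automatic snapshot add-on through this request shape (resource name and time are hypothetical):

import boto3

lightsail = boto3.client("lightsail", region_name="us-east-2")

response = lightsail.enable_add_on(
    resourceName="my-instance",
    addOnRequest={
        "addOnType": "AutoSnapshot",
        "autoSnapshotAddOnRequest": {"snapshotTimeOfDay": "06:00"},
    },
)
print(response["operations"])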

" + } + } + }, + "EnableAddOnResult":{ + "type":"structure", + "members":{ + "operations":{ + "shape":"OperationList", + "documentation":"

An array of objects that describe the result of your request.

" + } + } + }, "ExportSnapshotRecord":{ "type":"structure", "members":{ @@ -3715,6 +4046,33 @@ } } }, + "GetAutoSnapshotsRequest":{ + "type":"structure", + "required":["resourceName"], + "members":{ + "resourceName":{ + "shape":"ResourceName", + "documentation":"

The name of the source resource from which to get automatic snapshot information.

" + } + } + }, + "GetAutoSnapshotsResult":{ + "type":"structure", + "members":{ + "resourceName":{ + "shape":"ResourceName", + "documentation":"

The name of the source resource for the automatic snapshots.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The resource type (e.g., Instance or Disk).

" + }, + "autoSnapshots":{ + "shape":"AutoSnapshotDetailsList", + "documentation":"

An array of objects that describe the automatic snapshots that are available for the specified source resource.
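A minimal boto3 sketch of listing the automatic snapshots for a resource (resource name hypothetical); the date field returned here is what copy_snapshot and delete_auto_snapshot expect:

import boto3

lightsail = boto3.client("lightsail", region_name="us-east-2")

resp = lightsail.get_auto_snapshots(resourceName="my-instance")
for snap in resp.get("autoSnapshots", []):
    # Each entry carries the restore date plus a status and any attached disks.
    print(snap["date"], snap["status"], snap.get("fromAttachedDisks", []))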

" + } + } + }, "GetBlueprintsRequest":{ "type":"structure", "members":{ @@ -4475,15 +4833,15 @@ }, "startTime":{ "shape":"IsoDate", - "documentation":"

The start of the time interval from which to get log events.

Constraints:

" + "documentation":"

The start of the time interval from which to get log events.

Constraints:

" }, "endTime":{ "shape":"IsoDate", - "documentation":"

The end of the time interval from which to get log events.

Constraints:

" + "documentation":"

The end of the time interval from which to get log events.

Constraints:

" }, "startFromHead":{ "shape":"boolean", - "documentation":"

Parameter to specify if the log should start from head or tail. If true is specified, the log event starts from the head of the log. If false is specified, the log event starts from the tail of the log.

Default: false

" + "documentation":"

Parameter to specify if the log should start from head or tail. If true is specified, the log event starts from the head of the log. If false is specified, the log event starts from the tail of the log.

For PostgreSQL, the default value of false is the only option available.
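A minimal boto3 sketch of fetching recent log events with this parameter; the database and log stream names are hypothetical:

import boto3
from datetime import datetime, timedelta

lightsail = boto3.client("lightsail", region_name="us-east-2")

end = datetime.utcnow()
events = lightsail.get_relational_database_log_events(
    relationalDatabaseName="my-postgres-db",   # hypothetical
    logStreamName="postgresql",                # hypothetical stream name
    startTime=end - timedelta(hours=1),
    endTime=end,
    startFromHead=False,                       # the only value PostgreSQL supports
)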

" }, "pageToken":{ "shape":"string", @@ -4580,11 +4938,11 @@ }, "startTime":{ "shape":"IsoDate", - "documentation":"

The start of the time interval from which to get metric data.

Constraints:

" + "documentation":"

The start of the time interval from which to get metric data.

Constraints:

" }, "endTime":{ "shape":"IsoDate", - "documentation":"

The end of the time interval from which to get metric data.

Constraints:

" + "documentation":"

The end of the time interval from which to get metric data.

Constraints:

" }, "unit":{ "shape":"MetricUnit", @@ -4866,6 +5224,10 @@ "shape":"NonEmptyString", "documentation":"

The bundle for the instance (e.g., micro_1_0).

" }, + "addOns":{ + "shape":"AddOnList", + "documentation":"

An array of objects representing the add-ons enabled on the instance.

" + }, "isStaticIp":{ "shape":"boolean", "documentation":"

A Boolean value indicating whether this instance has a static IP assigned to it.

" @@ -5224,12 +5586,16 @@ "shape":"string", "documentation":"

The bundle ID from which you created the snapshot (e.g., micro_1_0).

" }, + "isFromAutoSnapshot":{ + "shape":"boolean", + "documentation":"

A Boolean value indicating whether the snapshot was created from an automatic snapshot.

" + }, "sizeInGb":{ "shape":"integer", "documentation":"

The size in GB of the SSD.

" } }, - "documentation":"

Describes the snapshot of the virtual private server, or instance.

" + "documentation":"

Describes an instance snapshot.

" }, "InstanceSnapshotInfo":{ "type":"structure", @@ -5904,7 +6270,7 @@ }, "location":{ "shape":"ResourceLocation", - "documentation":"

The region and Availability Zone.

" + "documentation":"

The AWS Region and Availability Zone.

" }, "isTerminal":{ "shape":"boolean", @@ -6009,7 +6375,9 @@ "UpdateRelationalDatabaseParameters", "StartRelationalDatabase", "RebootRelationalDatabase", - "StopRelationalDatabase" + "StopRelationalDatabase", + "EnableAddOn", + "DisableAddOn" ] }, "PasswordData":{ @@ -6661,6 +7029,10 @@ } } }, + "ResourceArn":{ + "type":"string", + "pattern":"^arn:(aws[^:]*):([a-zA-Z0-9-]+):([a-z0-9-]+):([0-9]+):([a-zA-Z]+)/([a-zA-Z0-9-]+)$" + }, "ResourceLocation":{ "type":"structure", "members":{ @@ -6891,6 +7263,10 @@ "shape":"ResourceName", "documentation":"

The name of the resource to which you are adding tags.

" }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to which you want to add a tag.

" + }, "tags":{ "shape":"TagList", "documentation":"

The tag key and optional value.
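A minimal boto3 sketch of tagging a resource with one key/value pair (resource name and tag values hypothetical):

import boto3

lightsail = boto3.client("lightsail", region_name="us-east-2")

lightsail.tag_resource(
    resourceName="my-instance",
    tags=[{"key": "environment", "value": "prod"}],   # up to 50 tags per resource
)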

" @@ -6907,6 +7283,10 @@ } }, "TagValue":{"type":"string"}, + "TimeOfDay":{ + "type":"string", + "pattern":"^(0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$" + }, "UnauthenticatedException":{ "type":"structure", "members":{ @@ -6943,6 +7323,10 @@ "shape":"ResourceName", "documentation":"

The name of the resource from which you are removing a tag.

" }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource from which you want to remove a tag.

" + }, "tagKeys":{ "shape":"TagKeyList", "documentation":"

The tag keys to delete from the specified resource.

" @@ -7059,11 +7443,11 @@ }, "preferredBackupWindow":{ "shape":"string", - "documentation":"

The daily time range during which automated backups are created for your database if automated backups are enabled.

Constraints:

" + "documentation":"

The daily time range during which automated backups are created for your database if automated backups are enabled.

Constraints:

" }, "preferredMaintenanceWindow":{ "shape":"string", - "documentation":"

The weekly time range during which system maintenance can occur on your database.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Constraints:

" + "documentation":"

The weekly time range during which system maintenance can occur on your database.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Constraints:

" }, "enableBackupRetention":{ "shape":"boolean", diff --git a/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json b/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json index 1b773f64..4eb05ebc 100644 --- a/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json +++ b/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json @@ -63,6 +63,8 @@ "daily_business_canceled_product_subscribers", "monthly_revenue_billing_and_revenue_data", "monthly_revenue_annual_subscriptions", + "monthly_revenue_field_demonstration_usage", + "monthly_revenue_flexible_payment_schedule", "disbursed_amount_by_product", "disbursed_amount_by_product_with_uncollected_funds", "disbursed_amount_by_instance_hours", @@ -97,7 +99,7 @@ "members":{ "dataSetType":{ "shape":"DataSetType", - "documentation":"

The desired data set type.

" + "documentation":"

The desired data set type.

" }, "dataSetPublicationDate":{ "shape":"DataSetPublicationDate", diff --git a/botocore/data/mediaconnect/2018-11-14/service-2.json b/botocore/data/mediaconnect/2018-11-14/service-2.json index f1fbc454..63c04400 100644 --- a/botocore/data/mediaconnect/2018-11-14/service-2.json +++ b/botocore/data/mediaconnect/2018-11-14/service-2.json @@ -780,7 +780,7 @@ "SmoothingLatency": { "shape": "__integer", "locationName": "smoothingLatency", - "documentation": "The smoothing latency in milliseconds for RTP and RTP-FEC streams." + "documentation": "The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams." }, "StreamId": { "shape": "__string", @@ -994,6 +994,11 @@ "Entitlement": { "type": "structure", "members": { + "DataTransferSubscriberFeePercent": { + "shape": "__integer", + "locationName": "dataTransferSubscriberFeePercent", + "documentation": "Percentage from 0-100 of the data transfer cost to be billed to the subscriber." + }, "Description": { "shape": "__string", "locationName": "description", @@ -1107,6 +1112,11 @@ "GrantEntitlementRequest": { "type": "structure", "members": { + "DataTransferSubscriberFeePercent": { + "shape": "__integer", + "locationName": "dataTransferSubscriberFeePercent", + "documentation": "Percentage from 0-100 of the data transfer cost to be billed to the subscriber." + }, "Description": { "shape": "__string", "locationName": "description", @@ -1303,6 +1313,11 @@ "ListedEntitlement": { "type": "structure", "members": { + "DataTransferSubscriberFeePercent": { + "shape": "__integer", + "locationName": "dataTransferSubscriberFeePercent", + "documentation": "Percentage from 0-100 of the data transfer cost to be billed to the subscriber." + }, "EntitlementArn": { "shape": "__string", "locationName": "entitlementArn", @@ -1404,6 +1419,11 @@ "Output": { "type": "structure", "members": { + "DataTransferSubscriberFeePercent": { + "shape": "__integer", + "locationName": "dataTransferSubscriberFeePercent", + "documentation": "Percentage from 0-100 of the data transfer cost to be billed to the subscriber." + }, "Description": { "shape": "__string", "locationName": "description", @@ -1462,7 +1482,8 @@ "zixi-push", "rtp-fec", "rtp", - "zixi-pull" + "zixi-pull", + "rist" ] }, "RemoveFlowOutputRequest": { @@ -1595,12 +1616,12 @@ "MaxBitrate": { "shape": "__integer", "locationName": "maxBitrate", - "documentation": "The smoothing max bitrate for RTP and RTP-FEC streams." + "documentation": "The smoothing max bitrate for RIST, RTP, and RTP-FEC streams." }, "MaxLatency": { "shape": "__integer", "locationName": "maxLatency", - "documentation": "The maximum latency in milliseconds for Zixi-based streams." + "documentation": "The maximum latency in milliseconds. This parameter applies only to RIST-based and Zixi-based streams." }, "Name": { "shape": "__string", @@ -1628,6 +1649,11 @@ "Source": { "type": "structure", "members": { + "DataTransferSubscriberFeePercent": { + "shape": "__integer", + "locationName": "dataTransferSubscriberFeePercent", + "documentation": "Percentage from 0-100 of the data transfer cost to be billed to the subscriber." + }, "Decryption": { "shape": "Encryption", "locationName": "decryption", @@ -1807,12 +1833,12 @@ "MaxBitrate": { "shape": "__integer", "locationName": "maxBitrate", - "documentation": "The smoothing max bitrate for RTP and RTP-FEC streams." + "documentation": "The smoothing max bitrate for RIST, RTP, and RTP-FEC streams." 
}, "MaxLatency": { "shape": "__integer", "locationName": "maxLatency", - "documentation": "The maximum latency in milliseconds for Zixi-based streams." + "documentation": "The maximum latency in milliseconds. This parameter applies only to RIST-based and Zixi-based streams." }, "Protocol": { "shape": "Protocol", @@ -1827,7 +1853,7 @@ "SmoothingLatency": { "shape": "__integer", "locationName": "smoothingLatency", - "documentation": "The smoothing latency in milliseconds for RTP and RTP-FEC streams." + "documentation": "The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams." }, "StreamId": { "shape": "__string", @@ -2021,7 +2047,7 @@ "SmoothingLatency": { "shape": "__integer", "locationName": "smoothingLatency", - "documentation": "The smoothing latency in milliseconds for RTP and RTP-FEC streams." + "documentation": "The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams." }, "StreamId": { "shape": "__string", @@ -2081,12 +2107,12 @@ "MaxBitrate": { "shape": "__integer", "locationName": "maxBitrate", - "documentation": "The smoothing max bitrate for RTP and RTP-FEC streams." + "documentation": "The smoothing max bitrate for RIST, RTP, and RTP-FEC streams." }, "MaxLatency": { "shape": "__integer", "locationName": "maxLatency", - "documentation": "The maximum latency in milliseconds for Zixi-based streams." + "documentation": "The maximum latency in milliseconds. This parameter applies only to RIST-based and Zixi-based streams." }, "Protocol": { "shape": "Protocol", diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 2bca4a53..41e1211b 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -1286,9 +1286,9 @@ "documentation": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track." }, "Channels": { - "shape": "__integerMin1Max2", + "shape": "__integerMin1Max64", "locationName": "channels", - "documentation": "Set Channels to specify the number of channels in this output audio track. Choosing Mono in the console will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2." + "documentation": "Specify the number of channels in this output audio track. Valid values are 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64." }, "SampleRate": { "shape": "__integerMin8000Max192000", @@ -1298,17 +1298,43 @@ }, "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AIFF." }, + "AncillaryConvert608To708": { + "type": "string", + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", + "enum": [ + "UPCONVERT", + "DISABLED" + ] + }, "AncillarySourceSettings": { "type": "structure", "members": { + "Convert608To708": { + "shape": "AncillaryConvert608To708", + "locationName": "convert608To708", + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. 
If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." + }, "SourceAncillaryChannelNumber": { "shape": "__integerMin1Max4", "locationName": "sourceAncillaryChannelNumber", "documentation": "Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for passthrough." + }, + "TerminateCaptions": { + "shape": "AncillaryTerminateCaptions", + "locationName": "terminateCaptions", + "documentation": "By default, the service terminates any unterminated captions at the end of each input. If you want the caption to continue onto your next input, disable this setting." } }, "documentation": "Settings for ancillary captions source." }, + "AncillaryTerminateCaptions": { + "type": "string", + "documentation": "By default, the service terminates any unterminated captions at the end of each input. If you want the caption to continue onto your next input, disable this setting.", + "enum": [ + "END_OF_INPUT", + "DISABLED" + ] + }, "AntiAlias": { "type": "string", "documentation": "The anti-alias filter is automatically applied to all outputs. The service no longer accepts the value DISABLED for AntiAlias. If you specify that in your job, the service will ignore the setting.", @@ -1891,7 +1917,7 @@ "DestinationType": { "shape": "CaptionDestinationType", "locationName": "destinationType", - "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, DVB-sub, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED) to create an output that complies with the SCTE-43 spec. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20)." + "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, DVB-sub, IMSC, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED) to create an output that complies with the SCTE-43 spec. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20)." }, "DvbSubDestinationSettings": { "shape": "DvbSubDestinationSettings", @@ -1903,6 +1929,11 @@ "locationName": "embeddedDestinationSettings", "documentation": "Settings specific to embedded/ancillary caption outputs, including 608/708 Channel destination number." }, + "ImscDestinationSettings": { + "shape": "ImscDestinationSettings", + "locationName": "imscDestinationSettings", + "documentation": "Settings specific to IMSC caption outputs." + }, "SccDestinationSettings": { "shape": "SccDestinationSettings", "locationName": "sccDestinationSettings", @@ -1923,12 +1954,13 @@ }, "CaptionDestinationType": { "type": "string", - "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, DVB-sub, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED) to create an output that complies with the SCTE-43 spec. 
To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20).", + "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, DVB-sub, IMSC, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED) to create an output that complies with the SCTE-43 spec. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20).", "enum": [ "BURN_IN", "DVB_SUB", "EMBEDDED", "EMBEDDED_PLUS_SCTE20", + "IMSC", "SCTE20_PLUS_EMBEDDED", "SCC", "SRT", @@ -1954,7 +1986,7 @@ "SourceSettings": { "shape": "CaptionSourceSettings", "locationName": "sourceSettings", - "documentation": "Source settings (SourceSettings) contains the group of settings for captions in the input." + "documentation": "If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, specify the URI of the input captions source file. If your input captions are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings." } }, "documentation": "Set up captions in your outputs by first selecting them from your input here." @@ -1980,7 +2012,7 @@ "FileSourceSettings": { "shape": "FileSourceSettings", "locationName": "fileSourceSettings", - "documentation": "Settings for File-based Captions in Source" + "documentation": "If your input captions are SCC, SMI, SRT, STL, TTML, or IMSC 1.1 in an xml file, specify the URI of the input caption source file. If your caption source is IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings." }, "SourceType": { "shape": "CaptionSourceType", @@ -1995,10 +2027,10 @@ "TrackSourceSettings": { "shape": "TrackSourceSettings", "locationName": "trackSourceSettings", - "documentation": "Settings specific to caption sources that are specfied by track number. Sources include IMSC in IMF." + "documentation": "Settings specific to caption sources that are specified by track number. Currently, this is only IMSC captions in an IMF package. If your caption source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of TrackSourceSettings." } }, - "documentation": "Source settings (SourceSettings) contains the group of settings for captions in the input." + "documentation": "If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, specify the URI of the input captions source file. If your input captions are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings." }, "CaptionSourceType": { "type": "string", @@ -2056,7 +2088,7 @@ "EncryptionMethod": { "shape": "CmafEncryptionType", "locationName": "encryptionMethod", - "documentation": "For DRM with CMAF, the encryption type is always sample AES." + "documentation": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR)." }, "InitializationVectorInManifest": { "shape": "CmafInitializationVectorInManifest", @@ -2066,7 +2098,7 @@ "SpekeKeyProvider": { "shape": "SpekeKeyProviderCmaf", "locationName": "spekeKeyProvider", - "documentation": "Use these settings when doing DRM encryption with a SPEKE-compliant key provider, if your output group type is CMAF. If your output group type is HLS, MS Smooth, or DASH, use the SpekeKeyProvider settings instead." 
+ "documentation": "If your output group type is CMAF, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is HLS, DASH, or Microsoft Smooth, use the SpekeKeyProvider settings instead." }, "StaticKeyProvider": { "shape": "StaticKeyProvider", @@ -2083,9 +2115,10 @@ }, "CmafEncryptionType": { "type": "string", - "documentation": "For DRM with CMAF, the encryption type is always sample AES.", + "documentation": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR).", "enum": [ - "SAMPLE_AES" + "SAMPLE_AES", + "AES_CTR" ] }, "CmafGroupSettings": { @@ -2429,6 +2462,11 @@ "locationName": "settings", "documentation": "JobSettings contains all the transcode settings for a job." }, + "SimulateReservedQueue": { + "shape": "SimulateReservedQueue", + "locationName": "simulateReservedQueue", + "documentation": "Enable this setting when you run a test job to estimate how many reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert runs your job from an on-demand queue with similar performance to what you will see with one RTS in a reserved queue. This setting is disabled by default." + }, "StatusUpdateInterval": { "shape": "StatusUpdateInterval", "locationName": "statusUpdateInterval", @@ -2622,7 +2660,7 @@ "SpekeKeyProvider": { "shape": "SpekeKeyProvider", "locationName": "spekeKeyProvider", - "documentation": "Use these settings when doing DRM encryption with a SPEKE-compliant key provider, if your output group type is HLS, MS Smooth, or DASH. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead." + "documentation": "If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead." } }, "documentation": "Specifies DRM settings for DASH outputs." @@ -3527,7 +3565,7 @@ }, "EmbeddedConvert608To708": { "type": "string", - "documentation": "When set to UPCONVERT, 608 data is both passed through via the \"608 compatibility bytes\" fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded.", + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "enum": [ "UPCONVERT", "DISABLED" @@ -3539,12 +3577,12 @@ "Destination608ChannelNumber": { "shape": "__integerMin1Max4", "locationName": "destination608ChannelNumber", - "documentation": "Ignore this setting unless your input captions are SCC format and your output captions are embedded in the video stream. Specify a CC number for each captions channel in this output. If you have two channels, pick CC numbers that aren't in the same field. For example, choose 1 and 3. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded." + "documentation": "Ignore this setting unless your input captions are SCC format and your output captions are embedded in the video stream. Specify a CC number for each captions channel in this output. If you have two channels, choose CC numbers that aren't in the same field. 
For example, choose 1 and 3. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded." }, "Destination708ServiceNumber": { "shape": "__integerMin1Max6", "locationName": "destination708ServiceNumber", - "documentation": "Ignore this setting unless your input captions are SCC format and you want both 608 and 708 captions embedded in your output stream. Optionally, specify the 708 service number for each output captions channel. Choose a different number for each channel. To use this setting, also set Force 608 to 708 upconvert (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector settings. If you choose to upconvert but don't specify a 708 service number, MediaConvert uses the number you specify for CC channel number (destination608ChannelNumber) for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded." + "documentation": "Ignore this setting unless your input captions are SCC format and you want both 608 and 708 captions embedded in your output stream. Optionally, specify the 708 service number for each output captions channel. Choose a different number for each channel. To use this setting, also set Force 608 to 708 upconvert (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector settings. If you choose to upconvert but don't specify a 708 service number, MediaConvert uses the number that you specify for CC channel number (destination608ChannelNumber) for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded." } }, "documentation": "Settings specific to embedded/ancillary caption outputs, including 608/708 Channel destination number." @@ -3555,7 +3593,7 @@ "Convert608To708": { "shape": "EmbeddedConvert608To708", "locationName": "convert608To708", - "documentation": "When set to UPCONVERT, 608 data is both passed through via the \"608 compatibility bytes\" fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded." + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." }, "Source608ChannelNumber": { "shape": "__integerMin1Max4", @@ -3566,10 +3604,23 @@ "shape": "__integerMin1Max1", "locationName": "source608TrackNumber", "documentation": "Specifies the video track index used for extracting captions. The system only supports one input video track, so this should always be set to '1'." + }, + "TerminateCaptions": { + "shape": "EmbeddedTerminateCaptions", + "locationName": "terminateCaptions", + "documentation": "By default, the service terminates any unterminated captions at the end of each input. If you want the caption to continue onto your next input, disable this setting." } }, "documentation": "Settings for embedded captions Source" }, + "EmbeddedTerminateCaptions": { + "type": "string", + "documentation": "By default, the service terminates any unterminated captions at the end of each input. 
If you want the caption to continue onto your next input, disable this setting.", + "enum": [ + "END_OF_INPUT", + "DISABLED" + ] + }, "Endpoint": { "type": "structure", "members": { @@ -3670,7 +3721,7 @@ }, "FileSourceConvert608To708": { "type": "string", - "documentation": "If set to UPCONVERT, 608 caption data is both passed through via the \"608 compatibility bytes\" fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded.", + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "enum": [ "UPCONVERT", "DISABLED" @@ -3682,12 +3733,12 @@ "Convert608To708": { "shape": "FileSourceConvert608To708", "locationName": "convert608To708", - "documentation": "If set to UPCONVERT, 608 caption data is both passed through via the \"608 compatibility bytes\" fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded." + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." }, "SourceFile": { - "shape": "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTSmiSMI", + "shape": "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI", "locationName": "sourceFile", - "documentation": "External caption file used for loading captions. Accepted file extensions are 'scc', 'ttml', 'dfxp', 'stl', 'srt', and 'smi'." + "documentation": "External caption file used for loading captions. Accepted file extensions are 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', and 'smi'." }, "TimeDelta": { "shape": "__integerMinNegative2147483648Max2147483647", @@ -3695,7 +3746,7 @@ "documentation": "Specifies a time delta in seconds to offset the captions from the source file." } }, - "documentation": "Settings for File-based Captions in Source" + "documentation": "If your input captions are SCC, SMI, SRT, STL, TTML, or IMSC 1.1 in an xml file, specify the URI of the input caption source file. If your caption source is IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings." }, "FontScript": { "type": "string", @@ -4888,7 +4939,7 @@ "SpekeKeyProvider": { "shape": "SpekeKeyProvider", "locationName": "spekeKeyProvider", - "documentation": "Use these settings when doing DRM encryption with a SPEKE-compliant key provider, if your output group type is HLS, MS Smooth, or DASH. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead." + "documentation": "If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead." }, "StaticKeyProvider": { "shape": "StaticKeyProvider", @@ -5189,6 +5240,25 @@ }, "documentation": "Enable the image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each input or output individually. This setting is disabled by default." 
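Putting the new caption controls together, a caption selector for embedded input captions might be built like this for a boto3 create_job Settings payload (a sketch; the selector name and placement under Settings["Inputs"][n]["CaptionSelectors"] are illustrative):

# A CaptionSelectors entry using the upconvert and terminate-captions settings above.
caption_selectors = {
    "Captions Selector 1": {                       # illustrative selector name
        "SourceSettings": {
            "SourceType": "EMBEDDED",
            "EmbeddedSourceSettings": {
                "Convert608To708": "UPCONVERT",    # pass 608 through and translate into 708
                "TerminateCaptions": "DISABLED",   # let captions continue onto the next input
            },
        },
    }
}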
}, + "ImscDestinationSettings": { + "type": "structure", + "members": { + "StylePassthrough": { + "shape": "ImscStylePassthrough", + "locationName": "stylePassthrough", + "documentation": "Keep this setting enabled to have MediaConvert use the font style and position information from the captions source in the output. This option is available only when your input captions are CFF-TT, IMSC, SMPTE-TT, or TTML. Disable this setting for simplified output captions." + } + }, + "documentation": "Settings specific to IMSC caption outputs." + }, + "ImscStylePassthrough": { + "type": "string", + "documentation": "Keep this setting enabled to have MediaConvert use the font style and position information from the captions source in the output. This option is available only when your input captions are CFF-TT, IMSC, SMPTE-TT, or TTML. Disable this setting for simplified output captions.", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, "Input": { "type": "structure", "members": { @@ -5275,7 +5345,12 @@ "TimecodeSource": { "shape": "InputTimecodeSource", "locationName": "timecodeSource", - "documentation": "Timecode source under input settings (InputTimecodeSource) only affects the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Use this setting to specify whether the service counts frames by timecodes embedded in the video (EMBEDDED) or by starting the first frame at zero (ZEROBASED). In both cases, the timecode format is HH:MM:SS:FF or HH:MM:SS;FF, where FF is the frame number. Only set this to EMBEDDED if your source video has embedded timecodes." + "documentation": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." + }, + "TimecodeStart": { + "shape": "__stringMin11Max11Pattern01D20305D205D", + "locationName": "timecodeStart", + "documentation": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." }, "VideoSelector": { "shape": "VideoSelector", @@ -5442,7 +5517,12 @@ "TimecodeSource": { "shape": "InputTimecodeSource", "locationName": "timecodeSource", - "documentation": "Timecode source under input settings (InputTimecodeSource) only affects the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Use this setting to specify whether the service counts frames by timecodes embedded in the video (EMBEDDED) or by starting the first frame at zero (ZEROBASED). 
In both cases, the timecode format is HH:MM:SS:FF or HH:MM:SS;FF, where FF is the frame number. Only set this to EMBEDDED if your source video has embedded timecodes." + "documentation": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." + }, + "TimecodeStart": { + "shape": "__stringMin11Max11Pattern01D20305D205D", + "locationName": "timecodeStart", + "documentation": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." }, "VideoSelector": { "shape": "VideoSelector", @@ -5454,7 +5534,7 @@ }, "InputTimecodeSource": { "type": "string", - "documentation": "Timecode source under input settings (InputTimecodeSource) only affects the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Use this setting to specify whether the service counts frames by timecodes embedded in the video (EMBEDDED) or by starting the first frame at zero (ZEROBASED). In both cases, the timecode format is HH:MM:SS:FF or HH:MM:SS;FF, where FF is the frame number. Only set this to EMBEDDED if your source video has embedded timecodes.", + "documentation": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", "enum": [ "EMBEDDED", "ZEROBASED", @@ -5619,6 +5699,11 @@ "locationName": "settings", "documentation": "JobSettings contains all the transcode settings for a job." }, + "SimulateReservedQueue": { + "shape": "SimulateReservedQueue", + "locationName": "simulateReservedQueue", + "documentation": "Enable this setting when you run a test job to estimate how many reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert runs your job from an on-demand queue with similar performance to what you will see with one RTS in a reserved queue. This setting is disabled by default." 
+ }, "Status": { "shape": "JobStatus", "locationName": "status", @@ -7226,7 +7311,7 @@ "SpekeKeyProvider": { "shape": "SpekeKeyProvider", "locationName": "spekeKeyProvider", - "documentation": "Use these settings when doing DRM encryption with a SPEKE-compliant key provider, if your output group type is HLS, MS Smooth, or DASH. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead." + "documentation": "If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead." } }, "documentation": "If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider." @@ -7379,7 +7464,7 @@ "Strength": { "shape": "__integerMin0Max16", "locationName": "strength", - "documentation": "Relative strength of noise reducing filter. Higher values produce stronger filtering. Recommended Range: * [0 .. 2] for complexity reduction with minimal sharpness loss * [2 .. 8] for complexity reduction with image preservation * [8 .. 16] for noise reduction. Reduce noise combined high complexity reduction" + "documentation": "Specify the strength of the noise reducing filter on this output. Higher values produce stronger filtering. We recommend the following value ranges, depending on the result that you want: * 0-2 for complexity reduction with minimal sharpness loss * 2-8 for complexity reduction with image preservation * 8-16 for a high level of complexity reduction" } }, "documentation": "Noise reducer filter settings for temporal filter." @@ -7914,14 +7999,14 @@ "documentation": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel. Units are in dB. Acceptable values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification)." }, "ChannelsIn": { - "shape": "__integerMin1Max16", + "shape": "__integerMin1Max64", "locationName": "channelsIn", "documentation": "Specify the number of audio channels from your input that you want to use in your output. With remixing, you might combine or split the data in these channels, so the number of channels in your final output might be different." }, "ChannelsOut": { - "shape": "__integerMin1Max8", + "shape": "__integerMin1Max64", "locationName": "channelsOut", - "documentation": "Specify the number of channels in this output after remixing. Valid values: 1, 2, 4, 6, 8" + "documentation": "Specify the number of channels in this output after remixing. Valid values: 1, 2, 4, 6, 8... 64. (1 and even numbers to 64.)" } }, "documentation": "Use Manual audio remixing (RemixSettings) to adjust audio levels for each audio channel in each output of your job. With audio remixing, you can output more or fewer audio channels than your input audio source provides." @@ -8093,6 +8178,14 @@ }, "documentation": "Settings for SCC caption output." }, + "SimulateReservedQueue": { + "type": "string", + "documentation": "Enable this setting when you run a test job to estimate how many reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert runs your job from an on-demand queue with similar performance to what you will see with one RTS in a reserved queue. 
This setting is disabled by default.", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, "SpekeKeyProvider": { "type": "structure", "members": { @@ -8117,7 +8210,7 @@ "documentation": "Specify the URL to the key server that your SPEKE-compliant DRM key provider uses to provide keys for encrypting your content." } }, - "documentation": "Use these settings when doing DRM encryption with a SPEKE-compliant key provider, if your output group type is HLS, MS Smooth, or DASH. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead." + "documentation": "If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead." }, "SpekeKeyProviderCmaf": { "type": "structure", @@ -8148,7 +8241,7 @@ "documentation": "Specify the URL to the key server that your SPEKE-compliant DRM key provider uses to provide keys for encrypting your content." } }, - "documentation": "Use these settings when doing DRM encryption with a SPEKE-compliant key provider, if your output group type is CMAF. If your output group type is HLS, MS Smooth, or DASH, use the SpekeKeyProvider settings instead." + "documentation": "If your output group type is CMAF, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is HLS, DASH, or Microsoft Smooth, use the SpekeKeyProvider settings instead." }, "StaticKeyProvider": { "type": "structure", @@ -8393,7 +8486,7 @@ "documentation": "Use this setting to select a single captions track from a source. Track numbers correspond to the order in the captions source file. For IMF sources, track numbering is based on the order that the captions appear in the CPL. For example, use 1 to select the captions asset that is listed first in the CPL. To include more than one captions track in your job outputs, create multiple input captions selectors. Specify one track per selector." } }, - "documentation": "Settings specific to caption sources that are specfied by track number. Sources include IMSC in IMF." + "documentation": "Settings specific to caption sources that are specified by track number. Currently, this is only IMSC captions in an IMF package. If your caption source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of TrackSourceSettings." }, "TtmlDestinationSettings": { "type": "structure", @@ -8668,7 +8761,7 @@ "documentation": "Applies only if you set AFD Signaling(AfdSignaling) to Fixed (FIXED). Use Fixed (FixedAfd) to specify a four-bit AFD value which the service will write on all frames of this video output." }, "Height": { - "shape": "__integerMin32Max2160", + "shape": "__integerMin32Max4096", "locationName": "height", "documentation": "Use the Height (Height) setting to define the video resolution height for this output. Specify in pixels. If you don't provide a value here, the service will use the input height." }, @@ -8818,9 +8911,9 @@ "documentation": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track." }, "Channels": { - "shape": "__integerMin1Max8", + "shape": "__integerMin1Max64", "locationName": "channels", - "documentation": "Set Channels to specify the number of channels in this output audio track. With WAV, valid values 1, 2, 4, and 8. In the console, these values are Mono, Stereo, 4-Channel, and 8-Channel, respectively." 
+ "documentation": "Specify the number of channels in this output audio track. Valid values are 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64." }, "Format": { "shape": "WavFormat", @@ -9047,11 +9140,6 @@ "min": 1, "max": 1001 }, - "__integerMin1Max16": { - "type": "integer", - "min": 1, - "max": 16 - }, "__integerMin1Max17895697": { "type": "integer", "min": 1, @@ -9097,10 +9185,10 @@ "min": 1, "max": 6 }, - "__integerMin1Max8": { + "__integerMin1Max64": { "type": "integer", "min": 1, - "max": 8 + "max": 64 }, "__integerMin24Max60000": { "type": "integer", @@ -9132,11 +9220,6 @@ "min": 32000, "max": 48000 }, - "__integerMin32Max2160": { - "type": "integer", - "min": 32, - "max": 2160 - }, "__integerMin32Max4096": { "type": "integer", "min": 32, @@ -9470,10 +9553,10 @@ "min": 14, "pattern": "^(s3:\\/\\/)(.*?)\\.(bmp|BMP|png|PNG|tga|TGA)$" }, - "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTSmiSMI": { + "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI": { "type": "string", "min": 14, - "pattern": "^(s3:\\/\\/)(.*?)\\.(scc|SCC|ttml|TTML|dfxp|DFXP|stl|STL|srt|SRT|smi|SMI)$" + "pattern": "^(s3:\\/\\/)(.*?)\\.(scc|SCC|ttml|TTML|dfxp|DFXP|stl|STL|srt|SRT|xml|XML|smi|SMI)$" }, "__stringMin16Max24PatternAZaZ0922AZaZ0916": { "type": "string", diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index e351212c..ce5c77b6 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -1894,7 +1894,7 @@ "AudioOnlyImage": { "shape": "InputLocation", "locationName": "audioOnlyImage", - "documentation": "For use with an audio only Stream. Must be a .jpg or .png file. If given, this image will be used as the cover-art for the audio only output. Ideally, it should be formatted for an iPhone screen for two reasons. The iPhone does not resize the image, it crops a centered image on the top/bottom and left/right. Additionally, this image file gets saved bit-for-bit into every 10-second segment file, so will increase bandwidth by {image file size} * {segment count} * {user count.}." + "documentation": "Optional. Specifies the .jpg or .png image to use as the cover art for an audio-only output. We recommend a low bit-size file because the image increases the output audio bandwidth.\n\nThe image is attached to the audio as an ID3 tag, frame type APIC, picture type 0x10, as per the \"ID3 tag version 2.4.0 - Native Frames\" standard." 
}, "AudioTrackType": { "shape": "AudioOnlyHlsTrackType", @@ -2733,6 +2733,12 @@ }, "documentation": "Placeholder documentation for ChannelSummary" }, + "ColorSpacePassthroughSettings": { + "type": "structure", + "members": { + }, + "documentation": "Passthrough applies no color space conversion to the output" + }, "ConflictException": { "type": "structure", "members": { @@ -4646,6 +4652,24 @@ "INSERT" ] }, + "H264ColorSpaceSettings": { + "type": "structure", + "members": { + "ColorSpacePassthroughSettings": { + "shape": "ColorSpacePassthroughSettings", + "locationName": "colorSpacePassthroughSettings" + }, + "Rec601Settings": { + "shape": "Rec601Settings", + "locationName": "rec601Settings" + }, + "Rec709Settings": { + "shape": "Rec709Settings", + "locationName": "rec709Settings" + } + }, + "documentation": "H264 Color Space Settings" + }, "H264EntropyEncoding": { "type": "string", "documentation": "H264 Entropy Encoding", @@ -4743,6 +4767,7 @@ "documentation": "H264 Rate Control Mode", "enum": [ "CBR", + "MULTIPLEX", "QVBR", "VBR" ] @@ -4789,13 +4814,18 @@ "BufSize": { "shape": "__integerMin0", "locationName": "bufSize", - "documentation": "Size of buffer (HRD buffer model) in bits/second." + "documentation": "Size of buffer (HRD buffer model) in bits." }, "ColorMetadata": { "shape": "H264ColorMetadata", "locationName": "colorMetadata", "documentation": "Includes colorspace metadata in the output." }, + "ColorSpaceSettings": { + "shape": "H264ColorSpaceSettings", + "locationName": "colorSpaceSettings", + "documentation": "Color Space settings" + }, "EntropyEncoding": { "shape": "H264EntropyEncoding", "locationName": "entropyEncoding", @@ -4864,7 +4894,7 @@ "MaxBitrate": { "shape": "__integerMin1000", "locationName": "maxBitrate", - "documentation": "For QVBR: See the tooltip for Quality level \n\nFor VBR: Set the maximum bitrate in order to accommodate expected spikes in the complexity of the video." + "documentation": "For QVBR: See the tooltip for Quality level\n\nFor VBR: Set the maximum bitrate in order to accommodate expected spikes in the complexity of the video." }, "MinIInterval": { "shape": "__integerMin0Max30", @@ -4904,7 +4934,7 @@ "RateControlMode": { "shape": "H264RateControlMode", "locationName": "rateControlMode", - "documentation": "Rate control mode. \n\nQVBR: Quality will match the specified quality level except when it is constrained by the\nmaximum bitrate. Recommended if you or your viewers pay for bandwidth.\n\nVBR: Quality and bitrate vary, depending on the video complexity. Recommended instead of QVBR\nif you want to maintain a specific average bitrate over the duration of the channel.\n\nCBR: Quality varies, depending on the video complexity. Recommended only if you distribute\nyour assets to devices that cannot handle variable bitrates." + "documentation": "Rate control mode.\n\nQVBR: Quality will match the specified quality level except when it is constrained by the\nmaximum bitrate. Recommended if you or your viewers pay for bandwidth.\n\nVBR: Quality and bitrate vary, depending on the video complexity. Recommended instead of QVBR\nif you want to maintain a specific average bitrate over the duration of the channel.\n\nCBR: Quality varies, depending on the video complexity. Recommended only if you distribute\nyour assets to devices that cannot handle variable bitrates." 
}, "ScanType": { "shape": "H264ScanType", @@ -4994,6 +5024,314 @@ "PIC_TIMING_SEI" ] }, + "H265AdaptiveQuantization": { + "type": "string", + "documentation": "H265 Adaptive Quantization", + "enum": [ + "HIGH", + "HIGHER", + "LOW", + "MAX", + "MEDIUM", + "OFF" + ] + }, + "H265AlternativeTransferFunction": { + "type": "string", + "documentation": "H265 Alternative Transfer Function", + "enum": [ + "INSERT", + "OMIT" + ] + }, + "H265ColorMetadata": { + "type": "string", + "documentation": "H265 Color Metadata", + "enum": [ + "IGNORE", + "INSERT" + ] + }, + "H265ColorSpaceSettings": { + "type": "structure", + "members": { + "ColorSpacePassthroughSettings": { + "shape": "ColorSpacePassthroughSettings", + "locationName": "colorSpacePassthroughSettings" + }, + "Hdr10Settings": { + "shape": "Hdr10Settings", + "locationName": "hdr10Settings" + }, + "Rec601Settings": { + "shape": "Rec601Settings", + "locationName": "rec601Settings" + }, + "Rec709Settings": { + "shape": "Rec709Settings", + "locationName": "rec709Settings" + } + }, + "documentation": "H265 Color Space Settings" + }, + "H265FlickerAq": { + "type": "string", + "documentation": "H265 Flicker Aq", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "H265GopSizeUnits": { + "type": "string", + "documentation": "H265 Gop Size Units", + "enum": [ + "FRAMES", + "SECONDS" + ] + }, + "H265Level": { + "type": "string", + "documentation": "H265 Level", + "enum": [ + "H265_LEVEL_1", + "H265_LEVEL_2", + "H265_LEVEL_2_1", + "H265_LEVEL_3", + "H265_LEVEL_3_1", + "H265_LEVEL_4", + "H265_LEVEL_4_1", + "H265_LEVEL_5", + "H265_LEVEL_5_1", + "H265_LEVEL_5_2", + "H265_LEVEL_6", + "H265_LEVEL_6_1", + "H265_LEVEL_6_2", + "H265_LEVEL_AUTO" + ] + }, + "H265LookAheadRateControl": { + "type": "string", + "documentation": "H265 Look Ahead Rate Control", + "enum": [ + "HIGH", + "LOW", + "MEDIUM" + ] + }, + "H265Profile": { + "type": "string", + "documentation": "H265 Profile", + "enum": [ + "MAIN", + "MAIN_10BIT" + ] + }, + "H265RateControlMode": { + "type": "string", + "documentation": "H265 Rate Control Mode", + "enum": [ + "CBR", + "QVBR" + ] + }, + "H265ScanType": { + "type": "string", + "documentation": "H265 Scan Type", + "enum": [ + "PROGRESSIVE" + ] + }, + "H265SceneChangeDetect": { + "type": "string", + "documentation": "H265 Scene Change Detect", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "H265Settings": { + "type": "structure", + "members": { + "AdaptiveQuantization": { + "shape": "H265AdaptiveQuantization", + "locationName": "adaptiveQuantization", + "documentation": "Adaptive quantization. Allows intra-frame quantizers to vary to improve visual quality." + }, + "AfdSignaling": { + "shape": "AfdSignaling", + "locationName": "afdSignaling", + "documentation": "Indicates that AFD values will be written into the output stream. If afdSignaling is \"auto\", the system will try to preserve the input AFD value (in cases where multiple AFD values are valid). If set to \"fixed\", the AFD value will be the value configured in the fixedAfd parameter." + }, + "AlternativeTransferFunction": { + "shape": "H265AlternativeTransferFunction", + "locationName": "alternativeTransferFunction", + "documentation": "Whether or not EML should insert an Alternative Transfer Function SEI message to support backwards compatibility with non-HDR decoders and displays." + }, + "Bitrate": { + "shape": "__integerMin100000Max40000000", + "locationName": "bitrate", + "documentation": "Average bitrate in bits/second. Required when the rate control mode is VBR or CBR. 
Not used for QVBR. In an MS Smooth output group, each output must have a unique value when its bitrate is rounded down to the nearest multiple of 1000." + }, + "BufSize": { + "shape": "__integerMin100000Max80000000", + "locationName": "bufSize", + "documentation": "Size of buffer (HRD buffer model) in bits." + }, + "ColorMetadata": { + "shape": "H265ColorMetadata", + "locationName": "colorMetadata", + "documentation": "Includes colorspace metadata in the output." + }, + "ColorSpaceSettings": { + "shape": "H265ColorSpaceSettings", + "locationName": "colorSpaceSettings", + "documentation": "Color Space settings" + }, + "FixedAfd": { + "shape": "FixedAfd", + "locationName": "fixedAfd", + "documentation": "Four bit AFD value to write on all frames of video in the output stream. Only valid when afdSignaling is set to 'Fixed'." + }, + "FlickerAq": { + "shape": "H265FlickerAq", + "locationName": "flickerAq", + "documentation": "If set to enabled, adjust quantization within each frame to reduce flicker or 'pop' on I-frames." + }, + "FramerateDenominator": { + "shape": "__integerMin1Max3003", + "locationName": "framerateDenominator", + "documentation": "Framerate denominator." + }, + "FramerateNumerator": { + "shape": "__integerMin1", + "locationName": "framerateNumerator", + "documentation": "Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 fps." + }, + "GopClosedCadence": { + "shape": "__integerMin0", + "locationName": "gopClosedCadence", + "documentation": "Frequency of closed GOPs. In streaming applications, it is recommended that this be set to 1 so a decoder joining mid-stream will receive an IDR frame as quickly as possible. Setting this value to 0 will break output segmenting." + }, + "GopSize": { + "shape": "__doubleMin1", + "locationName": "gopSize", + "documentation": "GOP size (keyframe interval) in units of either frames or seconds per gopSizeUnits. Must be greater than zero." + }, + "GopSizeUnits": { + "shape": "H265GopSizeUnits", + "locationName": "gopSizeUnits", + "documentation": "Indicates if the gopSize is specified in frames or seconds. If seconds the system will convert the gopSize into a frame count at run time." + }, + "Level": { + "shape": "H265Level", + "locationName": "level", + "documentation": "H.265 Level." + }, + "LookAheadRateControl": { + "shape": "H265LookAheadRateControl", + "locationName": "lookAheadRateControl", + "documentation": "Amount of lookahead. A value of low can decrease latency and memory usage, while high can produce better quality for certain content." + }, + "MaxBitrate": { + "shape": "__integerMin100000Max40000000", + "locationName": "maxBitrate", + "documentation": "For QVBR: See the tooltip for Quality level" + }, + "MinIInterval": { + "shape": "__integerMin0Max30", + "locationName": "minIInterval", + "documentation": "Only meaningful if sceneChangeDetect is set to enabled. Enforces separation between repeated (cadence) I-frames and I-frames inserted by Scene Change Detection. If a scene change I-frame is within I-interval frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene change I-frame. GOP stretch requires enabling lookahead as well as setting I-interval. The normal cadence resumes for the next GOP. Note: Maximum GOP stretch = GOP size + Min-I-interval - 1" + }, + "ParDenominator": { + "shape": "__integerMin1", + "locationName": "parDenominator", + "documentation": "Pixel Aspect Ratio denominator." 
+ }, + "ParNumerator": { + "shape": "__integerMin1", + "locationName": "parNumerator", + "documentation": "Pixel Aspect Ratio numerator." + }, + "Profile": { + "shape": "H265Profile", + "locationName": "profile", + "documentation": "H.265 Profile." + }, + "QvbrQualityLevel": { + "shape": "__integerMin1Max10", + "locationName": "qvbrQualityLevel", + "documentation": "Controls the target quality for the video encode. Applies only when the rate control mode is QVBR. Set values for the QVBR quality level field and Max bitrate field that suit your most important viewing devices. Recommended values are:\n- Primary screen: Quality level: 8 to 10. Max bitrate: 4M\n- PC or tablet: Quality level: 7. Max bitrate: 1.5M to 3M\n- Smartphone: Quality level: 6. Max bitrate: 1M to 1.5M" + }, + "RateControlMode": { + "shape": "H265RateControlMode", + "locationName": "rateControlMode", + "documentation": "Rate control mode.\n\nQVBR: Quality will match the specified quality level except when it is constrained by the\nmaximum bitrate. Recommended if you or your viewers pay for bandwidth.\n\nCBR: Quality varies, depending on the video complexity. Recommended only if you distribute\nyour assets to devices that cannot handle variable bitrates." + }, + "ScanType": { + "shape": "H265ScanType", + "locationName": "scanType", + "documentation": "Sets the scan type of the output to progressive or top-field-first interlaced." + }, + "SceneChangeDetect": { + "shape": "H265SceneChangeDetect", + "locationName": "sceneChangeDetect", + "documentation": "Scene change detection." + }, + "Slices": { + "shape": "__integerMin1Max16", + "locationName": "slices", + "documentation": "Number of slices per picture. Must be less than or equal to the number of macroblock rows for progressive pictures, and less than or equal to half the number of macroblock rows for interlaced pictures.\nThis field is optional; when no value is specified the encoder will choose the number of slices based on encode resolution." + }, + "Tier": { + "shape": "H265Tier", + "locationName": "tier", + "documentation": "H.265 Tier." + }, + "TimecodeInsertion": { + "shape": "H265TimecodeInsertionBehavior", + "locationName": "timecodeInsertion", + "documentation": "Determines how timecodes should be inserted into the video elementary stream.\n- 'disabled': Do not include timecodes\n- 'picTimingSei': Pass through picture timing SEI messages from the source specified in Timecode Config" + } + }, + "documentation": "H265 Settings", + "required": [ + "FramerateNumerator", + "FramerateDenominator" + ] + }, + "H265Tier": { + "type": "string", + "documentation": "H265 Tier", + "enum": [ + "HIGH", + "MAIN" + ] + }, + "H265TimecodeInsertionBehavior": { + "type": "string", + "documentation": "H265 Timecode Insertion Behavior", + "enum": [ + "DISABLED", + "PIC_TIMING_SEI" + ] + }, + "Hdr10Settings": { + "type": "structure", + "members": { + "MaxCll": { + "shape": "__integerMin0Max32768", + "locationName": "maxCll", + "documentation": "Maximum Content Light Level\nAn integer metadata value defining the maximum light level, in nits,\nof any single pixel within an encoded HDR video stream or file." + }, + "MaxFall": { + "shape": "__integerMin0Max32768", + "locationName": "maxFall", + "documentation": "Maximum Frame Average Light Level\nAn integer metadata value defining the maximum average light level, in nits,\nfor any single frame within an encoded HDR video stream or file." 
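Taken together, the new H265Settings, H265ColorSpaceSettings, and Hdr10Settings shapes support an HDR10 video description along the lines of this sketch; the bitrate, quality level, and light-level values are placeholders, not recommendations from this file.

    # Sketch of a MediaLive H265 video codec configuration with HDR10 metadata.
    h265_settings = {
        "FramerateNumerator": 30000,   # required by this schema
        "FramerateDenominator": 1001,  # 30000/1001 = 29.97 fps
        "RateControlMode": "QVBR",
        "QvbrQualityLevel": 8,
        "MaxBitrate": 8000000,
        "Profile": "MAIN_10BIT",       # 10-bit profile for HDR content
        "Tier": "MAIN",
        "Level": "H265_LEVEL_AUTO",
        "ColorSpaceSettings": {
            "Hdr10Settings": {
                "MaxCll": 1000,        # max content light level, in nits
                "MaxFall": 400,        # max frame-average light level, in nits
            },
        },
    }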
+ } + }, + "documentation": "Hdr10 Settings" + }, "HlsAdMarkers": { "type": "string", "documentation": "Hls Ad Markers", @@ -7352,9 +7690,22 @@ "Destination" ] }, + "MsSmoothH265PackagingType": { + "type": "string", + "documentation": "Ms Smooth H265 Packaging Type", + "enum": [ + "HEV1", + "HVC1" + ] + }, "MsSmoothOutputSettings": { "type": "structure", "members": { + "H265PackagingType": { + "shape": "MsSmoothH265PackagingType", + "locationName": "h265PackagingType", + "documentation": "Only applicable when this output is referencing an H.265 video description.\nSpecifies whether MP4 segments should be packaged as HEV1 or HVC1." + }, "NameModifier": { "shape": "__string", "locationName": "nameModifier", @@ -7815,6 +8166,18 @@ }, "documentation": "PurchaseOffering response" }, + "Rec601Settings": { + "type": "structure", + "members": { + }, + "documentation": "Rec601 Settings" + }, + "Rec709Settings": { + "type": "structure", + "members": { + }, + "documentation": "Rec709 Settings" + }, "RemixSettings": { "type": "structure", "members": { @@ -8622,26 +8985,6 @@ "WEB_DELIVERY_ALLOWED" ] }, - "ServiceDescriptor": { - "type": "structure", - "members": { - "ProviderName": { - "shape": "__stringMax256", - "locationName": "providerName", - "documentation": "Name of provider" - }, - "ServiceName": { - "shape": "__stringMax256", - "locationName": "serviceName", - "documentation": "Name of service" - } - }, - "documentation": "Program configuration.", - "required": [ - "ProviderName", - "ServiceName" - ] - }, "SmoothGroupAudioOnlyTimecodeControl": { "type": "string", "documentation": "Smooth Group Audio Only Timecode Control", @@ -9609,6 +9952,10 @@ "H264Settings": { "shape": "H264Settings", "locationName": "h264Settings" + }, + "H265Settings": { + "shape": "H265Settings", + "locationName": "h265Settings" } }, "documentation": "Video Codec Settings" @@ -9680,7 +10027,7 @@ "ColorSpace": { "shape": "VideoSelectorColorSpace", "locationName": "colorSpace", - "documentation": "Specifies the colorspace of an input. This setting works in tandem with colorSpaceConversion to determine if any conversion will be performed." + "documentation": "Specifies the color space of an input. This setting works in tandem with colorSpaceUsage and a video description's colorSpaceSettingsChoice to determine if any conversion will be performed." 
}, "ColorSpaceUsage": { "shape": "VideoSelectorColorSpaceUsage", @@ -9849,6 +10196,12 @@ "max": 30, "documentation": "Placeholder documentation for __integerMin0Max30" }, + "__integerMin0Max32768": { + "type": "integer", + "min": 0, + "max": 32768, + "documentation": "Placeholder documentation for __integerMin0Max32768" + }, "__integerMin0Max3600": { "type": "integer", "min": 0, @@ -9907,6 +10260,18 @@ "max": 100000000, "documentation": "Placeholder documentation for __integerMin100000Max100000000" }, + "__integerMin100000Max40000000": { + "type": "integer", + "min": 100000, + "max": 40000000, + "documentation": "Placeholder documentation for __integerMin100000Max40000000" + }, + "__integerMin100000Max80000000": { + "type": "integer", + "min": 100000, + "max": 80000000, + "documentation": "Placeholder documentation for __integerMin100000Max80000000" + }, "__integerMin1000Max30000": { "type": "integer", "min": 1000, @@ -9937,6 +10302,12 @@ "max": 20, "documentation": "Placeholder documentation for __integerMin1Max20" }, + "__integerMin1Max3003": { + "type": "integer", + "min": 1, + "max": 3003, + "documentation": "Placeholder documentation for __integerMin1Max3003" + }, "__integerMin1Max31": { "type": "integer", "min": 1, diff --git a/botocore/data/mediapackage-vod/2018-11-07/service-2.json b/botocore/data/mediapackage-vod/2018-11-07/service-2.json index b6ddb13c..b38e47db 100644 --- a/botocore/data/mediapackage-vod/2018-11-07/service-2.json +++ b/botocore/data/mediapackage-vod/2018-11-07/service-2.json @@ -1087,6 +1087,11 @@ "HlsEncryption": { "documentation": "An HTTP Live Streaming (HLS) encryption configuration.", "members": { + "ConstantInitializationVector": { + "documentation": "A constant initialization vector for encryption (optional).\nWhen not specified the initialization vector will be periodically rotated.\n", + "locationName": "constantInitializationVector", + "shape": "__string" + }, "EncryptionMethod": { "documentation": "The encryption method to use.", "locationName": "encryptionMethod", diff --git a/botocore/data/mq/2017-11-27/service-2.json b/botocore/data/mq/2017-11-27/service-2.json index 7a908459..98259235 100644 --- a/botocore/data/mq/2017-11-27/service-2.json +++ b/botocore/data/mq/2017-11-27/service-2.json @@ -1029,7 +1029,7 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "The list of rules (1 minimum, 125 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." }, "SubnetIds" : { "shape" : "__listOf__string", @@ -1132,7 +1132,7 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "The list of rules (1 minimum, 125 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." }, "SubnetIds" : { "shape" : "__listOf__string", @@ -1614,6 +1614,16 @@ "locationName" : "pendingEngineVersion", "documentation" : "The version of the broker engine to upgrade to. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" }, + "PendingSecurityGroups" : { + "shape" : "__listOf__string", + "locationName" : "pendingSecurityGroups", + "documentation" : "The list of pending security groups to authorize connections to brokers." 
+ }, + "PendingHostInstanceType" : { + "shape" : "__string", + "locationName" : "pendingHostInstanceType", + "documentation" : "The host instance type of the broker to upgrade to. For a list of supported instance types, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide//broker.html#broker-instance-types" + }, "PubliclyAccessible" : { "shape" : "__boolean", "locationName" : "publiclyAccessible", @@ -1622,7 +1632,7 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "Required. The list of rules (1 minimum, 125 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." }, "SubnetIds" : { "shape" : "__listOf__string", @@ -1737,6 +1747,16 @@ "locationName" : "pendingEngineVersion", "documentation" : "The version of the broker engine to upgrade to. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" }, + "PendingSecurityGroups" : { + "shape" : "__listOf__string", + "locationName" : "pendingSecurityGroups", + "documentation" : "The list of pending security groups to authorize connections to brokers." + }, + "PendingHostInstanceType" : { + "shape" : "__string", + "locationName" : "pendingHostInstanceType", + "documentation" : "The host instance type of the broker to upgrade to. For a list of supported instance types, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide//broker.html#broker-instance-types" + }, "PubliclyAccessible" : { "shape" : "__boolean", "locationName" : "publiclyAccessible", @@ -1745,7 +1765,7 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "Required. The list of rules (1 minimum, 125 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." }, "SubnetIds" : { "shape" : "__listOf__string", @@ -2520,10 +2540,20 @@ "locationName" : "engineVersion", "documentation" : "The version of the broker engine. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" }, + "HostInstanceType" : { + "shape" : "__string", + "locationName" : "hostInstanceType", + "documentation" : "The host instance type of the broker to upgrade to. For a list of supported instance types, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide//broker.html#broker-instance-types" + }, "Logs" : { "shape" : "Logs", "locationName" : "logs", "documentation" : "Enables Amazon CloudWatch logging for brokers." + }, + "SecurityGroups" : { + "shape" : "__listOf__string", + "locationName" : "securityGroups", + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." } }, "documentation" : "Updates the broker using the specified properties." @@ -2551,10 +2581,20 @@ "locationName" : "engineVersion", "documentation" : "The version of the broker engine to upgrade to. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" }, + "HostInstanceType" : { + "shape" : "__string", + "locationName" : "hostInstanceType", + "documentation" : "The host instance type of the broker to upgrade to. 
For a list of supported instance types, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide//broker.html#broker-instance-types" + }, "Logs" : { "shape" : "Logs", "locationName" : "logs", "documentation" : "The list of information about logs to be enabled for the specified broker." + }, + "SecurityGroups" : { + "shape" : "__listOf__string", + "locationName" : "securityGroups", + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." } }, "documentation" : "Returns information about the updated broker." @@ -2583,10 +2623,20 @@ "locationName" : "engineVersion", "documentation" : "The version of the broker engine. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" }, + "HostInstanceType" : { + "shape" : "__string", + "locationName" : "hostInstanceType", + "documentation" : "The host instance type of the broker to upgrade to. For a list of supported instance types, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide//broker.html#broker-instance-types" + }, "Logs" : { "shape" : "Logs", "locationName" : "logs", "documentation" : "Enables Amazon CloudWatch logging for brokers." + }, + "SecurityGroups" : { + "shape" : "__listOf__string", + "locationName" : "securityGroups", + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." } }, "documentation" : "Updates the broker using the specified properties.", @@ -2615,10 +2665,20 @@ "locationName" : "engineVersion", "documentation" : "The version of the broker engine to upgrade to. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" }, + "HostInstanceType" : { + "shape" : "__string", + "locationName" : "hostInstanceType", + "documentation" : "The host instance type of the broker to upgrade to. For a list of supported instance types, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide//broker.html#broker-instance-types" + }, "Logs" : { "shape" : "Logs", "locationName" : "logs", "documentation" : "The list of information about logs to be enabled for the specified broker." + }, + "SecurityGroups" : { + "shape" : "__listOf__string", + "locationName" : "securityGroups", + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." } } }, @@ -3002,4 +3062,4 @@ } }, "documentation" : "Amazon MQ is a managed message broker service for Apache ActiveMQ that makes it easy to set up and operate message brokers in the cloud. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols." -} \ No newline at end of file +} diff --git a/botocore/data/organizations/2016-11-28/service-2.json b/botocore/data/organizations/2016-11-28/service-2.json index 96495389..0b1f2056 100644 --- a/botocore/data/organizations/2016-11-28/service-2.json +++ b/botocore/data/organizations/2016-11-28/service-2.json @@ -56,7 +56,7 @@ {"shape":"TargetNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"
Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy: This operation can be called only from the organization's master account."
+ "documentation":"Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy: This operation can be called only from the organization's master account." }, "CancelHandshake":{ "name":"CancelHandshake", @@ -97,7 +97,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ],
- "documentation":"Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following: The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations will create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide. AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account. This operation can be called only from the organization's master account. For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide. When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools."
+ "documentation":"Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following: The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide. AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account. This operation can be called only from the organization's master account. For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide. When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools." }, "CreateGovCloudAccount":{ "name":"CreateGovCloudAccount", @@ -384,7 +384,7 @@ {"shape":"TargetNotFoundException"}, {"shape":"TooManyRequestsException"} ],
- "documentation":"Detaches a policy from a target root, organizational unit (OU), or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate. Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, you must attach the replacement policy before you can remove the default one. This is the authorization strategy of whitelisting. If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of blacklisting. This operation can be called only from the organization's master account."
+ "documentation":"Detaches a policy from a target root, organizational unit (OU), or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate. Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, you must attach the replacement policy before you can remove the default one. This is the authorization strategy of whitelisting. If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of blacklisting. This operation can be called only from the organization's master account." }, "DisableAWSServiceAccess":{ "name":"DisableAWSServiceAccess", @@ -423,7 +423,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ],
- "documentation":"Disables an organizational control policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation. This operation can be called only from the organization's master account. If you disable a policy type for a root, it still shows as enabled for the organization if all features are enabled in that organization. Use ListRoots to see the status of policy types for a specified root. Use DescribeOrganization to see the status of policy types in the organization."
+ "documentation":"Disables an organizational control policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation. This is an asynchronous request that AWS performs in the background. If you disable a policy for a root, it still appears enabled for the organization if all features are enabled for the organization. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation. This operation can be called only from the organization's master account. To view the status of available policy types in the organization, use DescribeOrganization." }, "EnableAWSServiceAccess":{ "name":"EnableAWSServiceAccess", @@ -482,7 +482,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"PolicyTypeNotAvailableForOrganizationException"} ],
- "documentation":"Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation. This operation can be called only from the organization's master account. You can enable a policy type in a root only if that policy type is available in the organization. Use DescribeOrganization to view the status of available policy types in the organization. To view the status of policy type in a root, use ListRoots."
+ "documentation":"Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation. This is an asynchronous request that AWS performs in the background. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation. This operation can be called only from the organization's master account. You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization." }, "InviteAccountToOrganization":{ "name":"InviteAccountToOrganization", @@ -1057,7 +1057,7 @@ },
"TargetId":{ "shape":"PolicyTargetId",
- "documentation":"The unique identifier (ID) of the root, OU, or account that you want to attach the policy to. You can get the ID by calling the ListRoots, ListOrganizationalUnitsForParent, or ListAccounts operations. The regex pattern for a target ID string requires one of the following:"
+ "documentation":"The unique identifier (ID) of the root, OU, or account that you want to attach the policy to. You can get the ID by calling the ListRoots, ListOrganizationalUnitsForParent, or ListAccounts operations. The regex pattern for a target ID string requires one of the following:" } } }, @@ -1309,7 +1309,7 @@ "members":{ "FeatureSet":{ "shape":"OrganizationFeatureSet",
- "documentation":"Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality."
+ "documentation":"Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality." } } }, @@ -1331,7 +1331,7 @@ "members":{ "ParentId":{ "shape":"ParentId",
- "documentation":"The unique identifier (ID) of the parent root or OU that you want to create the new OU in. The regex pattern for a parent ID string requires one of the following:"
+ "documentation":"The unique identifier (ID) of the parent root or OU that you want to create the new OU in. The regex pattern for a parent ID string requires one of the following:" }, "Name":{ "shape":"OrganizationalUnitName", @@ -1448,7 +1448,7 @@ "members":{ "CreateAccountRequestId":{ "shape":"CreateAccountRequestId",
- "documentation":"Specifies the operationId that uniquely identifies the request. You can get the ID from the response to an earlier CreateAccount request, or from the ListCreateAccountStatus operation. The regex pattern for an create account request ID string requires \"car-\" followed by from 8 to 32 lower-case letters or digits."
+ "documentation":"Specifies the operationId that uniquely identifies the request. You can get the ID from the response to an earlier CreateAccount request, or from the ListCreateAccountStatus operation. The regex pattern for a create account request ID string requires \"car-\" followed by from 8 to 32 lower-case letters or digits." } } }, @@ -1548,7 +1548,7 @@ }, "TargetId":{ "shape":"PolicyTargetId",
- "documentation":"The unique identifier (ID) of the root, OU, or account that you want to detach the policy from. You can get the ID from the ListRoots, ListOrganizationalUnitsForParent, or ListAccounts operations. The regex pattern for a target ID string requires one of the following:"
+ "documentation":"The unique identifier (ID) of the root, OU, or account that you want to detach the policy from. You can get the ID from the ListRoots, ListOrganizationalUnitsForParent, or ListAccounts operations. The regex pattern for a target ID string requires one of the following:" } } }, @@ -2074,7 +2074,7 @@ "members":{ "ParentId":{ "shape":"ParentId",
- "documentation":"The unique identifier (ID) for the parent root or OU whose children you want to list. The regex pattern for a parent ID string requires one of the following:"
+ "documentation":"The unique identifier (ID) for the parent root or OU whose children you want to list. The regex pattern for a parent ID string requires one of the following:" }, "ChildType":{ "shape":"ChildType", @@ -2199,7 +2199,7 @@ "members":{ "ParentId":{ "shape":"ParentId",
- "documentation":"The unique identifier (ID) of the root or OU whose child OUs you want to list. The regex pattern for a parent ID string requires one of the following:"
+ "documentation":"The unique identifier (ID) of the root or OU whose child OUs you want to list. The regex pattern for a parent ID string requires one of the following:" }, "NextToken":{ "shape":"NextToken", @@ -2230,7 +2230,7 @@ "members":{ "ChildId":{ "shape":"ChildId",
- "documentation":"The unique identifier (ID) of the OU or account whose parent containers you want to list. Don't specify a root. The regex pattern for a child ID string requires one of the following:"
+ "documentation":"The unique identifier (ID) of the OU or account whose parent containers you want to list. Don't specify a root. The regex pattern for a child ID string requires one of the following:" }, "NextToken":{ "shape":"NextToken", @@ -2264,7 +2264,7 @@ "members":{ "TargetId":{ "shape":"PolicyTargetId",
- "documentation":"The unique identifier (ID) of the root, organizational unit, or account whose policies you want to list. The regex pattern for a target ID string requires one of the following:"
+ "documentation":"The unique identifier (ID) of the root, organizational unit, or account whose policies you want to list. The regex pattern for a target ID string requires one of the following:" }, "Filter":{ "shape":"PolicyType", @@ -2444,11 +2444,11 @@ }, "SourceParentId":{ "shape":"ParentId",
- "documentation":"The unique identifier (ID) of the root or organizational unit that you want to move the account from. The regex pattern for a parent ID string requires one of the following:"
+ "documentation":"The unique identifier (ID) of the root or organizational unit that you want to move the account from. The regex pattern for a parent ID string requires one of the following:" }, "DestinationParentId":{ "shape":"ParentId",
- "documentation":"The unique identifier (ID) of the root or organizational unit that you want to move the account to. The regex pattern for a parent ID string requires one of the following:"
+ "documentation":"The unique identifier (ID) of the root or organizational unit that you want to move the account to. The regex pattern for a parent ID string requires one of the following:" } } }, diff --git a/botocore/data/personalize-runtime/2018-05-22/service-2.json index 08efae25..3c29c4a2 100644 --- a/botocore/data/personalize-runtime/2018-05-22/service-2.json +++ b/botocore/data/personalize-runtime/2018-05-22/service-2.json @@ -114,8 +114,7 @@ }, "InputList":{ "type":"list", - "member":{"shape":"ItemID"}, - "max":100 + "member":{"shape":"ItemID"} }, "InvalidInputException":{ "type":"structure", @@ -132,12 +131,10 @@ }, "ItemList":{ "type":"list", - "member":{"shape":"PredictedItem"}, - "max":100 + "member":{"shape":"PredictedItem"} }, "NumResults":{ "type":"integer", - "max":100, "min":0 }, "PredictedItem":{ diff --git a/botocore/data/personalize/2018-05-22/service-2.json index 4fe9ee24..de6f4a9e 100644 --- a/botocore/data/personalize/2018-05-22/service-2.json +++ b/botocore/data/personalize/2018-05-22/service-2.json @@ -76,7 +76,8 @@ {"shape":"InvalidInputException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ResourceAlreadyExistsException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"} ],
"documentation":"Creates a job that imports training data from your data source (an Amazon S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import the training data, you must specify an AWS Identity and Access Management (IAM) role that has permission to read from the data source. The dataset import job replaces any previous data in the dataset. Status A dataset import job can be in one of the following states: To get the status of the import job, call DescribeDatasetImportJob, providing the Amazon Resource Name (ARN) of the dataset import job. The dataset import is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed. Importing takes time. You must wait until the status shows as ACTIVE before training a model using the dataset. Related APIs" }, @@ -528,7 +529,8 @@ "output":{"shape":"ListSolutionVersionsResponse"}, "errors":[ {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"} ],
"documentation":"Returns a list of solution versions for the given solution. When a solution is not specified, all the solution versions associated with the account are listed. The response provides the properties for each solution version, including the Amazon Resource Name (ARN). For more information on solutions, see CreateSolution.", "idempotent":true @@ -669,6 +671,7 @@ "type":"string", "max":10000 }, + "Boolean":{"type":"boolean"}, "Campaign":{ "type":"structure", "members":{ @@ -1030,7 +1033,7 @@ "documentation":"The name for the solution." }, "performHPO":{ - "shape":"PerformHPO", + "shape":"Boolean",
"documentation":"Whether to perform hyperparameter optimization (HPO) on the specified or selected recipe. The default is false. When performing AutoML, this parameter is always true and you should not set it to false." }, "performAutoML":{ @@ -2592,6 +2595,10 @@ "shape":"SolutionConfig", "documentation":"Describes the configuration properties for the solution." }, + "trainingHours":{ + "shape":"TrainingHours",
+ "documentation":"The time used to train the model." + },
"status":{ "shape":"Status",
"documentation":"The status of the solution version. A solution version can be in one of the following states:" @@ -2655,6 +2662,10 @@ "type":"string", "max":256 }, + "TrainingHours":{ + "type":"double", + "min":0 + }, "TrainingInputMode":{ "type":"string", "max":256 diff --git a/botocore/data/qldb-session/2019-07-11/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/qldb-session/2019-07-11/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/qldb-session/2019-07-11/service-2.json new file mode 100644 index 00000000..4cbf48b6 --- /dev/null +++ b/botocore/data/qldb-session/2019-07-11/service-2.json @@ -0,0 +1,381 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-07-11", + "endpointPrefix":"session.qldb", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"QLDB Session", + "serviceFullName":"Amazon QLDB Session", + "serviceId":"QLDB Session", + "signatureVersion":"v4", + "signingName":"qldb", + "targetPrefix":"QLDBSession", + "uid":"qldb-session-2019-07-11" + }, + "operations":{ + "SendCommand":{ + "name":"SendCommand", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendCommandRequest"}, + "output":{"shape":"SendCommandResult"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidSessionException"}, + {"shape":"OccConflictException"}, + {"shape":"RateExceededException"}, + {"shape":"LimitExceededException"} ],
+ "documentation":"Sends a command to an Amazon QLDB ledger." } }, + "shapes":{
+ "AbortTransactionRequest":{ + "type":"structure", + "members":{ + }, + "documentation":"Contains the details of the transaction to abort." },
+ "AbortTransactionResult":{ + "type":"structure", + "members":{ + }, + "documentation":"Contains the details of the aborted transaction." },
+ "BadRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Code":{"shape":"ErrorCode"} + }, + "documentation":"Returned if the request is malformed or contains an error such as an invalid parameter value or a missing required parameter.", + "exception":true + },
+ "CommitDigest":{"type":"blob"},
+ "CommitTransactionRequest":{ + "type":"structure", + "required":[ + "TransactionId", + "CommitDigest" + ], + "members":{ + "TransactionId":{ + "shape":"TransactionId", + "documentation":"Specifies the transaction id of the transaction to commit." }, + "CommitDigest":{ + "shape":"CommitDigest", + "documentation":"Specifies the commit digest for the transaction to commit. For every active transaction, the commit digest must be passed. QLDB validates CommitDigest and rejects the commit with an error if the digest computed on the client does not match the digest computed by QLDB." } }, + "documentation":"Contains the details of the transaction to commit." },
+ "CommitTransactionResult":{ + "type":"structure", + "members":{ + "TransactionId":{ + "shape":"TransactionId", + "documentation":"The transaction id of the committed transaction." }, + "CommitDigest":{ + "shape":"CommitDigest", + "documentation":"The commit digest of the committed transaction." } }, + "documentation":"Contains the details of the committed transaction." },

Specifies a request to end the session.

" + }, + "EndSessionResult":{ + "type":"structure", + "members":{ + }, + "documentation":"

Contains the details of the ended session.

" + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "ExecuteStatementRequest":{ + "type":"structure", + "required":[ + "TransactionId", + "Statement" + ], + "members":{ + "TransactionId":{ + "shape":"TransactionId", + "documentation":"

Specifies the transaction id of the request.

" + }, + "Statement":{ + "shape":"Statement", + "documentation":"

Specifies the statement of the request.

" + }, + "Parameters":{ + "shape":"StatementParameters", + "documentation":"

Specifies the parameters for the parameterized statement in the request.

" + } + }, + "documentation":"

Specifies a request to execute a statement.

" + }, + "ExecuteStatementResult":{ + "type":"structure", + "members":{ + "FirstPage":{ + "shape":"Page", + "documentation":"

Contains the details of the first fetched page.

" + } + }, + "documentation":"

Contains the details of the executed statement.

" + }, + "FetchPageRequest":{ + "type":"structure", + "required":[ + "TransactionId", + "NextPageToken" + ], + "members":{ + "TransactionId":{ + "shape":"TransactionId", + "documentation":"

Specifies the transaction id of the page to be fetched.

" + }, + "NextPageToken":{ + "shape":"PageToken", + "documentation":"

Specifies the next page token of the page to be fetched.

" + } + }, + "documentation":"

Specifies the details of the page to be fetched.

" + }, + "FetchPageResult":{ + "type":"structure", + "members":{ + "Page":{ + "shape":"Page", + "documentation":"

Contains details of the fetched page.

" + } + }, + "documentation":"

Contains the page that was fetched.

" + }, + "InvalidSessionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Code":{"shape":"ErrorCode"} + }, + "documentation":"

Returned if the session doesn't exist anymore because it timed out or expired.

", + "exception":true + }, + "IonBinary":{ + "type":"blob", + "max":131072, + "min":1 + }, + "IonText":{ + "type":"string", + "max":1048576, + "min":1 + }, + "LedgerName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"(?!^.*--)(?!^[0-9]+$)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned if a resource limit, such as the number of active sessions, is exceeded.

", + "exception":true + }, + "OccConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned when a transaction cannot be written to the journal due to a failure in the verification phase of Optimistic Concurrency Control.

", + "exception":true + }, + "Page":{ + "type":"structure", + "members":{ + "Values":{ + "shape":"ValueHolders", + "documentation":"

A structure that contains values in multiple encoding formats.

" + }, + "NextPageToken":{ + "shape":"PageToken", + "documentation":"

The token of the next page.

" + } + }, + "documentation":"

Contains details of the fetched page.

" + }, + "PageToken":{ + "type":"string", + "max":1024, + "min":4, + "pattern":"^[A-Za-z-0-9+/=]+$" + }, + "RateExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned when the rate of requests exceeds the allowed throughput.

", + "exception":true + }, + "SendCommandRequest":{ + "type":"structure", + "members":{ + "SessionToken":{ + "shape":"SessionToken", + "documentation":"

Specifies the session token for the current command. A session token is constant throughout the life of the session.

To obtain a session token, run the StartSession command. This SessionToken is required for every subsequent command that is issued during the current session.

" + }, + "StartSession":{ + "shape":"StartSessionRequest", + "documentation":"

Command to start a new session. A session token is obtained as part of the response.

" + }, + "StartTransaction":{ + "shape":"StartTransactionRequest", + "documentation":"

Command to start a new transaction.

" + }, + "EndSession":{ + "shape":"EndSessionRequest", + "documentation":"

Command to end the current session.

" + }, + "CommitTransaction":{ + "shape":"CommitTransactionRequest", + "documentation":"

Command to commit the specified transaction.

" + }, + "AbortTransaction":{ + "shape":"AbortTransactionRequest", + "documentation":"

Command to abort the current transaction.

" + }, + "ExecuteStatement":{ + "shape":"ExecuteStatementRequest", + "documentation":"

Command to execute a statement in the specified transaction.

" + }, + "FetchPage":{ + "shape":"FetchPageRequest", + "documentation":"

Command to fetch a page.

" + } + } + }, + "SendCommandResult":{ + "type":"structure", + "members":{ + "StartSession":{ + "shape":"StartSessionResult", + "documentation":"

Contains the details of the started session that includes a session token. This SessionToken is required for every subsequent command that is issued during the current session.

" + }, + "StartTransaction":{ + "shape":"StartTransactionResult", + "documentation":"

Contains the details of the started transaction.

" + }, + "EndSession":{ + "shape":"EndSessionResult", + "documentation":"

Contains the details of the ended session.

" + }, + "CommitTransaction":{ + "shape":"CommitTransactionResult", + "documentation":"

Contains the details of the committed transaction.

" + }, + "AbortTransaction":{ + "shape":"AbortTransactionResult", + "documentation":"

Contains the details of the aborted transaction.

" + }, + "ExecuteStatement":{ + "shape":"ExecuteStatementResult", + "documentation":"

Contains the details of the executed statement.

" + }, + "FetchPage":{ + "shape":"FetchPageResult", + "documentation":"

Contains the details of the fetched page.

" + } + } + }, + "SessionToken":{ + "type":"string", + "max":1024, + "min":4, + "pattern":"^[A-Za-z-0-9+/=]+$" + }, + "StartSessionRequest":{ + "type":"structure", + "required":["LedgerName"], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger to start a new session against.

" + } + }, + "documentation":"

Specifies a request to start a new session.

" + }, + "StartSessionResult":{ + "type":"structure", + "members":{ + "SessionToken":{ + "shape":"SessionToken", + "documentation":"

Session token of the started session. This SessionToken is required for every subsequent command that is issued during the current session.

" + } + }, + "documentation":"

Contains the details of the started session.

" + }, + "StartTransactionRequest":{ + "type":"structure", + "members":{ + }, + "documentation":"

Specifies a request to start a transaction.

" + }, + "StartTransactionResult":{ + "type":"structure", + "members":{ + "TransactionId":{ + "shape":"TransactionId", + "documentation":"

The transaction id of the started transaction.

" + } + }, + "documentation":"

Contains the details of the started transaction.

" + }, + "Statement":{ + "type":"string", + "max":100000, + "min":1 + }, + "StatementParameters":{ + "type":"list", + "member":{"shape":"ValueHolder"} + }, + "TransactionId":{ + "type":"string", + "max":22, + "min":22, + "pattern":"^[A-Za-z-0-9]+$" + }, + "ValueHolder":{ + "type":"structure", + "members":{ + "IonBinary":{ + "shape":"IonBinary", + "documentation":"

An Amazon Ion binary value contained in a ValueHolder structure.

" + }, + "IonText":{ + "shape":"IonText", + "documentation":"

An Amazon Ion plaintext value contained in a ValueHolder structure.

" + } + }, + "documentation":"

A structure that can contain values in multiple encoding formats.

" + }, + "ValueHolders":{ + "type":"list", + "member":{"shape":"ValueHolder"} + } + }, + "documentation":"

The transactional data APIs for Amazon QLDB

" +} diff --git a/botocore/data/qldb/2019-01-02/paginators-1.json b/botocore/data/qldb/2019-01-02/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/qldb/2019-01-02/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/qldb/2019-01-02/service-2.json b/botocore/data/qldb/2019-01-02/service-2.json new file mode 100644 index 00000000..ab53e53a --- /dev/null +++ b/botocore/data/qldb/2019-01-02/service-2.json @@ -0,0 +1,1036 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-01-02", + "endpointPrefix":"qldb", + "jsonVersion":"1.0", + "protocol":"rest-json", + "serviceAbbreviation":"QLDB", + "serviceFullName":"Amazon QLDB", + "serviceId":"QLDB", + "signatureVersion":"v4", + "signingName":"qldb", + "uid":"qldb-2019-01-02" + }, + "operations":{ + "CreateLedger":{ + "name":"CreateLedger", + "http":{ + "method":"POST", + "requestUri":"/ledgers" + }, + "input":{"shape":"CreateLedgerRequest"}, + "output":{"shape":"CreateLedgerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Creates a new ledger in your AWS account.
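A minimal boto3 sketch (the ledger name and tags are placeholders; ALLOW_ALL is the only permissions mode this API version defines):

    import boto3

    qldb = boto3.client('qldb')
    ledger = qldb.create_ledger(
        Name='my-ledger',              # placeholder; must be unique in the current Region
        PermissionsMode='ALLOW_ALL',   # the sole enum value in this model
        DeletionProtection=True,       # also the default when omitted
        Tags={'team': 'example'},      # optional key-value tags
    )
    print(ledger['State'])  # typically CREATING until the ledger becomes ACTIVE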

" + }, + "DeleteLedger":{ + "name":"DeleteLedger", + "http":{ + "method":"DELETE", + "requestUri":"/ledgers/{name}" + }, + "input":{"shape":"DeleteLedgerRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

Deletes a ledger and all of its contents. This action is irreversible.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.
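A sketch of the resulting two-step delete when deletion protection is on (the ledger name is a placeholder):

    import boto3

    qldb = boto3.client('qldb')
    # Deletion protection must be switched off first...
    qldb.update_ledger(Name='my-ledger', DeletionProtection=False)
    # ...then the ledger and all of its contents can be deleted.
    qldb.delete_ledger(Name='my-ledger')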

" + }, + "DescribeJournalS3Export":{ + "name":"DescribeJournalS3Export", + "http":{ + "method":"GET", + "requestUri":"/ledgers/{name}/journal-s3-exports/{exportId}" + }, + "input":{"shape":"DescribeJournalS3ExportRequest"}, + "output":{"shape":"DescribeJournalS3ExportResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns information about a journal export job, including the ledger name, export ID, when it was created, current status, and its start and end time export parameters.

If the export job with the given ExportId doesn't exist, then this operation throws ResourceNotFoundException.

If the ledger with the given Name doesn't exist, then this operation throws ResourceNotFoundException.

" + }, + "DescribeLedger":{ + "name":"DescribeLedger", + "http":{ + "method":"GET", + "requestUri":"/ledgers/{name}" + }, + "input":{"shape":"DescribeLedgerRequest"}, + "output":{"shape":"DescribeLedgerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns information about a ledger, including its state and when it was created.

" + }, + "ExportJournalToS3":{ + "name":"ExportJournalToS3", + "http":{ + "method":"POST", + "requestUri":"/ledgers/{name}/journal-s3-exports" + }, + "input":{"shape":"ExportJournalToS3Request"}, + "output":{"shape":"ExportJournalToS3Response"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

Exports journal contents within a date and time range from a ledger into a specified Amazon Simple Storage Service (Amazon S3) bucket. The data is written as files in Amazon Ion format.

If the ledger with the given Name doesn't exist, then this operation throws ResourceNotFoundException.

If the ledger with the given Name is in CREATING status, then this operation throws ResourcePreconditionNotMetException.

You can initiate up to two concurrent journal export requests for each ledger. Beyond this limit, journal export requests throw LimitExceededException.
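A hedged sketch of an export request (the ledger, bucket, prefix, and role ARN are placeholders; botocore serializes the datetime values into the required UTC timestamps):

    import boto3
    from datetime import datetime, timezone

    qldb = boto3.client('qldb')
    export = qldb.export_journal_to_s3(
        Name='my-ledger',
        InclusiveStartTime=datetime(2019, 6, 1, tzinfo=timezone.utc),
        ExclusiveEndTime=datetime(2019, 6, 13, 21, 36, 34, tzinfo=timezone.utc),
        S3ExportConfiguration={
            'Bucket': 'my-export-bucket',
            'Prefix': 'journal-exports/my-ledger/',
            'EncryptionConfiguration': {'ObjectEncryptionType': 'SSE_S3'},
        },
        RoleArn='arn:aws:iam::123456789012:role/qldb-export-role',
    )
    # Check the job later with the returned export ID.
    status = qldb.describe_journal_s3_export(
        Name='my-ledger', ExportId=export['ExportId'])['ExportDescription']['Status']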

" + }, + "GetBlock":{ + "name":"GetBlock", + "http":{ + "method":"POST", + "requestUri":"/ledgers/{name}/block" + }, + "input":{"shape":"GetBlockRequest"}, + "output":{"shape":"GetBlockResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

Returns a journal block object at a specified address in a ledger. Also returns a proof of the specified block for verification if DigestTipAddress is provided.

If the specified ledger doesn't exist or is in DELETING status, then this operation throws ResourceNotFoundException.

If the specified ledger is in CREATING status, then this operation throws ResourcePreconditionNotMetException.

If no block exists with the specified address, then this operation throws InvalidParameterException.

" + }, + "GetDigest":{ + "name":"GetDigest", + "http":{ + "method":"POST", + "requestUri":"/ledgers/{name}/digest" + }, + "input":{"shape":"GetDigestRequest"}, + "output":{"shape":"GetDigestResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

Returns the digest of a ledger at the latest committed block in the journal. The response includes a 256-bit hash value and a block address.
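For example (the ledger name is a placeholder):

    import boto3

    qldb = boto3.client('qldb')
    digest = qldb.get_digest(Name='my-ledger')
    print(digest['Digest'])                       # 32-byte (256-bit) hash value
    print(digest['DigestTipAddress']['IonText'])  # block address the digest covers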

" + }, + "GetRevision":{ + "name":"GetRevision", + "http":{ + "method":"POST", + "requestUri":"/ledgers/{name}/revision" + }, + "input":{"shape":"GetRevisionRequest"}, + "output":{"shape":"GetRevisionResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

Returns a revision data object for a specified document ID and block address. Also returns a proof of the specified revision for verification if DigestTipAddress is provided.

" + }, + "ListJournalS3Exports":{ + "name":"ListJournalS3Exports", + "http":{ + "method":"GET", + "requestUri":"/journal-s3-exports" + }, + "input":{"shape":"ListJournalS3ExportsRequest"}, + "output":{"shape":"ListJournalS3ExportsResponse"}, + "documentation":"

Returns an array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3Exports multiple times.

" + }, + "ListJournalS3ExportsForLedger":{ + "name":"ListJournalS3ExportsForLedger", + "http":{ + "method":"GET", + "requestUri":"/ledgers/{name}/journal-s3-exports" + }, + "input":{"shape":"ListJournalS3ExportsForLedgerRequest"}, + "output":{"shape":"ListJournalS3ExportsForLedgerResponse"}, + "documentation":"

Returns an array of journal export job descriptions for a specified ledger.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3ExportsForLedger multiple times.

" + }, + "ListLedgers":{ + "name":"ListLedgers", + "http":{ + "method":"GET", + "requestUri":"/ledgers" + }, + "input":{"shape":"ListLedgersRequest"}, + "output":{"shape":"ListLedgersResponse"}, + "documentation":"

Returns an array of ledger summaries that are associated with the current AWS account and Region.

This action returns a maximum of 100 items and is paginated so that you can retrieve all the items by calling ListLedgers multiple times.
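Because the shipped paginators file for QLDB is empty, pagination is handled by hand with NextToken; a sketch:

    import boto3

    qldb = boto3.client('qldb')
    ledgers, kwargs = [], {'MaxResults': 100}
    while True:
        page = qldb.list_ledgers(**kwargs)
        ledgers.extend(page.get('Ledgers', []))
        if not page.get('NextToken'):
            break  # a missing NextToken means there are no more results
        kwargs['NextToken'] = page['NextToken']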

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns all tags for a specified Amazon QLDB resource.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds one or more tags to a specified Amazon QLDB resource.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, your request fails and returns an error.
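A sketch using the example ARN from this model (the tag key and value are placeholders):

    import boto3

    qldb = boto3.client('qldb')
    arn = 'arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger'
    qldb.tag_resource(ResourceArn=arn, Tags={'env': 'test'})  # up to 50 tags per resource
    print(qldb.list_tags_for_resource(ResourceArn=arn)['Tags'])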

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes one or more tags from a specified Amazon QLDB resource. You can specify up to 50 tag keys to remove.

" + }, + "UpdateLedger":{ + "name":"UpdateLedger", + "http":{ + "method":"PATCH", + "requestUri":"/ledgers/{name}" + }, + "input":{"shape":"UpdateLedgerRequest"}, + "output":{"shape":"UpdateLedgerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates properties on a ledger.

" + } + }, + "shapes":{ + "Arn":{ + "type":"string", + "max":1600, + "min":20 + }, + "CreateLedgerRequest":{ + "type":"structure", + "required":[ + "Name", + "PermissionsMode" + ], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger that you want to create. The name must be unique among all of your ledgers in the current AWS Region.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The key-value pairs to add as tags to the ledger that you want to create. Tag keys are case sensitive. Tag values are case sensitive and can be null.

" + }, + "PermissionsMode":{ + "shape":"PermissionsMode", + "documentation":"

The permissions mode to assign to the ledger that you want to create.

" + }, + "DeletionProtection":{ + "shape":"DeletionProtection", + "documentation":"

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

" + } + } + }, + "CreateLedgerResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

" + }, + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the ledger.

" + }, + "State":{ + "shape":"LedgerState", + "documentation":"

The current status of the ledger.

" + }, + "CreationDateTime":{ + "shape":"Timestamp", + "documentation":"

The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

" + }, + "DeletionProtection":{ + "shape":"DeletionProtection", + "documentation":"

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

" + } + } + }, + "DeleteLedgerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger that you want to delete.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DeletionProtection":{"type":"boolean"}, + "DescribeJournalS3ExportRequest":{ + "type":"structure", + "required":[ + "Name", + "ExportId" + ], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + }, + "ExportId":{ + "shape":"UniqueId", + "documentation":"

The unique ID of the journal export job that you want to describe.

", + "location":"uri", + "locationName":"exportId" + } + } + }, + "DescribeJournalS3ExportResponse":{ + "type":"structure", + "required":["ExportDescription"], + "members":{ + "ExportDescription":{ + "shape":"JournalS3ExportDescription", + "documentation":"

Information about the journal export job returned by a DescribeJournalS3Export request.

" + } + } + }, + "DescribeLedgerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger that you want to describe.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeLedgerResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

" + }, + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the ledger.

" + }, + "State":{ + "shape":"LedgerState", + "documentation":"

The current status of the ledger.

" + }, + "CreationDateTime":{ + "shape":"Timestamp", + "documentation":"

The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

" + }, + "DeletionProtection":{ + "shape":"DeletionProtection", + "documentation":"

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

" + } + } + }, + "Digest":{ + "type":"blob", + "max":32, + "min":32 + }, + "ErrorMessage":{"type":"string"}, + "ExportJournalToS3Request":{ + "type":"structure", + "required":[ + "Name", + "InclusiveStartTime", + "ExclusiveEndTime", + "S3ExportConfiguration", + "RoleArn" + ], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + }, + "InclusiveStartTime":{ + "shape":"Timestamp", + "documentation":"

The inclusive start date and time for the range of journal contents that you want to export.

The InclusiveStartTime must be in ISO 8601 date and time format and in Coordinated Universal Time (UTC). For example: 2019-06-13T21:36:34Z

The InclusiveStartTime must be before ExclusiveEndTime.

If you provide an InclusiveStartTime that is before the ledger's CreationDateTime, Amazon QLDB defaults it to the ledger's CreationDateTime.

" + }, + "ExclusiveEndTime":{ + "shape":"Timestamp", + "documentation":"

The exclusive end date and time for the range of journal contents that you want to export.

The ExclusiveEndTime must be in ISO 8601 date and time format and in Coordinated Universal Time (UTC). For example: 2019-06-13T21:36:34Z

The ExclusiveEndTime must be less than or equal to the current UTC date and time.

" + }, + "S3ExportConfiguration":{ + "shape":"S3ExportConfiguration", + "documentation":"

The configuration settings of the Amazon S3 bucket destination for your export request.

" + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

" + } + } + }, + "ExportJournalToS3Response":{ + "type":"structure", + "required":["ExportId"], + "members":{ + "ExportId":{ + "shape":"UniqueId", + "documentation":"

The unique ID that QLDB assigns to each journal export job.

To describe your export request and check the status of the job, you can use ExportId to call DescribeJournalS3Export.

" + } + } + }, + "ExportStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "COMPLETED", + "CANCELLED" + ] + }, + "GetBlockRequest":{ + "type":"structure", + "required":[ + "Name", + "BlockAddress" + ], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + }, + "BlockAddress":{ + "shape":"ValueHolder", + "documentation":"

The location of the block that you want to request. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

For example: {strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:14}
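Passed through boto3, the address goes in as Ion text inside a ValueHolder (the ledger name is a placeholder; the strand ID is the example value above):

    import boto3

    qldb = boto3.client('qldb')
    block = qldb.get_block(
        Name='my-ledger',
        BlockAddress={'IonText': '{strandId:"BlFTjlSXze9BIh1KOszcE3", sequenceNo:14}'},
    )
    print(block['Block']['IonText'])  # the block data object in Amazon Ion format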

" + }, + "DigestTipAddress":{ + "shape":"ValueHolder", + "documentation":"

The latest block location covered by the digest for which to request a proof. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

For example: {strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:49}

" + } + } + }, + "GetBlockResponse":{ + "type":"structure", + "required":["Block"], + "members":{ + "Block":{ + "shape":"ValueHolder", + "documentation":"

The block data object in Amazon Ion format.

" + }, + "Proof":{ + "shape":"ValueHolder", + "documentation":"

The proof object in Amazon Ion format returned by a GetBlock request. A proof contains the list of hash values required to recalculate the specified digest using a Merkle tree, starting with the specified block.

" + } + } + }, + "GetDigestRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + } + } + }, + "GetDigestResponse":{ + "type":"structure", + "required":[ + "Digest", + "DigestTipAddress" + ], + "members":{ + "Digest":{ + "shape":"Digest", + "documentation":"

The 256-bit hash value representing the digest returned by a GetDigest request.

" + }, + "DigestTipAddress":{ + "shape":"ValueHolder", + "documentation":"

The latest block location covered by the digest that you requested. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

" + } + } + }, + "GetRevisionRequest":{ + "type":"structure", + "required":[ + "Name", + "BlockAddress", + "DocumentId" + ], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + }, + "BlockAddress":{ + "shape":"ValueHolder", + "documentation":"

The block location of the document revision to be verified. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

For example: {strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:14}

" + }, + "DocumentId":{ + "shape":"UniqueId", + "documentation":"

The unique ID of the document to be verified.

" + }, + "DigestTipAddress":{ + "shape":"ValueHolder", + "documentation":"

The latest block location covered by the digest for which to request a proof. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

For example: {strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:49}

" + } + } + }, + "GetRevisionResponse":{ + "type":"structure", + "required":["Revision"], + "members":{ + "Proof":{ + "shape":"ValueHolder", + "documentation":"

The proof object in Amazon Ion format returned by a GetRevision request. A proof contains the list of hash values that are required to recalculate the specified digest using a Merkle tree, starting with the specified document revision.

" + }, + "Revision":{ + "shape":"ValueHolder", + "documentation":"

The document revision data object in Amazon Ion format.

" + } + } + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ParameterName":{ + "shape":"ParameterName", + "documentation":"

The name of the invalid parameter.

" + } + }, + "documentation":"

One or more parameters in the request aren't valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "IonText":{ + "type":"string", + "max":1048576, + "min":1, + "sensitive":true + }, + "JournalS3ExportDescription":{ + "type":"structure", + "required":[ + "LedgerName", + "ExportId", + "ExportCreationTime", + "Status", + "InclusiveStartTime", + "ExclusiveEndTime", + "S3ExportConfiguration", + "RoleArn" + ], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

" + }, + "ExportId":{ + "shape":"UniqueId", + "documentation":"

The unique ID of the journal export job.

" + }, + "ExportCreationTime":{ + "shape":"Timestamp", + "documentation":"

The date and time, in epoch time format, when the export job was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

" + }, + "Status":{ + "shape":"ExportStatus", + "documentation":"

The current state of the journal export job.

" + }, + "InclusiveStartTime":{ + "shape":"Timestamp", + "documentation":"

The inclusive start date and time for the range of journal contents that are specified in the original export request.

" + }, + "ExclusiveEndTime":{ + "shape":"Timestamp", + "documentation":"

The exclusive end date and time for the range of journal contents that are specified in the original export request.

" + }, + "S3ExportConfiguration":{"shape":"S3ExportConfiguration"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

" + } + }, + "documentation":"

The information about a journal export job, including the ledger name, export ID, when it was created, current status, and its start and end time export parameters.

" + }, + "JournalS3ExportList":{ + "type":"list", + "member":{"shape":"JournalS3ExportDescription"} + }, + "LedgerList":{ + "type":"list", + "member":{"shape":"LedgerSummary"} + }, + "LedgerName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"(?!^.*--)(?!^[0-9]+$)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" + }, + "LedgerState":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "DELETED" + ] + }, + "LedgerSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

" + }, + "State":{ + "shape":"LedgerState", + "documentation":"

The current status of the ledger.

" + }, + "CreationDateTime":{ + "shape":"Timestamp", + "documentation":"

The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

" + } + }, + "documentation":"

Information about a ledger, including its name, state, and when it was created.

" + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource.

" + } + }, + "documentation":"

You have reached the limit on the maximum number of resources allowed.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListJournalS3ExportsForLedgerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in a single ListJournalS3ExportsForLedger request. (The actual number of results returned might be fewer.)

", + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListJournalS3ExportsForLedger call, then you should use that value as input here.

", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListJournalS3ExportsForLedgerResponse":{ + "type":"structure", + "members":{ + "JournalS3Exports":{ + "shape":"JournalS3ExportList", + "documentation":"

The array of journal export job descriptions that are associated with the specified ledger.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"" + } + } + }, + "ListJournalS3ExportsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in a single ListJournalS3Exports request. (The actual number of results returned might be fewer.)

", + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListJournalS3Exports call, then you should use that value as input here.

", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListJournalS3ExportsResponse":{ + "type":"structure", + "members":{ + "JournalS3Exports":{ + "shape":"JournalS3ExportList", + "documentation":"

The array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"" + } + } + }, + "ListLedgersRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in a single ListLedgers request. (The actual number of results returned might be fewer.)

", + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListLedgers call, then you should use that value as input here.

", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListLedgersResponse":{ + "type":"structure", + "members":{ + "Ledgers":{ + "shape":"LedgerList", + "documentation":"

The array of ledger summaries that are associated with the current AWS account and Region.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token, indicating whether there are more results available:

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for which you want to list the tags. For example:

arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

The tags that are currently associated with the specified Amazon QLDB resource.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":4, + "pattern":"^[A-Za-z-0-9+/=]+$" + }, + "ParameterName":{"type":"string"}, + "PermissionsMode":{ + "type":"string", + "enum":["ALLOW_ALL"] + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource.

" + }, + "ResourceName":{ + "shape":"ResourceName", + "documentation":"

The name of the resource.

" + } + }, + "documentation":"

The specified resource already exists.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource.

" + }, + "ResourceName":{ + "shape":"ResourceName", + "documentation":"

The name of the resource.

" + } + }, + "documentation":"

The specified resource can't be modified at this time.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceName":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource.

" + }, + "ResourceName":{ + "shape":"ResourceName", + "documentation":"

The name of the resource.

" + } + }, + "documentation":"

The specified resource doesn't exist.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourcePreconditionNotMetException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource.

" + }, + "ResourceName":{ + "shape":"ResourceName", + "documentation":"

The name of the resource.

" + } + }, + "documentation":"

The operation failed because a condition wasn't satisfied in advance.

", + "error":{"httpStatusCode":412}, + "exception":true + }, + "ResourceType":{"type":"string"}, + "S3Bucket":{ + "type":"string", + "max":255, + "min":3, + "pattern":"^[A-Za-z-0-9-_.]+$" + }, + "S3EncryptionConfiguration":{ + "type":"structure", + "required":["ObjectEncryptionType"], + "members":{ + "ObjectEncryptionType":{ + "shape":"S3ObjectEncryptionType", + "documentation":"

The Amazon S3 object encryption type.

To learn more about server-side encryption options in Amazon S3, see Protecting Data Using Server-Side Encryption in the Amazon S3 Developer Guide.

" + }, + "KmsKeyArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for a customer master key (CMK) in AWS Key Management Service (AWS KMS).

You must provide a KmsKeyArn if you specify SSE_KMS as the ObjectEncryptionType.

KmsKeyArn is not required if you specify SSE_S3 as the ObjectEncryptionType.

" + } + }, + "documentation":"

The encryption settings that are used by a journal export job to write data in an Amazon Simple Storage Service (Amazon S3) bucket.

" + }, + "S3ExportConfiguration":{ + "type":"structure", + "required":[ + "Bucket", + "Prefix", + "EncryptionConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"S3Bucket", + "documentation":"

The Amazon S3 bucket name in which a journal export job writes the journal contents.

The bucket name must comply with the Amazon S3 bucket naming conventions. For more information, see Bucket Restrictions and Limitations in the Amazon S3 Developer Guide.

" + }, + "Prefix":{ + "shape":"S3Prefix", + "documentation":"

The prefix for the Amazon S3 bucket in which a journal export job writes the journal contents.

The prefix must comply with Amazon S3 key naming rules and restrictions. For more information, see Object Key and Metadata in the Amazon S3 Developer Guide.

The following are examples of valid Prefix values:

" + }, + "EncryptionConfiguration":{ + "shape":"S3EncryptionConfiguration", + "documentation":"

The encryption settings that are used by a journal export job to write data in an Amazon S3 bucket.

" + } + }, + "documentation":"

The Amazon Simple Storage Service (Amazon S3) bucket location in which a journal export job writes the journal contents.

" + }, + "S3ObjectEncryptionType":{ + "type":"string", + "enum":[ + "SSE_KMS", + "SSE_S3", + "NO_ENCRYPTION" + ] + }, + "S3Prefix":{ + "type":"string", + "max":128, + "min":0 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) to which you want to add the tags. For example:

arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger

", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The key-value pairs to add as tags to the specified QLDB resource. Tag keys are case sensitive. If you specify a key that already exists for the resource, your request fails and returns an error. Tag values are case sensitive and can be null.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "Timestamp":{"type":"timestamp"}, + "UniqueId":{ + "type":"string", + "max":22, + "min":22, + "pattern":"^[A-Za-z-0-9]+$" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) from which you want to remove the tags. For example:

arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger

", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The list of tag keys that you want to remove.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLedgerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + }, + "DeletionProtection":{ + "shape":"DeletionProtection", + "documentation":"

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

" + } + } + }, + "UpdateLedgerResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

" + }, + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the ledger.

" + }, + "State":{ + "shape":"LedgerState", + "documentation":"

The current status of the ledger.

" + }, + "CreationDateTime":{ + "shape":"Timestamp", + "documentation":"

The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

" + }, + "DeletionProtection":{ + "shape":"DeletionProtection", + "documentation":"

The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

" + } + } + }, + "ValueHolder":{ + "type":"structure", + "members":{ + "IonText":{ + "shape":"IonText", + "documentation":"

An Amazon Ion plaintext value contained in a ValueHolder structure.

" + } + }, + "documentation":"

A structure that can contain an Amazon Ion value in multiple encoding formats.

", + "sensitive":true + } + }, + "documentation":"

The control plane for Amazon QLDB

" +} diff --git a/botocore/data/ram/2018-01-04/service-2.json b/botocore/data/ram/2018-01-04/service-2.json index 6957aa5b..e227e50d 100644 --- a/botocore/data/ram/2018-01-04/service-2.json +++ b/botocore/data/ram/2018-01-04/service-2.json @@ -28,7 +28,9 @@ {"shape":"ResourceShareInvitationAlreadyRejectedException"}, {"shape":"ResourceShareInvitationExpiredException"}, {"shape":"ServerInternalException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidClientTokenException"}, + {"shape":"IdempotentParameterMismatchException"} ], "documentation":"

Accepts an invitation to a resource share from another AWS account.

" }, @@ -73,6 +75,7 @@ {"shape":"InvalidParameterException"}, {"shape":"OperationNotPermittedException"}, {"shape":"ResourceShareLimitExceededException"}, + {"shape":"TagPolicyViolationException"}, {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], @@ -134,7 +137,7 @@ {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Enables resource sharing within your organization.

" + "documentation":"

Enables resource sharing within your AWS Organization.

The caller must be the master account for the AWS Organization.

" }, "GetResourcePolicies":{ "name":"GetResourcePolicies", @@ -151,7 +154,7 @@ {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Gets the policies for the specifies resources.

" + "documentation":"

Gets the policies for the specified resources that you own and have shared.

" }, "GetResourceShareAssociations":{ "name":"GetResourceShareAssociations", @@ -170,7 +173,7 @@ {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Gets the associations for the specified resource share.

" + "documentation":"

Gets the resources or principals for the resource shares that you own.

" }, "GetResourceShareInvitations":{ "name":"GetResourceShareInvitations", @@ -189,7 +192,7 @@ {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Gets the specified invitations for resource sharing.

" + "documentation":"

Gets the invitations for resource sharing that you've received.

" }, "GetResourceShares":{ "name":"GetResourceShares", @@ -207,7 +210,28 @@ {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Gets the specified resource shares or all of your resource shares.

" + "documentation":"

Gets the resource shares that you own or the resource shares that are shared with you.

" + }, + "ListPendingInvitationResources":{ + "name":"ListPendingInvitationResources", + "http":{ + "method":"POST", + "requestUri":"/listpendinginvitationresources" + }, + "input":{"shape":"ListPendingInvitationResourcesRequest"}, + "output":{"shape":"ListPendingInvitationResourcesResponse"}, + "errors":[ + {"shape":"MalformedArnException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServerInternalException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceShareInvitationArnNotFoundException"}, + {"shape":"MissingRequiredParameterException"}, + {"shape":"ResourceShareInvitationAlreadyRejectedException"}, + {"shape":"ResourceShareInvitationExpiredException"} + ], + "documentation":"

Lists the resources in a resource share that is shared with you but for which the invitation is still pending.
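A sketch of pairing this with GetResourceShareInvitations (field names follow this model; pending invitations are assumed to exist):

    import boto3

    ram = boto3.client('ram')
    invitations = ram.get_resource_share_invitations()['resourceShareInvitations']
    for inv in invitations:
        if inv['status'] != 'PENDING':
            continue
        resources = ram.list_pending_invitation_resources(
            resourceShareInvitationArn=inv['resourceShareInvitationArn'])['resources']
        for resource in resources:
            print(resource['arn'], resource['type'])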

" }, "ListPrincipals":{ "name":"ListPrincipals", @@ -225,7 +249,7 @@ {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Lists the principals with access to the specified resource.

" + "documentation":"

Lists the principals that you have shared resources with or the principals that have shared resources with you.

" }, "ListResources":{ "name":"ListResources", @@ -244,7 +268,7 @@ {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Lists the resources that the specified principal can access.

" + "documentation":"

Lists the resources that you added to a resource share or the resources that are shared with you.

" }, "RejectResourceShareInvitation":{ "name":"RejectResourceShareInvitation", @@ -262,7 +286,9 @@ {"shape":"ResourceShareInvitationAlreadyRejectedException"}, {"shape":"ResourceShareInvitationExpiredException"}, {"shape":"ServerInternalException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidClientTokenException"}, + {"shape":"IdempotentParameterMismatchException"} ], "documentation":"

Rejects an invitation to a resource share from another AWS account.

" }, @@ -279,10 +305,11 @@ {"shape":"MalformedArnException"}, {"shape":"TagLimitExceededException"}, {"shape":"ResourceArnNotFoundException"}, + {"shape":"TagPolicyViolationException"}, {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Adds the specified tags to the specified resource share.

" + "documentation":"

Adds the specified tags to the specified resource share that you own.

" }, "UntagResource":{ "name":"UntagResource", @@ -297,7 +324,7 @@ {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Removes the specified tags from the specified resource share.

" + "documentation":"

Removes the specified tags from the specified resource share that you own.

" }, "UpdateResourceShare":{ "name":"UpdateResourceShare", @@ -318,7 +345,7 @@ {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Updates the specified resource share.

" + "documentation":"

Updates the specified resource share that you own.

" } }, "shapes":{ @@ -407,7 +434,7 @@ }, "allowExternalPrincipals":{ "shape":"Boolean", - "documentation":"

Indicates whether principals outside your organization can be associated with a resource share.

" + "documentation":"

Indicates whether principals outside your AWS organization can be associated with a resource share.

" }, "clientToken":{ "shape":"String", @@ -558,15 +585,15 @@ }, "resourceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + "documentation":"

The Amazon Resource Name (ARN) of the resource. You cannot specify this parameter if the association type is PRINCIPAL.

" }, "principal":{ "shape":"String", - "documentation":"

The principal.

" + "documentation":"

The principal. You cannot specify this parameter if the association type is RESOURCE.

" }, "associationStatus":{ "shape":"ResourceShareAssociationStatus", - "documentation":"

The status of the association.

" + "documentation":"

The association status.

" }, "nextToken":{ "shape":"String", @@ -583,7 +610,7 @@ "members":{ "resourceShareAssociations":{ "shape":"ResourceShareAssociationList", - "documentation":"

Information about the association.

" + "documentation":"

Information about the associations.

" }, "nextToken":{ "shape":"String", @@ -742,6 +769,37 @@ "error":{"httpStatusCode":400}, "exception":true }, + "ListPendingInvitationResourcesRequest":{ + "type":"structure", + "required":["resourceShareInvitationArn"], + "members":{ + "resourceShareInvitationArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the invitation.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The token for the next page of results.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + } + } + }, + "ListPendingInvitationResourcesResponse":{ + "type":"structure", + "members":{ + "resources":{ + "shape":"ResourceList", + "documentation":"

Information about the resources included in the resource share.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, "ListPrincipalsRequest":{ "type":"structure", "required":["resourceOwner"], @@ -760,7 +818,7 @@ }, "resourceType":{ "shape":"String", - "documentation":"

The resource type.

" + "documentation":"

The resource type.

Valid values: route53resolver:ResolverRule | ec2:TransitGateway | ec2:Subnet | license-manager:LicenseConfiguration

" }, "resourceShareArns":{ "shape":"ResourceShareArnList", @@ -803,7 +861,7 @@ }, "resourceType":{ "shape":"String", - "documentation":"

The resource type.

" + "documentation":"

The resource type.

Valid values: route53resolver:ResolverRule | ec2:TransitGateway | ec2:Subnet | license-manager:LicenseConfiguration

" }, "resourceArns":{ "shape":"ResourceArnList", @@ -897,7 +955,7 @@ }, "external":{ "shape":"Boolean", - "documentation":"

Indicates whether the principal belongs to the same organization as the AWS account that owns the resource share.

" + "documentation":"

Indicates whether the principal belongs to the same AWS organization as the AWS account that owns the resource share.

" } }, "documentation":"

Describes a principal for use with AWS Resource Access Manager.

" @@ -1013,7 +1071,7 @@ }, "allowExternalPrincipals":{ "shape":"Boolean", - "documentation":"

Indicates whether principals outside your organization can be associated with a resource share.

" + "documentation":"

Indicates whether principals outside your AWS organization can be associated with a resource share.

" }, "status":{ "shape":"ResourceShareStatus", @@ -1049,6 +1107,10 @@ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the resource share.

" }, + "resourceShareName":{ + "shape":"String", + "documentation":"

The name of the resource share.

" + }, "associatedEntity":{ "shape":"String", "documentation":"

The associated entity. For resource associations, this is the ARN of the resource. For principal associations, this is the ID of an AWS account or the ARN of an OU or organization from AWS Organizations.

" @@ -1075,7 +1137,7 @@ }, "external":{ "shape":"Boolean", - "documentation":"

Indicates whether the principal belongs to the same organization as the AWS account that owns the resource share.

" + "documentation":"

Indicates whether the principal belongs to the same AWS organization as the AWS account that owns the resource share.

" } }, "documentation":"

Describes an association with a resource share.

" @@ -1134,7 +1196,9 @@ }, "resourceShareAssociations":{ "shape":"ResourceShareAssociationList", - "documentation":"

The resources associated with the resource share.

" + "documentation":"

To view the resources associated with a pending resource share invitation, use ListPendingInvitationResources.

", + "deprecated":true, + "deprecatedMessage":"This member has been deprecated. Use ListPendingInvitationResources." } }, "documentation":"

Describes an invitation to join a resource share.

" @@ -1226,7 +1290,8 @@ "AVAILABLE", "ZONAL_RESOURCE_INACCESSIBLE", "LIMIT_EXCEEDED", - "UNAVAILABLE" + "UNAVAILABLE", + "PENDING" ] }, "ServerInternalException":{ @@ -1301,6 +1366,16 @@ "type":"list", "member":{"shape":"Tag"} }, + "TagPolicyViolationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The specified tag is a reserved word and cannot be used.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "TagResourceRequest":{ "type":"structure", "required":[ @@ -1374,7 +1449,7 @@ }, "allowExternalPrincipals":{ "shape":"Boolean", - "documentation":"

Indicates whether principals outside your organization can be associated with a resource share.

" + "documentation":"

Indicates whether principals outside your AWS organization can be associated with a resource share.

" }, "clientToken":{ "shape":"String", @@ -1396,5 +1471,5 @@ } } }, - "documentation":"

Use AWS Resource Access Manager to share AWS resources between AWS accounts. To share a resource, you create a resource share, associate the resource with the resource share, and specify the principals that can access the resource. The following principals are supported:

If you specify an AWS account that doesn't exist in the same organization as the account that owns the resource share, the owner of the specified account receives an invitation to accept the resource share. After the owner accepts the invitation, they can access the resources in the resource share. An administrator of the specified account can use IAM policies to restrict access resources in the resource share.

" + "documentation":"

Use AWS Resource Access Manager to share AWS resources between AWS accounts. To share a resource, you create a resource share, associate the resource with the resource share, and specify the principals that can access the resources associated with the resource share. The following principals are supported: AWS accounts, organizational units (OU) from AWS Organizations, and organizations from AWS Organizations.

For more information, see the AWS Resource Access Manager User Guide.

" } diff --git a/botocore/data/rds-data/2018-08-01/service-2.json b/botocore/data/rds-data/2018-08-01/service-2.json index 345dbd95..53cee2dc 100644 --- a/botocore/data/rds-data/2018-08-01/service-2.json +++ b/botocore/data/rds-data/2018-08-01/service-2.json @@ -1,908 +1,864 @@ { - "version": "2.0", - "metadata": { - "apiVersion": "2018-08-01", - "endpointPrefix": "rds-data", - "jsonVersion": "1.1", - "protocol": "rest-json", - "serviceFullName": "AWS RDS DataService", - "serviceId": "RDS Data", - "signatureVersion": "v4", - "signingName": "rds-data", - "uid": "rds-data-2018-08-01" + "version":"2.0", + "metadata":{ + "apiVersion":"2018-08-01", + "endpointPrefix":"rds-data", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS RDS DataService", + "serviceId":"RDS Data", + "signatureVersion":"v4", + "signingName":"rds-data", + "uid":"rds-data-2018-08-01" }, - "documentation": "Amazon RDS Data Service\n

Amazon RDS provides an HTTP endpoint to run SQL statements on an Amazon Aurora\n Serverless DB cluster. To run these statements, you work with the Data Service\n API.

\n

For more information about the Data Service API, see Using the Data API for Aurora\n Serverless in the Amazon Aurora User Guide.

", - "operations": { - "BatchExecuteStatement": { - "name": "BatchExecuteStatement", - "http": { - "method": "POST", - "requestUri": "/BatchExecute", - "responseCode": 200 + "operations":{ + "BatchExecuteStatement":{ + "name":"BatchExecuteStatement", + "http":{ + "method":"POST", + "requestUri":"/BatchExecute", + "responseCode":200 }, - "input": { - "shape": "BatchExecuteStatementRequest" - }, - "output": { - "shape": "BatchExecuteStatementResponse" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "ServiceUnavailableError" - }, - { - "shape": "StatementTimeoutException" - } + "input":{"shape":"BatchExecuteStatementRequest"}, + "output":{"shape":"BatchExecuteStatementResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"StatementTimeoutException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableError"} ], - "documentation": "

Runs a batch SQL statement over an array of data.

\n

You can run bulk update and insert operations for multiple records using a DML \n statement with different parameter sets. Bulk operations can provide a significant \n performance improvement over individual insert and update operations.

\n \n

If a call isn't part of a transaction because it doesn't include the\n transactionID parameter, changes that result from the call are\n committed automatically.

\n
" + "documentation":"

Runs a batch SQL statement over an array of data.

You can run bulk update and insert operations for multiple records using a DML statement with different parameter sets. Bulk operations can provide a significant performance improvement over individual insert and update operations.

If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically.

" }, - "BeginTransaction": { - "name": "BeginTransaction", - "http": { - "method": "POST", - "requestUri": "/BeginTransaction", - "responseCode": 200 + "BeginTransaction":{ + "name":"BeginTransaction", + "http":{ + "method":"POST", + "requestUri":"/BeginTransaction", + "responseCode":200 }, - "input": { - "shape": "BeginTransactionRequest" - }, - "output": { - "shape": "BeginTransactionResponse" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "ServiceUnavailableError" - }, - { - "shape": "StatementTimeoutException" - } + "input":{"shape":"BeginTransactionRequest"}, + "output":{"shape":"BeginTransactionResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"StatementTimeoutException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableError"} ], - "documentation": "

Starts a SQL transaction.

\n \n \n

A transaction can run for a maximum of 24 hours. A transaction is terminated and \n rolled back automatically after 24 hours.

\n

A transaction times out if no calls use its transaction ID in three minutes. \n If a transaction times out before it's committed, it's rolled back\n automatically.

\n

DDL statements inside a transaction cause an implicit commit. We recommend \n that you run each DDL statement in a separate ExecuteStatement call with \n continueAfterTimeout enabled.

\n
" + "documentation":"

Starts a SQL transaction.

 <important> <p>A transaction can run for a maximum of 24 hours. A transaction is terminated and rolled back automatically after 24 hours.</p> <p>A transaction times out if no calls use its transaction ID in three minutes. If a transaction times out before it's committed, it's rolled back automatically.</p> <p>DDL statements inside a transaction cause an implicit commit. We recommend that you run each DDL statement in a separate <code>ExecuteStatement</code> call with <code>continueAfterTimeout</code> enabled.</p> </important> 
" }, - "CommitTransaction": { - "name": "CommitTransaction", - "http": { - "method": "POST", - "requestUri": "/CommitTransaction", - "responseCode": 200 + "CommitTransaction":{ + "name":"CommitTransaction", + "http":{ + "method":"POST", + "requestUri":"/CommitTransaction", + "responseCode":200 }, - "input": { - "shape": "CommitTransactionRequest" - }, - "output": { - "shape": "CommitTransactionResponse" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableError" - } + "input":{"shape":"CommitTransactionRequest"}, + "output":{"shape":"CommitTransactionResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableError"}, + {"shape":"NotFoundException"} ], - "documentation": "

Ends a SQL transaction started with the BeginTransaction operation and\n commits the changes.

" + "documentation":"

Ends a SQL transaction started with the BeginTransaction operation and commits the changes.

" }, - "ExecuteSql": { - "name": "ExecuteSql", - "http": { - "method": "POST", - "requestUri": "/ExecuteSql", - "responseCode": 200 + "ExecuteSql":{ + "name":"ExecuteSql", + "http":{ + "method":"POST", + "requestUri":"/ExecuteSql", + "responseCode":200 }, - "input": { - "shape": "ExecuteSqlRequest" - }, - "output": { - "shape": "ExecuteSqlResponse" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "ServiceUnavailableError" - } + "input":{"shape":"ExecuteSqlRequest"}, + "output":{"shape":"ExecuteSqlResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableError"} ], - "deprecated": true, - "documentation": "

Runs one or more SQL statements.

\n \n

This operation is deprecated. Use the BatchExecuteStatement or\n ExecuteStatement operation.

\n
" + "documentation":"

Runs one or more SQL statements.

This operation is deprecated. Use the BatchExecuteStatement or ExecuteStatement operation.

", + "deprecated":true, + "deprecatedMessage":"The ExecuteSql API is deprecated, please use the ExecuteStatement API." }, - "ExecuteStatement": { - "name": "ExecuteStatement", - "http": { - "method": "POST", - "requestUri": "/Execute", - "responseCode": 200 + "ExecuteStatement":{ + "name":"ExecuteStatement", + "http":{ + "method":"POST", + "requestUri":"/Execute", + "responseCode":200 }, - "input": { - "shape": "ExecuteStatementRequest" - }, - "output": { - "shape": "ExecuteStatementResponse" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "ServiceUnavailableError" - }, - { - "shape": "StatementTimeoutException" - } + "input":{"shape":"ExecuteStatementRequest"}, + "output":{"shape":"ExecuteStatementResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"StatementTimeoutException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableError"} ], - "documentation": "

Runs a SQL statement against a database.

\n \n

If a call isn't part of a transaction because it doesn't include the\n transactionID parameter, changes that result from the call are\n committed automatically.

\n
\n

The response size limit is 1 MB or 1,000 records. If the call returns more than 1 MB of response data or over 1,000 records, the call is terminated.

" + "documentation":"

Runs a SQL statement against a database.

If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically.

The response size limit is 1 MB or 1,000 records. If the call returns more than 1 MB of response data or over 1,000 records, the call is terminated.

" }, - "RollbackTransaction": { - "name": "RollbackTransaction", - "http": { - "method": "POST", - "requestUri": "/RollbackTransaction", - "responseCode": 200 + "RollbackTransaction":{ + "name":"RollbackTransaction", + "http":{ + "method":"POST", + "requestUri":"/RollbackTransaction", + "responseCode":200 }, - "input": { - "shape": "RollbackTransactionRequest" - }, - "output": { - "shape": "RollbackTransactionResponse" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableError" - } + "input":{"shape":"RollbackTransactionRequest"}, + "output":{"shape":"RollbackTransactionResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableError"}, + {"shape":"NotFoundException"} ], - "documentation": "

Performs a rollback of a transaction. Rolling back a transaction cancels its changes.

" + "documentation":"

Performs a rollback of a transaction. Rolling back a transaction cancels its changes.

" } }, - "shapes": { - "SqlStatementResults": { - "type": "list", - "member": { - "shape": "SqlStatementResult" - } + "shapes":{ + "Arn":{ + "type":"string", + "max":100, + "min":11 }, - "ResultFrame": { - "type": "structure", - "members": { - "records": { - "shape": "Records", - "documentation": "

The records in the result set.

" + "ArrayOfArray":{ + "type":"list", + "member":{"shape":"ArrayValue"}, + "documentation":"

An array of arrays.

Some array entries can be null.

" + }, + "ArrayValue":{ + "type":"structure", + "members":{ + "arrayValues":{ + "shape":"ArrayOfArray", + "documentation":"

An array of arrays.

" }, - "resultSetMetadata": { - "shape": "ResultSetMetadata", - "documentation": "

The result-set metadata in the result set.

" + "booleanValues":{ + "shape":"BooleanArray", + "documentation":"

An array of Boolean values.

" + }, + "doubleValues":{ + "shape":"DoubleArray", + "documentation":"

An array of floating point numbers.

" + }, + "longValues":{ + "shape":"LongArray", + "documentation":"

An array of integers.

" + }, + "stringValues":{ + "shape":"StringArray", + "documentation":"

An array of strings.

" } }, - "documentation": "

The result set returned by a SQL statement.

" + "documentation":"

Contains an array.

" }, - "SqlParameterSets": { - "type": "list", - "member": { - "shape": "SqlParametersList" - } + "ArrayValueList":{ + "type":"list", + "member":{"shape":"Value"} }, - "NotFoundException": { - "type": "structure", - "members": { - "message": { - "shape": "ErrorMessage", - "documentation": "

The error message returned by this NotFoundException error.

" + "BadRequestException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"ErrorMessage", + "documentation":"

The error message returned by this BadRequestException error.

" } }, - "documentation": "

The resourceArn, secretArn, or transactionId value can't be found.

", - "exception": true, - "error": { - "code": "NotFoundException", - "httpStatusCode": 404, - "senderFault": true - } + "documentation":"

There is an error in the call or in a SQL statement.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true }, - "BatchExecuteStatementRequest": { - "type": "structure", - "required": [ + "BatchExecuteStatementRequest":{ + "type":"structure", + "required":[ "resourceArn", "secretArn", "sql" ], - "members": { - "database": { - "shape": "DbName", - "documentation": "

The name of the database.

" + "members":{ + "database":{ + "shape":"DbName", + "documentation":"

The name of the database.

" }, - "parameterSets": { - "shape": "SqlParameterSets", - "documentation": "

The parameter set for the batch operation.

" + "parameterSets":{ + "shape":"SqlParameterSets", + "documentation":"

The parameter set for the batch operation.

" }, - "resourceArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

" + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

" }, - "schema": { - "shape": "DbName", - "documentation": "

The name of the database schema.

" + "schema":{ + "shape":"DbName", + "documentation":"

The name of the database schema.

" }, - "secretArn": { - "shape": "Arn", - "documentation": "

The name or ARN of the secret that enables access to the DB cluster.

" + "secretArn":{ + "shape":"Arn", + "documentation":"

The name or ARN of the secret that enables access to the DB cluster.

" }, - "sql": { - "shape": "SqlStatement", - "documentation": "

The SQL statement to run.

" + "sql":{ + "shape":"SqlStatement", + "documentation":"

The SQL statement to run.

" }, - "transactionId": { - "shape": "Id", - "documentation": "

The identifier of a transaction that was started by using the\n BeginTransaction operation. Specify the transaction ID of the\n transaction that you want to include the SQL statement in.

\n

If the SQL statement is not part of a transaction, don't set this\n parameter.

" + "transactionId":{ + "shape":"Id", + "documentation":"

The identifier of a transaction that was started by using the BeginTransaction operation. Specify the transaction ID of the transaction that you want to include the SQL statement in.

If the SQL statement is not part of a transaction, don't set this parameter.

" } }, - "documentation": "

The request parameters represent the input of a SQL statement over an array of\n data.

" + "documentation":"

The request parameters represent the input of a SQL statement over an array of data.

" }, - "ArrayValueList": { - "type": "list", - "member": { - "shape": "Value" - } - }, - "UpdateResults": { - "type": "list", - "member": { - "shape": "UpdateResult" - } - }, - "Row": { - "type": "list", - "member": { - "shape": "Value" - } - }, - "SqlRecords": { - "type": "list", - "member": { - "shape": "FieldList" - } - }, - "Long": { - "type": "long" - }, - "BoxedInteger": { - "type": "integer", - "box": true - }, - "CommitTransactionResponse": { - "type": "structure", - "members": { - "transactionStatus": { - "shape": "TransactionStatus", - "documentation": "

The status of the commit operation.

" + "BatchExecuteStatementResponse":{ + "type":"structure", + "members":{ + "updateResults":{ + "shape":"UpdateResults", + "documentation":"

The execution results of each batch entry.

" } }, - "documentation": "

The response elements represent the output of a commit transaction request.

" + "documentation":"

The response elements represent the output of a SQL statement over an array of data.

" }, - "Integer": { - "type": "integer" - }, - "BoxedLong": { - "type": "long", - "box": true - }, - "SqlParameter": { - "type": "structure", - "members": { - "name": { - "shape": "ParameterName", - "documentation": "

The name of the parameter.

" + "BeginTransactionRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "secretArn" + ], + "members":{ + "database":{ + "shape":"DbName", + "documentation":"

The name of the database.

" }, - "value": { - "shape": "Field", - "documentation": "

The value of the parameter.

" + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

" + }, + "schema":{ + "shape":"DbName", + "documentation":"

The name of the database schema.

" + }, + "secretArn":{ + "shape":"Arn", + "documentation":"

The name or ARN of the secret that enables access to the DB cluster.

" } }, - "documentation": "

A parameter used in a SQL statement.

" + "documentation":"

The request parameters represent the input of a request to start a SQL transaction.

" }, - "Field": { - "type": "structure", - "members": { - "blobValue": { - "shape": "Blob", - "documentation": "

A value of BLOB data type.

" - }, - "booleanValue": { - "shape": "BoxedBoolean", - "documentation": "

A value of Boolean data type.

" - }, - "doubleValue": { - "shape": "BoxedDouble", - "documentation": "

A value of double data type.

" - }, - "isNull": { - "shape": "BoxedBoolean", - "documentation": "

A NULL value.

" - }, - "longValue": { - "shape": "BoxedLong", - "documentation": "

A value of long data type.

" - }, - "stringValue": { - "shape": "String", - "documentation": "

A value of string data type.

" + "BeginTransactionResponse":{ + "type":"structure", + "members":{ + "transactionId":{ + "shape":"Id", + "documentation":"

The transaction ID of the transaction started by the call.

" } }, - "documentation": "

Contains a value.

" + "documentation":"

The response elements represent the output of a request to start a SQL transaction.

" }, - "ExecuteSqlRequest": { - "type": "structure", - "required": [ + "Blob":{"type":"blob"}, + "Boolean":{"type":"boolean"}, + "BooleanArray":{ + "type":"list", + "member":{"shape":"BoxedBoolean"}, + "documentation":"

An array of Boolean values.

Some array entries can be null.

" + }, + "BoxedBoolean":{ + "type":"boolean", + "box":true + }, + "BoxedDouble":{ + "type":"double", + "box":true + }, + "BoxedFloat":{ + "type":"float", + "box":true + }, + "BoxedInteger":{ + "type":"integer", + "box":true + }, + "BoxedLong":{ + "type":"long", + "box":true + }, + "ColumnMetadata":{ + "type":"structure", + "members":{ + "arrayBaseColumnType":{ + "shape":"Integer", + "documentation":"

The type of the base column for an array column.

" + }, + "isAutoIncrement":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the column increments automatically.

" + }, + "isCaseSensitive":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the column is case-sensitive.

" + }, + "isCurrency":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the column contains currency values.

" + }, + "isSigned":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether an integer column is signed.

" + }, + "label":{ + "shape":"String", + "documentation":"

The label for the column.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "nullable":{ + "shape":"Integer", + "documentation":"

A value that indicates whether the column is nullable.

" + }, + "precision":{ + "shape":"Integer", + "documentation":"

The precision value of a decimal number column.

" + }, + "scale":{ + "shape":"Integer", + "documentation":"

The scale value of a decimal number column.

" + }, + "schemaName":{ + "shape":"String", + "documentation":"

The name of the schema that owns the table that includes the column.

" + }, + "tableName":{ + "shape":"String", + "documentation":"

The name of the table that includes the column.

" + }, + "type":{ + "shape":"Integer", + "documentation":"

The type of the column.

" + }, + "typeName":{ + "shape":"String", + "documentation":"

The database-specific data type of the column.

" + } + }, + "documentation":"

Contains the metadata for a column.

" + }, + "CommitTransactionRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "secretArn", + "transactionId" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

" + }, + "secretArn":{ + "shape":"Arn", + "documentation":"

The name or ARN of the secret that enables access to the DB cluster.

" + }, + "transactionId":{ + "shape":"Id", + "documentation":"

The identifier of the transaction to end and commit.

" + } + }, + "documentation":"

The request parameters represent the input of a commit transaction request.

" + }, + "CommitTransactionResponse":{ + "type":"structure", + "members":{ + "transactionStatus":{ + "shape":"TransactionStatus", + "documentation":"

The status of the commit operation.

" + } + }, + "documentation":"

The response elements represent the output of a commit transaction request.

" + }, + "DbName":{ + "type":"string", + "max":64, + "min":0 + }, + "DecimalReturnType":{ + "type":"string", + "enum":[ + "DOUBLE_OR_LONG", + "STRING" + ] + }, + "DoubleArray":{ + "type":"list", + "member":{"shape":"BoxedDouble"}, + "documentation":"

An array of floating point numbers.

Some array entries can be null.

" + }, + "ErrorMessage":{"type":"string"}, + "ExecuteSqlRequest":{ + "type":"structure", + "required":[ "awsSecretStoreArn", "dbClusterOrInstanceArn", "sqlStatements" ], - "members": { - "awsSecretStoreArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) of the secret that enables access to the DB cluster.

" + "members":{ + "awsSecretStoreArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the secret that enables access to the DB cluster.

" }, - "database": { - "shape": "DbName", - "documentation": "

The name of the database.

" + "database":{ + "shape":"DbName", + "documentation":"

The name of the database.

" }, - "dbClusterOrInstanceArn": { - "shape": "Arn", - "documentation": "

The ARN of the Aurora Serverless DB cluster.

" + "dbClusterOrInstanceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the Aurora Serverless DB cluster.

" }, - "schema": { - "shape": "DbName", - "documentation": "

The name of the database schema.

" + "schema":{ + "shape":"DbName", + "documentation":"

The name of the database schema.

" }, - "sqlStatements": { - "shape": "SqlStatement", - "documentation": "

One or more SQL statements to run on the DB cluster.

\n

You can separate SQL statements from each other with a semicolon (;). Any valid SQL\n statement is permitted, including data definition, data manipulation, and commit\n statements.

" + "sqlStatements":{ + "shape":"SqlStatement", + "documentation":"

One or more SQL statements to run on the DB cluster.

You can separate SQL statements from each other with a semicolon (;). Any valid SQL statement is permitted, including data definition, data manipulation, and commit statements.

" } }, - "documentation": "

The request parameters represent the input of a request to run one or more SQL\n statements.

" + "documentation":"

The request parameters represent the input of a request to run one or more SQL statements.

" }, - "Arn": { - "type": "string", - "max": 100 - }, - "StructValue": { - "type": "structure", - "members": { - "attributes": { - "shape": "ArrayValueList", - "documentation": "

The attributes returned in the record.

" + "ExecuteSqlResponse":{ + "type":"structure", + "members":{ + "sqlStatementResults":{ + "shape":"SqlStatementResults", + "documentation":"

The results of the SQL statement or statements.

" } }, - "documentation": "

A structure value returned by a call.

" + "documentation":"

The response elements represent the output of a request to run one or more SQL statements.

" }, - "Boolean": { - "type": "boolean" - }, - "BadRequestException": { - "type": "structure", - "members": { - "message": { - "shape": "ErrorMessage", - "documentation": "

The error message returned by this BadRequestException error.

" - } - }, - "documentation": "

There is an error in the call or in a SQL statement.

", - "exception": true, - "error": { - "code": "BadRequestException", - "httpStatusCode": 400, - "senderFault": true - } - }, - "TransactionStatus": { - "type": "string", - "max": 128 - }, - "SqlStatement": { - "type": "string", - "max": 65536 - }, - "RollbackTransactionRequest": { - "type": "structure", - "required": [ - "resourceArn", - "secretArn", - "transactionId" - ], - "members": { - "resourceArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

" - }, - "secretArn": { - "shape": "Arn", - "documentation": "

The name or ARN of the secret that enables access to the DB cluster.

" - }, - "transactionId": { - "shape": "Id", - "documentation": "

The identifier of the transaction to roll back.

" - } - }, - "documentation": "

The request parameters represent the input of a request to perform a rollback of a\n transaction.

" - }, - "ErrorMessage": { - "type": "string" - }, - "Record": { - "type": "structure", - "members": { - "values": { - "shape": "Row", - "documentation": "

The values returned in the record.

" - } - }, - "documentation": "

A record returned by a call.

" - }, - "BoxedFloat": { - "type": "float", - "box": true - }, - "BoxedDouble": { - "type": "double", - "box": true - }, - "ForbiddenException": { - "type": "structure", - "members": { - "message": { - "shape": "ErrorMessage", - "documentation": "

The error message returned by this ForbiddenException error.

" - } - }, - "documentation": "

There are insufficient privileges to make the call.

", - "exception": true, - "error": { - "code": "ForbiddenException", - "httpStatusCode": 403, - "senderFault": true - } - }, - "Value": { - "type": "structure", - "members": { - "arrayValues": { - "shape": "ArrayValueList", - "documentation": "

An array of column values.

" - }, - "bigIntValue": { - "shape": "BoxedLong", - "documentation": "

A value for a column of big integer data type.

" - }, - "bitValue": { - "shape": "BoxedBoolean", - "documentation": "

A value for a column of BIT data type.

" - }, - "blobValue": { - "shape": "Blob", - "documentation": "

A value for a column of BLOB data type.

" - }, - "doubleValue": { - "shape": "BoxedDouble", - "documentation": "

A value for a column of double data type.

" - }, - "intValue": { - "shape": "BoxedInteger", - "documentation": "

A value for a column of integer data type.

" - }, - "isNull": { - "shape": "BoxedBoolean", - "documentation": "

A NULL value.

" - }, - "realValue": { - "shape": "BoxedFloat", - "documentation": "

A value for a column of real data type.

" - }, - "stringValue": { - "shape": "String", - "documentation": "

A value for a column of string data type.

" - }, - "structValue": { - "shape": "StructValue", - "documentation": "

A value for a column of STRUCT data type.

" - } - }, - "documentation": "

Contains the value of a column.

" - }, - "FieldList": { - "type": "list", - "member": { - "shape": "Field" - } - }, - "StatementTimeoutException": { - "type": "structure", - "members": { - "dbConnectionId": { - "shape": "Long", - "documentation": "

The database connection ID that executed the SQL statement.

" - }, - "message": { - "shape": "ErrorMessage", - "documentation": "

The error message returned by this StatementTimeoutException error.

" - } - }, - "documentation": "

The execution of the SQL statement timed out.

", - "exception": true, - "error": { - "code": "StatementTimeoutException", - "httpStatusCode": 400, - "senderFault": true - } - }, - "ExecuteStatementResponse": { - "type": "structure", - "members": { - "columnMetadata": { - "shape": "Metadata", - "documentation": "

Metadata for the columns included in the results.

" - }, - "generatedFields": { - "shape": "FieldList", - "documentation": "

Values for fields generated during the request.

" - }, - "numberOfRecordsUpdated": { - "shape": "RecordsUpdated", - "documentation": "

The number of records updated by the request.

" - }, - "records": { - "shape": "SqlRecords", - "documentation": "

The records returned by the SQL statement.

" - } - }, - "documentation": "

The response elements represent the output of a request to run a SQL statement against\n a database.

" - }, - "BoxedBoolean": { - "type": "boolean", - "box": true - }, - "RecordsUpdated": { - "type": "long" - }, - "UpdateResult": { - "type": "structure", - "members": { - "generatedFields": { - "shape": "FieldList", - "documentation": "

Values for fields generated during the request.

" - } - }, - "documentation": "

The response elements represent the results of an update.

" - }, - "SqlParametersList": { - "type": "list", - "member": { - "shape": "SqlParameter" - } - }, - "ParameterName": { - "type": "string" - }, - "Metadata": { - "type": "list", - "member": { - "shape": "ColumnMetadata" - } - }, - "ExecuteSqlResponse": { - "type": "structure", - "members": { - "sqlStatementResults": { - "shape": "SqlStatementResults", - "documentation": "

The results of the SQL statement or statements.

" - } - }, - "documentation": "

The response elements represent the output of a request to run one or more SQL\n statements.

" - }, - "SqlStatementResult": { - "type": "structure", - "members": { - "numberOfRecordsUpdated": { - "shape": "RecordsUpdated", - "documentation": "

The number of records updated by a SQL statement.

" - }, - "resultFrame": { - "shape": "ResultFrame", - "documentation": "

The result set of the SQL statement.

" - } - }, - "documentation": "

The result of a SQL statement.

" - }, - "BeginTransactionRequest": { - "type": "structure", - "required": [ - "resourceArn", - "secretArn" - ], - "members": { - "database": { - "shape": "DbName", - "documentation": "

The name of the database.

" - }, - "resourceArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

" - }, - "schema": { - "shape": "DbName", - "documentation": "

The name of the database schema.

" - }, - "secretArn": { - "shape": "Arn", - "documentation": "

The name or ARN of the secret that enables access to the DB cluster.

" - } - }, - "documentation": "

The request parameters represent the input of a request to start a SQL\n transaction.

" - }, - "RollbackTransactionResponse": { - "type": "structure", - "members": { - "transactionStatus": { - "shape": "TransactionStatus", - "documentation": "

The status of the rollback operation.

" - } - }, - "documentation": "

The response elements represent the output of a request to perform a rollback of a\n transaction.

" - }, - "BatchExecuteStatementResponse": { - "type": "structure", - "members": { - "updateResults": { - "shape": "UpdateResults", - "documentation": "

The execution results of each batch entry.

" - } - }, - "documentation": "

The response elements represent the output of a SQL statement over an array of\n data.

" - }, - "ResultSetMetadata": { - "type": "structure", - "members": { - "columnCount": { - "shape": "Long", - "documentation": "

The number of columns in the result set.

" - }, - "columnMetadata": { - "shape": "Metadata", - "documentation": "

The metadata of the columns in the result set.

" - } - }, - "documentation": "

The metadata of the result set returned by a SQL statement.

" - }, - "Records": { - "type": "list", - "member": { - "shape": "Record" - } - }, - "ExecuteStatementRequest": { - "type": "structure", - "required": [ + "ExecuteStatementRequest":{ + "type":"structure", + "required":[ "resourceArn", "secretArn", "sql" ], - "members": { - "continueAfterTimeout": { - "shape": "Boolean", - "documentation": "

A value that indicates whether to continue running the statement after \n the call times out. By default, the statement stops running when the call \n times out.

\n \n

For DDL statements, we recommend continuing to run the statement after \n the call times out. When a DDL statement terminates before it is finished \n running, it can result in errors and possibly corrupted data structures.

\n
" + "members":{ + "continueAfterTimeout":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether to continue running the statement after the call times out. By default, the statement stops running when the call times out.

For DDL statements, we recommend continuing to run the statement after the call times out. When a DDL statement terminates before it is finished running, it can result in errors and possibly corrupted data structures.

" }, - "database": { - "shape": "DbName", - "documentation": "

The name of the database.

" + "database":{ + "shape":"DbName", + "documentation":"

The name of the database.

" }, - "includeResultMetadata": { - "shape": "Boolean", - "documentation": "

A value that indicates whether to include metadata in the results.

" + "includeResultMetadata":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether to include metadata in the results.

" }, - "parameters": { - "shape": "SqlParametersList", - "documentation": "

The parameters for the SQL statement.

" + "parameters":{ + "shape":"SqlParametersList", + "documentation":"

The parameters for the SQL statement.

" }, - "resourceArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

" + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

" }, - "schema": { - "shape": "DbName", - "documentation": "

The name of the database schema.

" + "resultSetOptions":{ + "shape":"ResultSetOptions", + "documentation":"

Options that control how the result set is returned.

" }, - "secretArn": { - "shape": "Arn", - "documentation": "

The name or ARN of the secret that enables access to the DB cluster.

" + "schema":{ + "shape":"DbName", + "documentation":"

The name of the database schema.

" }, - "sql": { - "shape": "SqlStatement", - "documentation": "

The SQL statement to run.

" + "secretArn":{ + "shape":"Arn", + "documentation":"

The name or ARN of the secret that enables access to the DB cluster.

" }, - "transactionId": { - "shape": "Id", - "documentation": "

The identifier of a transaction that was started by using the\n BeginTransaction operation. Specify the transaction ID of the\n transaction that you want to include the SQL statement in.

\n

If the SQL statement is not part of a transaction, don't set this parameter.

" + "sql":{ + "shape":"SqlStatement", + "documentation":"

The SQL statement to run.

" + }, + "transactionId":{ + "shape":"Id", + "documentation":"

The identifier of a transaction that was started by using the BeginTransaction operation. Specify the transaction ID of the transaction that you want to include the SQL statement in.

If the SQL statement is not part of a transaction, don't set this parameter.

" } }, - "documentation": "

The request parameters represent the input of a request to run a SQL statement against\n a database.

" + "documentation":"

The request parameters represent the input of a request to run a SQL statement against a database.

" }, - "Blob": { - "type": "blob" - }, - "String": { - "type": "string" - }, - "BeginTransactionResponse": { - "type": "structure", - "members": { - "transactionId": { - "shape": "Id", - "documentation": "

The transaction ID of the transaction started by the call.

" + "ExecuteStatementResponse":{ + "type":"structure", + "members":{ + "columnMetadata":{ + "shape":"Metadata", + "documentation":"

Metadata for the columns included in the results.

" + }, + "generatedFields":{ + "shape":"FieldList", + "documentation":"

Values for fields generated during the request.

 <note> <p>The <code>generatedFields</code> data isn't supported by Aurora PostgreSQL. To get the values of generated fields, use the <code>RETURNING</code> clause. For more information, see <a href="https://www.postgresql.org/docs/10/dml-returning.html">Returning Data From Modified Rows</a> in the PostgreSQL documentation.</p> </note> 
" + }, + "numberOfRecordsUpdated":{ + "shape":"RecordsUpdated", + "documentation":"

The number of records updated by the request.

" + }, + "records":{ + "shape":"SqlRecords", + "documentation":"

The records returned by the SQL statement.

" } }, - "documentation": "

The response elements represent the output of a request to start a SQL\n transaction.

" + "documentation":"

The response elements represent the output of a request to run a SQL statement against a database.

" }, - "Id": { - "type": "string", - "max": 192 + "Field":{ + "type":"structure", + "members":{ + "arrayValue":{ + "shape":"ArrayValue", + "documentation":"

An array of values.

" + }, + "blobValue":{ + "shape":"Blob", + "documentation":"

A value of BLOB data type.

" + }, + "booleanValue":{ + "shape":"BoxedBoolean", + "documentation":"

A value of Boolean data type.

" + }, + "doubleValue":{ + "shape":"BoxedDouble", + "documentation":"

A value of double data type.

" + }, + "isNull":{ + "shape":"BoxedBoolean", + "documentation":"

A NULL value.

" + }, + "longValue":{ + "shape":"BoxedLong", + "documentation":"

A value of long data type.

" + }, + "stringValue":{ + "shape":"String", + "documentation":"

A value of string data type.

" + } + }, + "documentation":"

Contains a value.

" }, - "CommitTransactionRequest": { - "type": "structure", - "required": [ + "FieldList":{ + "type":"list", + "member":{"shape":"Field"} + }, + "ForbiddenException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"ErrorMessage", + "documentation":"

The error message returned by this ForbiddenException error.

" + } + }, + "documentation":"

There are insufficient privileges to make the call.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Id":{ + "type":"string", + "max":192, + "min":0 + }, + "Integer":{"type":"integer"}, + "InternalServerErrorException":{ + "type":"structure", + "members":{ + }, + "documentation":"

An internal error occurred.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "Long":{"type":"long"}, + "LongArray":{ + "type":"list", + "member":{"shape":"BoxedLong"}, + "documentation":"

An array of integers.

Some array entries can be null.

" + }, + "Metadata":{ + "type":"list", + "member":{"shape":"ColumnMetadata"} + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"ErrorMessage", + "documentation":"

The error message returned by this NotFoundException error.

" + } + }, + "documentation":"

The resourceArn, secretArn, or transactionId value can't be found.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ParameterName":{"type":"string"}, + "Record":{ + "type":"structure", + "members":{ + "values":{ + "shape":"Row", + "documentation":"

The values returned in the record.

" + } + }, + "documentation":"

A record returned by a call.

" + }, + "Records":{ + "type":"list", + "member":{"shape":"Record"} + }, + "RecordsUpdated":{"type":"long"}, + "ResultFrame":{ + "type":"structure", + "members":{ + "records":{ + "shape":"Records", + "documentation":"

The records in the result set.

" + }, + "resultSetMetadata":{ + "shape":"ResultSetMetadata", + "documentation":"

The result-set metadata in the result set.

" + } + }, + "documentation":"

The result set returned by a SQL statement.

" + }, + "ResultSetMetadata":{ + "type":"structure", + "members":{ + "columnCount":{ + "shape":"Long", + "documentation":"

The number of columns in the result set.

" + }, + "columnMetadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the columns in the result set.

" + } + }, + "documentation":"

The metadata of the result set returned by a SQL statement.

" + }, + "ResultSetOptions":{ + "type":"structure", + "members":{ + "decimalReturnType":{ + "shape":"DecimalReturnType", + "documentation":"

A value that indicates how a field of DECIMAL type is represented in the response. The value of STRING, the default, specifies that it is converted to a String value. The value of DOUBLE_OR_LONG specifies that it is converted to a Long value if its scale is 0, or to a Double value otherwise.

Conversion to Double or Long can result in roundoff errors due to precision loss. We recommend converting to String, especially when working with currency values.

" + } + }, + "documentation":"

Options that control how the result set is returned.

" + }, + "RollbackTransactionRequest":{ + "type":"structure", + "required":[ "resourceArn", "secretArn", "transactionId" ], - "members": { - "resourceArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

" + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

" }, - "secretArn": { - "shape": "Arn", - "documentation": "

The name or ARN of the secret that enables access to the DB cluster.

" + "secretArn":{ + "shape":"Arn", + "documentation":"

The name or ARN of the secret that enables access to the DB cluster.

" }, - "transactionId": { - "shape": "Id", - "documentation": "

The identifier of the transaction to end and commit.

" + "transactionId":{ + "shape":"Id", + "documentation":"

The identifier of the transaction to roll back.

" } }, - "documentation": "

The request parameters represent the input of a commit transaction request.

" + "documentation":"

The request parameters represent the input of a request to perform a rollback of a transaction.

" }, - "ServiceUnavailableError": { - "type": "structure", - "members": { }, - "documentation": "

The service specified by the resourceArn parameter is not\n available.

", - "exception": true, - "error": { - "code": "ServiceUnavailableError", - "httpStatusCode": 503, - "fault": true - } - }, - "ColumnMetadata": { - "type": "structure", - "members": { - "arrayBaseColumnType": { - "shape": "Integer", - "documentation": "

The type of the column.

" - }, - "isAutoIncrement": { - "shape": "Boolean", - "documentation": "

A value that indicates whether the column increments automatically.

" - }, - "isCaseSensitive": { - "shape": "Boolean", - "documentation": "

A value that indicates whether the column is case-sensitive.

" - }, - "isCurrency": { - "shape": "Boolean", - "documentation": "

A value that indicates whether the column contains currency values.

" - }, - "isSigned": { - "shape": "Boolean", - "documentation": "

A value that indicates whether an integer column is signed.

" - }, - "label": { - "shape": "String", - "documentation": "

The label for the column.

" - }, - "name": { - "shape": "String", - "documentation": "

The name of the column.

" - }, - "nullable": { - "shape": "Integer", - "documentation": "

A value that indicates whether the column is nullable.

" - }, - "precision": { - "shape": "Integer", - "documentation": "

The precision value of a decimal number column.

" - }, - "scale": { - "shape": "Integer", - "documentation": "

The scale value of a decimal number column.

" - }, - "schemaName": { - "shape": "String", - "documentation": "

The name of the schema that owns the table that includes the column.

" - }, - "tableName": { - "shape": "String", - "documentation": "

The name of the table that includes the column.

" - }, - "type": { - "shape": "Integer", - "documentation": "

The type of the column.

" - }, - "typeName": { - "shape": "String", - "documentation": "

The database-specific data type of the column.

" + "RollbackTransactionResponse":{ + "type":"structure", + "members":{ + "transactionStatus":{ + "shape":"TransactionStatus", + "documentation":"

The status of the rollback operation.

" } }, - "documentation": "

Contains the metadata for a column.

" + "documentation":"

The response elements represent the output of a request to perform a rollback of a transaction.

" }, - "DbName": { - "type": "string", - "max": 64 + "Row":{ + "type":"list", + "member":{"shape":"Value"} }, - "InternalServerErrorException": { - "type": "structure", - "members": { }, - "documentation": "

An internal error occurred.

", - "exception": true, - "error": { - "code": "InternalServerErrorException", - "httpStatusCode": 500, - "fault": true - } + "ServiceUnavailableError":{ + "type":"structure", + "members":{ + }, + "documentation":"

The service specified by the resourceArn parameter is not available.

", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "SqlParameter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"ParameterName", + "documentation":"

The name of the parameter.

" + }, + "value":{ + "shape":"Field", + "documentation":"

The value of the parameter.

" + } + }, + "documentation":"

A parameter used in a SQL statement.

" + }, + "SqlParameterSets":{ + "type":"list", + "member":{"shape":"SqlParametersList"} + }, + "SqlParametersList":{ + "type":"list", + "member":{"shape":"SqlParameter"} + }, + "SqlRecords":{ + "type":"list", + "member":{"shape":"FieldList"} + }, + "SqlStatement":{ + "type":"string", + "max":65536, + "min":0 + }, + "SqlStatementResult":{ + "type":"structure", + "members":{ + "numberOfRecordsUpdated":{ + "shape":"RecordsUpdated", + "documentation":"

The number of records updated by a SQL statement.

" + }, + "resultFrame":{ + "shape":"ResultFrame", + "documentation":"

The result set of the SQL statement.

" + } + }, + "documentation":"

The result of a SQL statement.

 <important> <p>This data type is deprecated.</p> </important> 
" + }, + "SqlStatementResults":{ + "type":"list", + "member":{"shape":"SqlStatementResult"} + }, + "StatementTimeoutException":{ + "type":"structure", + "members":{ + "dbConnectionId":{ + "shape":"Long", + "documentation":"

The database connection ID that executed the SQL statement.

" + }, + "message":{ + "shape":"ErrorMessage", + "documentation":"

The error message returned by this StatementTimeoutException error.

" + } + }, + "documentation":"

The execution of the SQL statement timed out.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "StringArray":{ + "type":"list", + "member":{"shape":"String"}, + "documentation":"

An array of strings.

Some array entries can be null.

" + }, + "StructValue":{ + "type":"structure", + "members":{ + "attributes":{ + "shape":"ArrayValueList", + "documentation":"

The attributes returned in the record.

" + } + }, + "documentation":"

A structure value returned by a call.

" + }, + "TransactionStatus":{ + "type":"string", + "max":128, + "min":0 + }, + "UpdateResult":{ + "type":"structure", + "members":{ + "generatedFields":{ + "shape":"FieldList", + "documentation":"

Values for fields generated during the request.

" + } + }, + "documentation":"

The response elements represent the results of an update.

" + }, + "UpdateResults":{ + "type":"list", + "member":{"shape":"UpdateResult"} + }, + "Value":{ + "type":"structure", + "members":{ + "arrayValues":{ + "shape":"ArrayValueList", + "documentation":"

An array of column values.

" + }, + "bigIntValue":{ + "shape":"BoxedLong", + "documentation":"

A value for a column of big integer data type.

" + }, + "bitValue":{ + "shape":"BoxedBoolean", + "documentation":"

A value for a column of BIT data type.

" + }, + "blobValue":{ + "shape":"Blob", + "documentation":"

A value for a column of BLOB data type.

" + }, + "doubleValue":{ + "shape":"BoxedDouble", + "documentation":"

A value for a column of double data type.

" + }, + "intValue":{ + "shape":"BoxedInteger", + "documentation":"

A value for a column of integer data type.

" + }, + "isNull":{ + "shape":"BoxedBoolean", + "documentation":"

A NULL value.

" + }, + "realValue":{ + "shape":"BoxedFloat", + "documentation":"

A value for a column of real data type.

" + }, + "stringValue":{ + "shape":"String", + "documentation":"

A value for a column of string data type.

" + }, + "structValue":{ + "shape":"StructValue", + "documentation":"

A value for a column of STRUCT data type.

" + } + }, + "documentation":"

Contains the value of a column.

 <important> <p>This data type is deprecated.</p> </important> 
" } - } + }, + "documentation":"

Amazon RDS Data Service

Amazon RDS provides an HTTP endpoint to run SQL statements on an Amazon Aurora Serverless DB cluster. To run these statements, you work with the Data Service API.

For more information about the Data Service API, see Using the Data API for Aurora Serverless in the Amazon Aurora User Guide.

If you have questions or comments related to the Data API, send email to Rds-data-api-feedback@amazon.com.

" } diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 4d41d726..f0a05f6c 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -250,7 +250,8 @@ {"shape":"DBInstanceNotFoundFault"}, {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, {"shape":"GlobalClusterNotFoundFault"}, - {"shape":"InvalidGlobalClusterStateFault"} + {"shape":"InvalidGlobalClusterStateFault"}, + {"shape":"DomainNotFoundFault"} ], "documentation":"

Creates a new Amazon Aurora DB cluster.

You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon RDS MySQL DB instance. For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier is encrypted, you must also specify the PreSignedUrl parameter.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" }, @@ -375,7 +376,8 @@ {"shape":"DBSubnetGroupNotAllowedFault"}, {"shape":"InvalidDBSubnetGroupFault"}, {"shape":"StorageTypeNotSupportedFault"}, - {"shape":"KMSKeyNotAccessibleFault"} + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DomainNotFoundFault"} ], "documentation":"

Creates a new DB instance that acts as a Read Replica for an existing source DB instance. You can create a Read Replica for a DB instance running MySQL, MariaDB, Oracle, or PostgreSQL. For more information, see Working with Read Replicas in the Amazon RDS User Guide.

Amazon Aurora doesn't support this action. You must call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All Read Replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified following.

Your source DB instance must have backup retention enabled.

" }, @@ -1317,7 +1319,8 @@ {"shape":"DBClusterParameterGroupNotFoundFault"}, {"shape":"InvalidDBSecurityGroupStateFault"}, {"shape":"InvalidDBInstanceStateFault"}, - {"shape":"DBClusterAlreadyExistsFault"} + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"DomainNotFoundFault"} ], "documentation":"

Modify a setting for an Amazon Aurora DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" }, @@ -1738,6 +1741,7 @@ {"shape":"DBClusterParameterGroupNotFoundFault"}, {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"DBClusterNotFoundFault"}, + {"shape":"DomainNotFoundFault"}, {"shape":"InsufficientStorageClusterCapacityFault"} ], "documentation":"

Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket. Amazon RDS must be authorized to access the Amazon S3 bucket and the data must be created using the Percona XtraBackup utility as described in Migrating Data to an Amazon Aurora MySQL DB Cluster in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" @@ -1771,6 +1775,7 @@ {"shape":"InvalidSubnet"}, {"shape":"OptionGroupNotFoundFault"}, {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DomainNotFoundFault"}, {"shape":"DBClusterParameterGroupNotFoundFault"} ], "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" @@ -1803,6 +1808,7 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"OptionGroupNotFoundFault"}, {"shape":"StorageQuotaExceededFault"}, + {"shape":"DomainNotFoundFault"}, {"shape":"DBClusterParameterGroupNotFoundFault"} ], "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" @@ -2671,7 +2677,7 @@ }, "EndpointType":{ "shape":"String", - "documentation":"

The type of the endpoint. One of: READER, ANY.

" + "documentation":"

The type of the endpoint. One of: READER, WRITER, ANY.

" }, "StaticMembers":{ "shape":"StringList", @@ -2728,7 +2734,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to use.

Aurora MySQL

Example: 5.6.10a, 5.7.12

Aurora PostgreSQL

Example: 9.6.3

" + "documentation":"

The version number of the database engine to use.

To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-postgresql, use the following command:

aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"

Aurora MySQL

Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5

Aurora PostgreSQL

Example: 9.6.3, 10.7

" }, "Port":{ "shape":"IntegerOptional", @@ -2776,7 +2782,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

" }, "BacktrackWindow":{ "shape":"LongOptional", @@ -2788,7 +2794,7 @@ }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, or global.

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

" }, "ScalingConfiguration":{ "shape":"ScalingConfiguration", @@ -2802,6 +2808,10 @@ "shape":"String", "documentation":"

The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster.

" }, + "EnableHttpEndpoint":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster. By default, the HTTP endpoint is disabled.

When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless DB cluster. You can also query your database from inside the RDS console with the query editor.

For more information, see Using the Data API for Aurora Serverless in the Amazon Aurora User Guide.
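A sketch of enabling the endpoint at cluster creation (identifiers and credentials are placeholders; serverless engine mode is assumed, since the HTTP endpoint applies to Aurora Serverless clusters):

import boto3

rds = boto3.client('rds')

# Create an Aurora Serverless cluster with the Data API enabled.
rds.create_db_cluster(
    DBClusterIdentifier='serverless-cluster',
    Engine='aurora',
    EngineMode='serverless',
    MasterUsername='admin',
    MasterUserPassword='example-password',
    EnableHttpEndpoint=True,
)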

" + }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", "documentation":"

A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.

" @@ -2934,7 +2944,7 @@ }, "DBParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine is used.

Constraints:

" + "documentation":"

The name of the DB parameter group to associate with this DB instance. If you do not specify a value for DBParameterGroupName, then the default DBParameterGroup for the specified DB engine is used.

Constraints:

" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", @@ -2966,7 +2976,7 @@ }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

Constraints: Must be a multiple between 1 and 50 of the storage amount for the DB instance.

" + "documentation":"

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

Constraints: Must be a multiple between 1 and 50 of the storage amount for the DB instance.

" }, "OptionGroupName":{ "shape":"String", @@ -3010,7 +3020,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

For an Amazon RDS DB instance that's running Microsoft SQL Server, this parameter specifies the Active Directory directory ID to create the instance in. Amazon RDS uses Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

" + "documentation":"

The Active Directory directory ID to create the DB instance in. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", @@ -3038,7 +3048,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines:

Amazon Aurora

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.

MySQL

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines:

Amazon Aurora

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.

MySQL

PostgreSQL

For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
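Once mapping is enabled, clients authenticate with a short-lived token instead of a password; a sketch using the boto3 RDS client's generate_db_auth_token helper (endpoint, port, and user are placeholders):

import boto3

rds = boto3.client('rds', region_name='us-east-1')

# The token is passed in place of a password when connecting over SSL.
token = rds.generate_db_auth_token(
    DBHostname='mydb.123456789012.us-east-1.rds.amazonaws.com',
    Port=3306,
    DBUsername='iam_user',
)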

" }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", @@ -3114,6 +3124,10 @@ "shape":"String", "documentation":"

The option group the DB instance is associated with. If omitted, the option group associated with the source instance is used.

" }, + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same-region Read Replica, or the default DBParameterGroup for the specified DB engine for a cross-region Read Replica.

Constraints:

" + }, "PubliclyAccessible":{ "shape":"BooleanOptional", "documentation":"

A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance is not publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.

" @@ -3153,7 +3167,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", @@ -3182,6 +3196,14 @@ "DeletionProtection":{ "shape":"BooleanOptional", "documentation":"

A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see Deleting a DB Instance.

" + }, + "Domain":{ + "shape":"String", + "documentation":"

The Active Directory directory ID to create the DB instance in.

For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" + }, + "DomainIAMRoleName":{ + "shape":"String", + "documentation":"

Specify the name of the IAM role to be used when making API calls to the Directory Service.
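A sketch of supplying both members when creating an Oracle instance, shown against CreateDBInstance, which carries the same fields (the directory ID, role name, and other values are placeholders):

import boto3

rds = boto3.client('rds')

# Join the new Oracle instance to a managed Active Directory.
rds.create_db_instance(
    DBInstanceIdentifier='oracle-kerberos-db',
    DBInstanceClass='db.m5.large',
    Engine='oracle-ee',
    AllocatedStorage=100,
    MasterUsername='admin',
    MasterUserPassword='example-password',
    Domain='d-1234567890',
    DomainIAMRoleName='rds-directoryservice-access-role',
)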

" } } }, @@ -3603,7 +3625,7 @@ }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, or parallelquery.

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

" }, "ScalingConfigurationInfo":{"shape":"ScalingConfigurationInfo"}, "DeletionProtection":{ @@ -3771,7 +3793,7 @@ }, "CustomEndpointType":{ "shape":"String", - "documentation":"

The type associated with a custom endpoint. One of: READER, ANY.

" + "documentation":"

The type associated with a custom endpoint. One of: READER, WRITER, ANY.

" }, "StaticMembers":{ "shape":"StringList", @@ -3860,7 +3882,7 @@ }, "IsClusterWriter":{ "shape":"Boolean", - "documentation":"

A value that indicates whehter the cluster member is the primary instance for the DB cluster.

" + "documentation":"

A value that is true if the cluster member is the primary instance for the DB cluster and false otherwise.

" }, "DBClusterParameterGroupStatus":{ "shape":"String", @@ -4961,7 +4983,7 @@ "members":{ "DBParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DP parameter group.

" + "documentation":"

The name of the DB parameter group.

" }, "ParameterApplyStatus":{ "shape":"String", @@ -5826,7 +5848,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

This parameter is not currently supported.

" + "documentation":"

A filter that specifies one or more DB cluster snapshots to describe.

Supported filters:

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -5953,7 +5975,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

A filter that specifies one or more DB instances to describe.

Supported filters:

" + "documentation":"

A filter that specifies one or more DB instances to describe.

Supported filters:

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -6144,7 +6166,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

This parameter is not currently supported.

" + "documentation":"

A filter that specifies one or more DB snapshots to describe.

Supported filters:

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -6497,6 +6519,10 @@ "shape":"BooleanOptional", "documentation":"

A value that indicates whether to show only those reservations that support Multi-AZ.

" }, + "LeaseId":{ + "shape":"String", + "documentation":"

The lease identifier filter value. Specify this parameter to show only the reservation that matches the specified lease ID.

AWS Support might request the lease ID for an issue related to a reserved DB instance.
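A sketch of filtering by lease ID (the ID shown is a made-up example of the kind AWS Support might ask for):

import boto3

rds = boto3.client('rds')

reservations = rds.describe_reserved_db_instances(
    LeaseId='a1b2c3d4-5678-90ab-cdef-EXAMPLE11111',
)
for ri in reservations['ReservedDBInstances']:
    print(ri['ReservedDBInstanceId'], ri.get('LeaseId'))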

" + }, "Filters":{ "shape":"FilterList", "documentation":"

This parameter is not currently supported.

" @@ -7517,7 +7543,7 @@ }, "EndpointType":{ "shape":"String", - "documentation":"

The type of the endpoint. One of: READER, ANY.

" + "documentation":"

The type of the endpoint. One of: READER, WRITER, ANY.

" }, "StaticMembers":{ "shape":"StringList", @@ -7579,7 +7605,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

" }, "BacktrackWindow":{ "shape":"LongOptional", @@ -7591,7 +7617,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled.

For a list of valid engine versions, use DescribeDBEngineVersions.

" + "documentation":"

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled.

To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-postgresql, use the following command:

aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"

" }, "AllowMajorVersionUpgrade":{ "shape":"Boolean", @@ -7778,7 +7804,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

The Active Directory Domain to move the instance to. Specify none to remove the instance from its current domain. The domain must be created prior to this operation. Currently only a Microsoft SQL Server instance can be created in a Active Directory Domain.

" + "documentation":"

The Active Directory directory ID to move the DB instance to. Specify none to remove the instance from its current domain. The domain must be created prior to this operation. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", @@ -7810,7 +7836,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines

Amazon Aurora

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster. For more information, see ModifyDBCluster.

MySQL

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", @@ -7862,7 +7888,7 @@ "members":{ "DBParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB parameter group.

Constraints:

" + "documentation":"

The name of the DB parameter group.

Constraints:

" }, "Parameters":{ "shape":"ParametersList", @@ -8567,6 +8593,10 @@ "SupportsStorageAutoscaling":{ "shape":"BooleanOptional", "documentation":"

Whether or not Amazon RDS can automatically scale storage for DB instances that use the specified instance class.

" + }, + "SupportsKerberosAuthentication":{ + "shape":"BooleanOptional", + "documentation":"

Whether a DB instance supports Kerberos Authentication.

" } }, "documentation":"

Contains a list of available options for a DB instance.

This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

", @@ -8673,11 +8703,11 @@ }, "AutoAppliedAfterDate":{ "shape":"TStamp", - "documentation":"

The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date. If this date is specified, any next-maintenance opt-in requests are ignored.

" + "documentation":"

The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date.

" }, "ForcedApplyDate":{ "shape":"TStamp", - "documentation":"

The date when the maintenance action is automatically applied. The maintenance action is applied to the resource on this date regardless of the maintenance window for the resource. If this date is specified, any immediate opt-in requests are ignored.

" + "documentation":"

The date when the maintenance action is automatically applied. The maintenance action is applied to the resource on this date regardless of the maintenance window for the resource.

" }, "OptInStatus":{ "shape":"String", @@ -8845,7 +8875,7 @@ }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Default: 1

Constraints:

" + "documentation":"

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Default: 1

Constraints:

" }, "PreferredBackupWindow":{ "shape":"String", @@ -9157,6 +9187,10 @@ "ReservedDBInstanceArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) for the reserved DB instance.

" + }, + "LeaseId":{ + "shape":"String", + "documentation":"

The unique identifier for the lease associated with the reserved DB instance.

AWS Support might request the lease ID for an issue related to a reserved DB instance.

" } }, "documentation":"

This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.

", @@ -9324,7 +9358,7 @@ "members":{ "DBParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB parameter group.

Constraints:

" + "documentation":"

The name of the DB parameter group.

Constraints:

" }, "ResetAllParameters":{ "shape":"Boolean", @@ -9415,7 +9449,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to use.

Aurora MySQL

Example: 5.6.10a

Aurora PostgreSQL

Example: 9.6.3

" + "documentation":"

The version number of the database engine to use.

To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-postgresql, use the following command:

aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"

Aurora MySQL

Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5

Aurora PostgreSQL

Example: 9.6.3, 10.7

" }, "Port":{ "shape":"IntegerOptional", @@ -9452,7 +9486,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

" }, "SourceEngine":{ "shape":"String", @@ -9524,7 +9558,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version of the database engine to use for the new DB cluster.

" + "documentation":"

The version of the database engine to use for the new DB cluster.

To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-postgresql, use the following command:

aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"

Aurora MySQL

Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5

Aurora PostgreSQL

Example: 9.6.3, 10.7

" }, "Port":{ "shape":"IntegerOptional", @@ -9556,7 +9590,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

" }, "BacktrackWindow":{ "shape":"LongOptional", @@ -9568,7 +9602,7 @@ }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, or parallelquery.

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

" }, "ScalingConfiguration":{ "shape":"ScalingConfiguration", @@ -9645,7 +9679,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

" }, "BacktrackWindow":{ "shape":"LongOptional", @@ -9758,7 +9792,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

Specify the Active Directory Domain to restore the instance in.

" + "documentation":"

Specify the Active Directory directory ID to restore the DB instance in. The domain must be created prior to this operation. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", @@ -9770,7 +9804,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -9786,7 +9820,7 @@ }, "DBParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine is used.

Constraints:

" + "documentation":"

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then the default DBParameterGroup for the specified DB engine is used.

Constraints:

" }, "DeletionProtection":{ "shape":"BooleanOptional", @@ -9863,7 +9897,7 @@ }, "DBParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default parameter group for the specified engine is used.

" + "documentation":"

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then the default DBParameterGroup for the specified DB engine is used.

" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", @@ -9895,7 +9929,7 @@ }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

The amount of Provisioned IOPS (input/output operations per second) to allocate initially for the DB instance. For information about valid Iops values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

" + "documentation":"

The amount of Provisioned IOPS (input/output operations per second) to allocate initially for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

" }, "OptionGroupName":{ "shape":"String", @@ -9935,7 +9969,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "SourceEngine":{ "shape":"String", @@ -10084,7 +10118,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

Specify the Active Directory Domain to restore the instance in.

" + "documentation":"

Specify the Active Directory directory ID to restore the DB instance in. The domain must be created prior to this operation. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" }, "DomainIAMRoleName":{ "shape":"String", @@ -10092,7 +10126,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -10108,7 +10142,7 @@ }, "DBParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine is used.

Constraints:

" + "documentation":"

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then the default DBParameterGroup for the specified DB engine is used.

Constraints:

" }, "DeletionProtection":{ "shape":"BooleanOptional", diff --git a/botocore/data/rds/2014-10-31/waiters-2.json b/botocore/data/rds/2014-10-31/waiters-2.json index c698be52..57accf12 100644 --- a/botocore/data/rds/2014-10-31/waiters-2.json +++ b/botocore/data/rds/2014-10-31/waiters-2.json @@ -50,10 +50,10 @@ "maxAttempts": 60, "acceptors": [ { - "expected": "deleted", - "matcher": "pathAll", + "expected": true, + "matcher": "path", "state": "success", - "argument": "DBInstances[].DBInstanceStatus" + "argument": "length(DBInstances) == `0`" }, { "expected": "DBInstanceNotFound", @@ -135,10 +135,10 @@ "maxAttempts": 60, "acceptors": [ { - "expected": "deleted", - "matcher": "pathAll", + "expected": true, + "matcher": "path", "state": "success", - "argument": "DBSnapshots[].Status" + "argument": "length(DBSnapshots) == `0`" }, { "expected": "DBSnapshotNotFound", @@ -171,6 +171,91 @@ } ] }, + "DBClusterSnapshotAvailable": { + "delay": 30, + "operation": "DescribeDBClusterSnapshots", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "DBClusterSnapshots[].Status" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "DBClusterSnapshots[].Status" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBClusterSnapshots[].Status" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "DBClusterSnapshots[].Status" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBClusterSnapshots[].Status" + }, + { + "expected": "incompatible-parameters", + "matcher": "pathAny", + "state": "failure", + "argument": "DBClusterSnapshots[].Status" + } + ] + }, + "DBClusterSnapshotDeleted": { + "delay": 30, + "operation": "DescribeDBClusterSnapshots", + "maxAttempts": 60, + "acceptors": [ + { + "expected": true, + "matcher": "path", + "state": "success", + "argument": "length(DBClusterSnapshots) == `0`" + }, + { + "expected": "DBClusterSnapshotNotFoundFault", + "matcher": "error", + "state": "success" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "DBClusterSnapshots[].Status" + }, + { + "expected": "modifying", + "matcher": "pathAny", + "state": "failure", + "argument": "DBClusterSnapshots[].Status" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBClusterSnapshots[].Status" + }, + { + "expected": "resetting-master-credentials", + "matcher": "pathAny", + "state": "failure", + "argument": "DBClusterSnapshots[].Status" + } + ] + }, "DBSnapshotCompleted": { "delay": 15, "operation": "DescribeDBSnapshots", diff --git a/botocore/data/redshift/2012-12-01/paginators-1.json b/botocore/data/redshift/2012-12-01/paginators-1.json index e423444e..614516d3 100644 --- a/botocore/data/redshift/2012-12-01/paginators-1.json +++ b/botocore/data/redshift/2012-12-01/paginators-1.json @@ -131,6 +131,12 @@ "limit_key": "MaxRecords", "output_token": "Marker", "result_key": "ReservedNodeOfferings" + }, + "DescribeNodeConfigurationOptions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "NodeConfigurationOptionList" } } } diff --git a/botocore/data/redshift/2012-12-01/service-2.json b/botocore/data/redshift/2012-12-01/service-2.json index a5d24288..2a1a913d 100644 --- a/botocore/data/redshift/2012-12-01/service-2.json +++ 
b/botocore/data/redshift/2012-12-01/service-2.json @@ -807,6 +807,23 @@ ], "documentation":"

Describes whether information, such as queries and connection attempts, is being logged for the specified Amazon Redshift cluster.

" }, + "DescribeNodeConfigurationOptions":{ + "name":"DescribeNodeConfigurationOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNodeConfigurationOptionsMessage"}, + "output":{ + "shape":"NodeConfigurationOptionsMessage", + "resultWrapper":"DescribeNodeConfigurationOptionsResult" + }, + "errors":[ + {"shape":"ClusterSnapshotNotFoundFault"}, + {"shape":"InvalidClusterSnapshotStateFault"} + ], + "documentation":"

Returns properties of possible node configurations such as node type, number of nodes, and disk usage for the specified action type.

" + }, "DescribeOrderableClusterOptions":{ "name":"DescribeOrderableClusterOptions", "http":{ @@ -1561,6 +1578,10 @@ "locationName":"AccountWithRestoreAccess" } }, + "ActionType":{ + "type":"string", + "enum":["restore-cluster"] + }, "AssociatedClusterList":{ "type":"list", "member":{ @@ -2009,6 +2030,10 @@ "shape":"String", "documentation":"

The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:

" }, + "NextMaintenanceWindowStartTime":{ + "shape":"TStamp", + "documentation":"

The date and time in UTC when system maintenance can begin.

" + }, "ResizeInfo":{ "shape":"ResizeInfo", "documentation":"

Returns the following:

" @@ -3841,6 +3866,37 @@ }, "documentation":"

" }, + "DescribeNodeConfigurationOptionsMessage":{ + "type":"structure", + "required":["ActionType"], + "members":{ + "ActionType":{ + "shape":"ActionType", + "documentation":"

The action type to evaluate for possible node configurations. Currently, it must be \"restore-cluster\".

" + }, + "SnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the snapshot to evaluate for possible node configurations.

" + }, + "OwnerAccount":{ + "shape":"String", + "documentation":"

The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.

" + }, + "Filters":{ + "shape":"NodeConfigurationOptionsFilterList", + "documentation":"

A set of name, operator, and value items to filter the results.

", + "locationName":"Filter" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeNodeConfigurationOptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 500

Constraints: minimum 100, maximum 500.

" + } + } + }, "DescribeOrderableClusterOptionsMessage":{ "type":"structure", "members":{ @@ -5425,6 +5481,78 @@ } } }, + "NodeConfigurationOption":{ + "type":"structure", + "members":{ + "NodeType":{ + "shape":"String", + "documentation":"

The node type, such as \"ds2.8xlarge\".

" + }, + "NumberOfNodes":{ + "shape":"Integer", + "documentation":"

The number of nodes.

" + }, + "EstimatedDiskUtilizationPercent":{ + "shape":"DoubleOptional", + "documentation":"

The estimated disk utilization percentage.

" + } + }, + "documentation":"

A list of node configurations.

" + }, + "NodeConfigurationOptionList":{ + "type":"list", + "member":{ + "shape":"NodeConfigurationOption", + "locationName":"NodeConfigurationOption" + } + }, + "NodeConfigurationOptionsFilter":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NodeConfigurationOptionsFilterName", + "documentation":"

The name of the element to filter.

" + }, + "Operator":{ + "shape":"OperatorType", + "documentation":"

The filter operator. If filter Name is NodeType, only the 'in' operator is supported. Provide one value to evaluate for 'eq', 'lt', 'le', 'gt', and 'ge'. Provide two values to evaluate for 'between'. Provide a list of values for 'in'.

" + }, + "Values":{ + "shape":"ValueStringList", + "documentation":"

List of values. Compare Name using Operator to Values. If filter Name is NumberOfNodes, then values can range from 0 to 200. If filter Name is EstimatedDiskUtilizationPercent, then values can range from 0 to 100. For example, filter NumberOfNodes (name) GT (operator) 3 (values).
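Putting the filter shapes and the new paginator together, a boto3 sketch (the snapshot identifier is a placeholder; the paginator name matches the paginators-1.json entry added above):

import boto3

redshift = boto3.client('redshift')

# Page through node configurations for restoring a snapshot,
# keeping only options with more than 3 nodes.
paginator = redshift.get_paginator('describe_node_configuration_options')
for page in paginator.paginate(
    ActionType='restore-cluster',
    SnapshotIdentifier='my-cluster-snapshot',
    Filters=[{'Name': 'NumberOfNodes', 'Operator': 'gt', 'Values': ['3']}],
):
    for option in page['NodeConfigurationOptionList']:
        print(option['NodeType'], option['NumberOfNodes'],
              option.get('EstimatedDiskUtilizationPercent'))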

", + "locationName":"Value" + } + }, + "documentation":"

A set of elements to filter the returned node configurations.

" + }, + "NodeConfigurationOptionsFilterList":{ + "type":"list", + "member":{ + "shape":"NodeConfigurationOptionsFilter", + "locationName":"NodeConfigurationOptionsFilter" + } + }, + "NodeConfigurationOptionsFilterName":{ + "type":"string", + "enum":[ + "NodeType", + "NumberOfNodes", + "EstimatedDiskUtilizationPercent" + ] + }, + "NodeConfigurationOptionsMessage":{ + "type":"structure", + "members":{ + "NodeConfigurationOptionList":{ + "shape":"NodeConfigurationOptionList", + "documentation":"

A list of valid node configurations.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

" + } + } + }, "NumberOfNodesPerClusterLimitExceededFault":{ "type":"structure", "members":{ @@ -5449,6 +5577,18 @@ }, "exception":true }, + "OperatorType":{ + "type":"string", + "enum":[ + "eq", + "lt", + "gt", + "le", + "ge", + "in", + "between" + ] + }, "OrderableClusterOption":{ "type":"structure", "members":{ @@ -6156,6 +6296,10 @@ "SnapshotScheduleIdentifier":{ "shape":"String", "documentation":"

A unique identifier for the snapshot schedule.

" + }, + "NumberOfNodes":{ + "shape":"IntegerOptional", + "documentation":"

The number of nodes specified when provisioning the restored cluster.

" } }, "documentation":"

" @@ -7306,6 +7450,13 @@ }, "documentation":"

A maintenance track that you can switch the current track to.

" }, + "ValueStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, "VpcSecurityGroupIdList":{ "type":"list", "member":{ diff --git a/botocore/data/rekognition/2016-06-27/service-2.json b/botocore/data/rekognition/2016-06-27/service-2.json index 086dde67..f81e973d 100644 --- a/botocore/data/rekognition/2016-06-27/service-2.json +++ b/botocore/data/rekognition/2016-06-27/service-2.json @@ -218,7 +218,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects explicit or suggestive adult content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.

For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

" + "documentation":"

Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.

For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.
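A minimal boto3 sketch of image moderation against an S3 object (bucket and key are placeholders):

import boto3

rekognition = boto3.client('rekognition')

response = rekognition.detect_moderation_labels(
    Image={'S3Object': {'Bucket': 'my-bucket', 'Name': 'photo.jpg'}},
    MinConfidence=60,
)
for label in response['ModerationLabels']:
    # ParentName is '' for labels at the top of the taxonomy.
    print(label['Name'], label['ParentName'], label['Confidence'])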

" }, "DetectText":{ "name":"DetectText", @@ -294,7 +294,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the content moderation analysis results for a Amazon Rekognition Video analysis started by StartContentModeration.

Content moderation analysis of a video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Working with Stored Videos in the Amazon Rekognition Devlopers Guide.

GetContentModeration returns detected content moderation labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

" + "documentation":"

Gets the unsafe content analysis results for a Amazon Rekognition Video analysis started by StartContentModeration.

Unsafe content analysis of a video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the unsafe content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Working with Stored Videos in the Amazon Rekognition Developer Guide.

GetContentModeration returns detected unsafe content labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.
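A hedged sketch of the full start/poll/paginate flow described above (bucket and key are placeholders, and simple polling stands in for the SNS notification):

import time
import boto3

rekognition = boto3.client('rekognition')

job_id = rekognition.start_content_moderation(
    Video={'S3Object': {'Bucket': 'my-bucket', 'Name': 'video.mp4'}},
    MinConfidence=60,
)['JobId']

# Poll instead of subscribing to the SNS topic.
result = rekognition.get_content_moderation(JobId=job_id, SortBy='TIMESTAMP')
while result['JobStatus'] == 'IN_PROGRESS':
    time.sleep(15)
    result = rekognition.get_content_moderation(JobId=job_id, SortBy='TIMESTAMP')

# Follow NextToken to collect every page of labels.
labels = list(result['ModerationLabels'])
while 'NextToken' in result:
    result = rekognition.get_content_moderation(
        JobId=job_id, NextToken=result['NextToken'],
    )
    labels.extend(result['ModerationLabels'])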

" }, "GetFaceDetection":{ "name":"GetFaceDetection", @@ -550,7 +550,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts asynchronous detection of explicit or suggestive adult content in a stored video.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content moderation analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

", + "documentation":"

Starts asynchronous detection of unsafe content in a stored video.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When unsafe content analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the unsafe content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

", "idempotent":true }, "StartFaceDetection":{ @@ -969,14 +969,14 @@ "members":{ "Timestamp":{ "shape":"Timestamp", - "documentation":"

Time, in milliseconds from the beginning of the video, that the moderation label was detected.

" + "documentation":"

Time, in milliseconds from the beginning of the video, that the unsafe content label was detected.

" }, "ModerationLabel":{ "shape":"ModerationLabel", - "documentation":"

The moderation label detected by in the stored video.

" + "documentation":"

The unsafe content label detected in the stored video.

" } }, - "documentation":"

Information about a moderation label detection in a stored video.

" + "documentation":"

Information about an unsafe content label detection in a stored video.

" }, "ContentModerationDetections":{ "type":"list", @@ -1290,7 +1290,7 @@ "members":{ "ModerationLabels":{ "shape":"ModerationLabels", - "documentation":"

Array of detected Moderation labels and the time, in millseconds from the start of the video, they were detected.

" + "documentation":"

Array of detected Moderation labels and the time, in milliseconds from the start of the video, they were detected.

" }, "ModerationModelVersion":{ "shape":"String", @@ -1329,7 +1329,7 @@ "documentation":"

Level of confidence in the determination.

" } }, - "documentation":"

The emotions detected on the face, and the confidence level in the determination. For example, HAPPY, SAD, and ANGRY.

" + "documentation":"

The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.

" }, "EmotionName":{ "type":"string", @@ -1461,7 +1461,7 @@ }, "Emotions":{ "shape":"Emotions", - "documentation":"

The emotions detected on the face, and the confidence level in the determination. For example, HAPPY, SAD, and ANGRY.

" + "documentation":"

The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.

" }, "Landmarks":{ "shape":"Landmarks", @@ -1691,7 +1691,7 @@ "members":{ "JobId":{ "shape":"JobId", - "documentation":"

The identifier for the content moderation job. Use JobId to identify the job in a subsequent call to GetContentModeration.

" + "documentation":"

The identifier for the unsafe content job. Use JobId to identify the job in a subsequent call to GetContentModeration.

" }, "MaxResults":{ "shape":"MaxResults", @@ -1699,7 +1699,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the previous response was incomplete (because there is more data to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of content moderation labels.

" + "documentation":"

If the previous response was incomplete (because there is more data to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of unsafe content labels.

" }, "SortBy":{ "shape":"ContentModerationSortBy", @@ -1712,7 +1712,7 @@ "members":{ "JobStatus":{ "shape":"VideoJobStatus", - "documentation":"

The current status of the content moderation job.

" + "documentation":"

The current status of the unsafe content analysis job.

" }, "StatusMessage":{ "shape":"StatusMessage", @@ -1724,11 +1724,11 @@ }, "ModerationLabels":{ "shape":"ContentModerationDetections", - "documentation":"

The detected moderation labels and the time(s) they were detected.

" + "documentation":"

The detected unsafe content labels and the time(s) they were detected.

" }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of moderation labels.

" + "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of unsafe content labels.

" }, "ModerationModelVersion":{ "shape":"String", @@ -2351,14 +2351,14 @@ }, "Name":{ "shape":"String", - "documentation":"

The label name for the type of content detected in the image.

" + "documentation":"

The label name for the type of unsafe content detected in the image.

" }, "ParentName":{ "shape":"String", "documentation":"

The name for the parent label. Labels at the top level of the hierarchy have the parent label \"\".

" } }, - "documentation":"

Provides information about a single type of moderated content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

" + "documentation":"

Provides information about a single type of unsafe content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

" }, "ModerationLabels":{ "type":"list", @@ -2693,7 +2693,7 @@ }, "FaceMatchThreshold":{ "shape":"Percent", - "documentation":"

(Optional) Specifies the minimum confidence in the face match to return. For example, don't return any matches where confidence in matches is less than 70%.

" + "documentation":"

(Optional) Specifies the minimum confidence in the face match to return. For example, don't return any matches where confidence in matches is less than 70%. The default value is 80%.
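A sketch of overriding the default threshold, shown against SearchFacesByImage as one operation that accepts this member (collection, bucket, and key are placeholders):

import boto3

rekognition = boto3.client('rekognition')

matches = rekognition.search_faces_by_image(
    CollectionId='my-collection',
    Image={'S3Object': {'Bucket': 'my-bucket', 'Name': 'face.jpg'}},
    FaceMatchThreshold=90,  # stricter than the 80% default
)
for match in matches['FaceMatches']:
    print(match['Face']['FaceId'], match['Similarity'])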

" } } }, @@ -2739,7 +2739,7 @@ }, "FaceMatchThreshold":{ "shape":"Percent", - "documentation":"

Optional value specifying the minimum confidence in the face match to return. For example, don't return any matches where confidence in matches is less than 70%.

" + "documentation":"

Optional value specifying the minimum confidence in the face match to return. For example, don't return any matches where confidence in matches is less than 70%. The default value is 80%.

" } } }, @@ -2792,7 +2792,7 @@ }, "JobTag":{ "shape":"JobTag", - "documentation":"

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

" + "documentation":"

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

" } } }, @@ -2811,7 +2811,7 @@ "members":{ "Video":{ "shape":"Video", - "documentation":"

The video in which you want to moderate content. The video must be stored in an Amazon S3 bucket.

" + "documentation":"

The video in which you want to detect unsafe content. The video must be stored in an Amazon S3 bucket.

" }, "MinConfidence":{ "shape":"Percent", @@ -2823,11 +2823,11 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the content moderation analysis to.

" + "documentation":"

The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the unsafe content analysis to.

" }, "JobTag":{ "shape":"JobTag", - "documentation":"

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

" + "documentation":"

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

" } } }, @@ -2836,7 +2836,7 @@ "members":{ "JobId":{ "shape":"JobId", - "documentation":"

The identifier for the content moderation analysis job. Use JobId to identify the job in a subsequent call to GetContentModeration.

" + "documentation":"

The identifier for the unsafe content analysis job. Use JobId to identify the job in a subsequent call to GetContentModeration.

" } } }, @@ -2862,7 +2862,7 @@ }, "JobTag":{ "shape":"JobTag", - "documentation":"

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

" + "documentation":"

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

" } } }, @@ -2892,7 +2892,7 @@ }, "FaceMatchThreshold":{ "shape":"Percent", - "documentation":"

The minimum confidence in the person match to return. For example, don't return any matches where confidence in matches is less than 70%.

" + "documentation":"

The minimum confidence in the person match to return. For example, don't return any matches where confidence in matches is less than 70%. The default value is 80%.

" }, "CollectionId":{ "shape":"CollectionId", @@ -2904,7 +2904,7 @@ }, "JobTag":{ "shape":"JobTag", - "documentation":"

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

" + "documentation":"

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

" } } }, @@ -2939,7 +2939,7 @@ }, "JobTag":{ "shape":"JobTag", - "documentation":"

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

" + "documentation":"

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

" } } }, @@ -2970,7 +2970,7 @@ }, "JobTag":{ "shape":"JobTag", - "documentation":"

Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.

" + "documentation":"

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

" } } }, diff --git a/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json b/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json index 3529e47a..c8fe14c8 100644 --- a/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json +++ b/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json @@ -426,5 +426,5 @@ } } }, - "documentation":"Resource Groups Tagging API

This guide describes the API operations for the resource groups tagging.

A tag is a label that you assign to an AWS resource. A tag consists of a key and a value, both of which you define. For example, if you have two Amazon EC2 instances, you might assign both a tag key of \"Stack.\" But the value of \"Stack\" might be \"Testing\" for one and \"Production\" for the other.

Tagging can help you organize your resources and enables you to simplify resource management, access management and cost allocation.

You can use the resource groups tagging API operations to complete the following tasks:

To make full use of the resource groups tagging API operations, you might need additional IAM permissions, including permission to access the resources of individual services as well as permission to view and apply tags to those resources. For more information, see Obtaining Permissions for Resource Groups and Tag Editor.

You can use the Resource Groups Tagging API to tag resources for the following AWS services.

" + "documentation":"Resource Groups Tagging API

This guide describes the API operations for resource groups tagging.

A tag is a label that you assign to an AWS resource. A tag consists of a key and a value, both of which you define. For example, if you have two Amazon EC2 instances, you might assign both instances a tag key of \"Stack.\" But the value of \"Stack\" might be \"Testing\" for one and \"Production\" for the other.

Tagging can help you organize your resources and simplify resource management, access management, and cost allocation.

You can use the resource groups tagging API operations to complete the following tasks:

To use resource groups tagging API operations, you must add the following permissions to your IAM policy:

You'll also need permissions to access the resources of individual services so that you can tag and untag those resources.

For more information on IAM policies, see Managing IAM Policies in the IAM User Guide.

You can use the Resource Groups Tagging API to tag resources for the following AWS services.
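A hedged sketch of the two central operations; the instance ARN is a placeholder. TagResources applies one key/value pair across resources from different services in a single call, and GetResources then finds everything carrying that tag.

import boto3

tagging = boto3.client("resourcegroupstaggingapi")
tagging.tag_resources(
    ResourceARNList=["arn:aws:ec2:us-east-1:123456789012:instance/i-0abcd1234example"],
    Tags={"Stack": "Production"},
)
found = tagging.get_resources(TagFilters=[{"Key": "Stack", "Values": ["Production"]}])
for mapping in found["ResourceTagMappingList"]:
    print(mapping["ResourceARN"])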

" } diff --git a/botocore/data/robomaker/2018-06-29/service-2.json b/botocore/data/robomaker/2018-06-29/service-2.json index d257c8c5..e4008c75 100644 --- a/botocore/data/robomaker/2018-06-29/service-2.json +++ b/botocore/data/robomaker/2018-06-29/service-2.json @@ -648,6 +648,7 @@ } }, "Boolean":{"type":"boolean"}, + "BoxedBoolean":{"type":"boolean"}, "CancelDeploymentJobRequest":{ "type":"structure", "required":["job"], @@ -977,8 +978,7 @@ "name", "sources", "simulationSoftwareSuite", - "robotSoftwareSuite", - "renderingEngine" + "robotSoftwareSuite" ], "members":{ "name":{ @@ -1123,6 +1123,10 @@ "shape":"OutputLocation", "documentation":"

Location for output files generated by the simulation job.

" }, + "loggingConfig":{ + "shape":"LoggingConfig", + "documentation":"

The logging configuration.

" + }, "maxJobDurationInSeconds":{ "shape":"JobDuration", "documentation":"

The maximum simulation job duration in seconds (up to 14 days or 1,209,600 seconds). When maxJobDurationInSeconds is reached, the simulation job status will transition to Completed.

" @@ -1143,6 +1147,10 @@ "shape":"SimulationApplicationConfigs", "documentation":"

The simulation application to use in the simulation job.

" }, + "dataSources":{ + "shape":"DataSourceConfigs", + "documentation":"

The data sources for the simulation job.

There is a limit of 100 files and a combined size of 25GB for all DataSourceConfig objects.
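A hedged sketch of passing the new dataSources field; the role, bucket, and keys are placeholders, and a real request would also carry robot or simulation application configs.

import boto3

# dataSources accepts one to five DataSourceConfig entries, subject to the
# 100-file / 25 GB combined limit noted above.
robomaker = boto3.client("robomaker")
response = robomaker.create_simulation_job(
    maxJobDurationInSeconds=3600,
    iamRole="arn:aws:iam::123456789012:role/RoboMakerSimulationRole",
    dataSources=[
        {
            "name": "ground-truth",
            "s3Bucket": "my-simulation-assets",
            "s3Keys": ["datasets/run1.bag", "datasets/run2.bag"],
        }
    ],
)
print(response["arn"])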

" + }, "tags":{ "shape":"TagMap", "documentation":"

A map that contains tag keys and tag values that are attached to the simulation job.

" @@ -1188,6 +1196,10 @@ "shape":"OutputLocation", "documentation":"

Simulation job output files location.

" }, + "loggingConfig":{ + "shape":"LoggingConfig", + "documentation":"

The logging configuration.

" + }, "maxJobDurationInSeconds":{ "shape":"JobDuration", "documentation":"

The maximum simulation job duration in seconds.

" @@ -1208,6 +1220,10 @@ "shape":"SimulationApplicationConfigs", "documentation":"

The simulation application used by the simulation job.

" }, + "dataSources":{ + "shape":"DataSources", + "documentation":"

The data sources for the simulation job.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The list of all tags added to the simulation job.

" @@ -1219,6 +1235,61 @@ } }, "CreatedAt":{"type":"timestamp"}, + "DataSource":{ + "type":"structure", + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the data source.

" + }, + "s3Bucket":{ + "shape":"S3Bucket", + "documentation":"

The S3 bucket where the data files are located.

" + }, + "s3Keys":{ + "shape":"S3KeyOutputs", + "documentation":"

The list of S3 keys identifying the data source files.

" + } + }, + "documentation":"

Information about a data source.

" + }, + "DataSourceConfig":{ + "type":"structure", + "required":[ + "name", + "s3Bucket", + "s3Keys" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the data source.

" + }, + "s3Bucket":{ + "shape":"S3Bucket", + "documentation":"

The S3 bucket where the data files are located.

" + }, + "s3Keys":{ + "shape":"S3Keys", + "documentation":"

The list of S3 keys identifying the data source files.

" + } + }, + "documentation":"

Information about a data source.

" + }, + "DataSourceConfigs":{ + "type":"list", + "member":{"shape":"DataSourceConfig"}, + "max":5, + "min":1 + }, + "DataSourceNames":{ + "type":"list", + "member":{"shape":"Name"} + }, + "DataSources":{ + "type":"list", + "member":{"shape":"DataSource"} + }, "DeleteFleetRequest":{ "type":"structure", "required":["fleet"], @@ -1326,6 +1397,10 @@ "failureThresholdPercentage":{ "shape":"Percentage", "documentation":"

The percentage of deployments that need to fail before stopping deployment.

" + }, + "robotDeploymentTimeoutInSeconds":{ + "shape":"DeploymentTimeout", + "documentation":"

The amount of time, in seconds, to wait for deployment to a single robot to complete. Choose a time between 1 minute and 7 days. The default is 5 hours.
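A hedged sketch of setting the new timeout in a deployment config; the fleet and application ARNs are placeholders, and robotDeploymentTimeoutInSeconds overrides the 5-hour default noted above (here: 1 hour).

import boto3

robomaker = boto3.client("robomaker")
robomaker.create_deployment_job(
    fleet="arn:aws:robomaker:us-east-1:123456789012:deployment-fleet/my-fleet/1111111111111",
    deploymentConfig={
        "concurrentDeploymentPercentage": 20,
        "failureThresholdPercentage": 25,
        "robotDeploymentTimeoutInSeconds": 3600,
    },
    deploymentApplicationConfigs=[
        {
            "application": "arn:aws:robomaker:us-east-1:123456789012:robot-application/my-app/1111111111111",
            "applicationVersion": "1",
            "launchConfig": {"packageName": "my_package", "launchFile": "deploy.launch"},
        }
    ],
)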

" } }, "documentation":"

Information about a deployment configuration.

" @@ -1437,6 +1512,7 @@ "Canceled" ] }, + "DeploymentTimeout":{"type":"long"}, "DeploymentVersion":{ "type":"string", "max":255, @@ -1793,6 +1869,10 @@ "shape":"OutputLocation", "documentation":"

Location for output files generated by the simulation job.

" }, + "loggingConfig":{ + "shape":"LoggingConfig", + "documentation":"

The logging configuration.

" + }, "maxJobDurationInSeconds":{ "shape":"JobDuration", "documentation":"

The maximum job duration in seconds. The value must be 8 days (691,200 seconds) or less.

" @@ -1813,6 +1893,10 @@ "shape":"SimulationApplicationConfigs", "documentation":"

A list of simulation applications.

" }, + "dataSources":{ + "shape":"DataSources", + "documentation":"

The data sources for the simulation job.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The list of all tags added to the specified simulation job.

" @@ -1820,6 +1904,10 @@ "vpcConfig":{ "shape":"VPCConfigResponse", "documentation":"

The VPC configuration.

" + }, + "networkInterface":{ + "shape":"NetworkInterface", + "documentation":"

The network interface information for the simulation job.

" } } }, @@ -1971,6 +2059,10 @@ "environmentVariables":{ "shape":"EnvironmentVariableMap", "documentation":"

The environment variables for the application launch.

" + }, + "portForwardingConfig":{ + "shape":"PortForwardingConfig", + "documentation":"

The port forwarding configuration.

" } }, "documentation":"

Information about a launch configuration.

" @@ -2194,6 +2286,17 @@ } } }, + "LoggingConfig":{ + "type":"structure", + "required":["recordAllRosTopics"], + "members":{ + "recordAllRosTopics":{ + "shape":"BoxedBoolean", + "documentation":"

A Boolean indicating whether to record all ROS topics.

" + } + }, + "documentation":"

The logging configuration.
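A hedged fragment: LoggingConfig has a single required member, shown below as the dict that would be passed to create_simulation_job via the loggingConfig parameter (see the dataSources sketch earlier).

# recordAllRosTopics asks RoboMaker to capture every ROS topic for the job.
logging_config = {"recordAllRosTopics": True}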

" + }, "MaxResults":{"type":"integer"}, "Name":{ "type":"string", @@ -2201,10 +2304,33 @@ "min":1, "pattern":"[a-zA-Z0-9_\\-]*" }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "networkInterfaceId":{ + "shape":"GenericString", + "documentation":"

The ID of the network interface.

" + }, + "privateIpAddress":{ + "shape":"GenericString", + "documentation":"

The IPv4 address of the network interface within the subnet.

" + }, + "publicIpAddress":{ + "shape":"GenericString", + "documentation":"

The IPv4 public address of the network interface.

" + } + }, + "documentation":"

Describes a network interface.

" + }, "NonEmptyString":{ "type":"string", "min":1 }, + "NonSystemPort":{ + "type":"integer", + "max":65535, + "min":1024 + }, "OutputLocation":{ "type":"structure", "members":{ @@ -2240,6 +2366,49 @@ "max":100, "min":1 }, + "Port":{ + "type":"integer", + "max":65535, + "min":1 + }, + "PortForwardingConfig":{ + "type":"structure", + "members":{ + "portMappings":{ + "shape":"PortMappingList", + "documentation":"

The port mappings for the configuration.

" + } + }, + "documentation":"

Configuration information for port forwarding.

" + }, + "PortMapping":{ + "type":"structure", + "required":[ + "jobPort", + "applicationPort" + ], + "members":{ + "jobPort":{ + "shape":"Port", + "documentation":"

The port number on the simulation job instance to use as a remote connection point.

" + }, + "applicationPort":{ + "shape":"NonSystemPort", + "documentation":"

The port number on the application.

" + }, + "enableOnPublicIp":{ + "shape":"Boolean", + "documentation":"

A Boolean indicating whether to enable this port mapping on the public IP address.

" + } + }, + "documentation":"

An object representing a port mapping.
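A hedged fragment of a simulation application launchConfig using these shapes; the package and launch file names are placeholders.

# jobPort is the remote connection point on the simulation job instance,
# applicationPort (1024-65535) is where the application listens, and
# enableOnPublicIp exposes the mapping on the job's public IP address.
launch_config = {
    "packageName": "my_simulation_package",
    "launchFile": "simulation.launch",
    "portForwardingConfig": {
        "portMappings": [
            {"jobPort": 9090, "applicationPort": 9090, "enableOnPublicIp": True}
        ]
    },
}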

" + }, + "PortMappingList":{ + "type":"list", + "member":{"shape":"PortMapping"}, + "max":10, + "min":0 + }, "ProgressDetail":{ "type":"structure", "members":{ @@ -2425,9 +2594,7 @@ }, "RobotApplicationNames":{ "type":"list", - "member":{"shape":"Name"}, - "max":1, - "min":1 + "member":{"shape":"Name"} }, "RobotApplicationSummaries":{ "type":"list", @@ -2566,6 +2733,30 @@ "min":1, "pattern":".*" }, + "S3KeyOutput":{ + "type":"structure", + "members":{ + "s3Key":{ + "shape":"S3Key", + "documentation":"

The S3 key.

" + }, + "etag":{ + "shape":"S3Etag", + "documentation":"

The etag for the object.

" + } + }, + "documentation":"

Information about S3 keys.

" + }, + "S3KeyOutputs":{ + "type":"list", + "member":{"shape":"S3KeyOutput"} + }, + "S3Keys":{ + "type":"list", + "member":{"shape":"S3Key"}, + "max":100, + "min":1 + }, "SecurityGroups":{ "type":"list", "member":{"shape":"NonEmptyString"}, @@ -2611,9 +2802,7 @@ }, "SimulationApplicationNames":{ "type":"list", - "member":{"shape":"Name"}, - "max":1, - "min":1 + "member":{"shape":"Name"} }, "SimulationApplicationSummaries":{ "type":"list", @@ -2694,6 +2883,10 @@ "shape":"OutputLocation", "documentation":"

Location for output files generated by the simulation job.

" }, + "loggingConfig":{ + "shape":"LoggingConfig", + "documentation":"

The logging configuration.

" + }, "maxJobDurationInSeconds":{ "shape":"JobDuration", "documentation":"

The maximum simulation job duration in seconds. The value must be 8 days (691,200 seconds) or less.

" @@ -2714,6 +2907,10 @@ "shape":"SimulationApplicationConfigs", "documentation":"

A list of simulation applications.

" }, + "dataSources":{ + "shape":"DataSources", + "documentation":"

The data sources for the simulation job.

" + }, "tags":{ "shape":"TagMap", "documentation":"

A map that contains tag keys and tag values that are attached to the simulation job.

" @@ -2721,6 +2918,10 @@ "vpcConfig":{ "shape":"VPCConfigResponse", "documentation":"

VPC configuration information.

" + }, + "networkInterface":{ + "shape":"NetworkInterface", + "documentation":"

" } }, "documentation":"

Information about a simulation job.

" @@ -2733,6 +2934,7 @@ "SimulationApplicationCrash", "BadPermissionsRobotApplication", "BadPermissionsSimulationApplication", + "BadPermissionsS3Object", "BadPermissionsS3Output", "BadPermissionsCloudwatchLogs", "SubnetIpLimitExceeded", @@ -2740,8 +2942,13 @@ "BadPermissionsUserCredentials", "InvalidBundleRobotApplication", "InvalidBundleSimulationApplication", + "InvalidS3Resource", + "MismatchedEtag", "RobotApplicationVersionMismatchedEtag", "SimulationApplicationVersionMismatchedEtag", + "ResourceNotFound", + "InvalidInput", + "WrongRegionS3Bucket", "WrongRegionS3Output", "WrongRegionRobotApplication", "WrongRegionSimulationApplication" @@ -2794,6 +3001,10 @@ "robotApplicationNames":{ "shape":"RobotApplicationNames", "documentation":"

A list of simulation job robot application names.

" + }, + "dataSourceNames":{ + "shape":"DataSourceNames", + "documentation":"

The names of the data sources.

" } }, "documentation":"

Summary information for a simulation job.

" @@ -2818,11 +3029,14 @@ }, "SimulationSoftwareSuiteType":{ "type":"string", - "enum":["Gazebo"] + "enum":[ + "Gazebo", + "RosbagPlay" + ] }, "SimulationSoftwareSuiteVersionType":{ "type":"string", - "pattern":"7|9" + "pattern":"7|9|Kinetic|Melodic" }, "SimulationTimeMillis":{"type":"long"}, "Source":{ @@ -3081,8 +3295,7 @@ "application", "sources", "simulationSoftwareSuite", - "robotSoftwareSuite", - "renderingEngine" + "robotSoftwareSuite" ], "members":{ "application":{ diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index ab14fda9..255d3221 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -42,7 +42,7 @@ }, "input":{"shape":"CreateCodeRepositoryInput"}, "output":{"shape":"CreateCodeRepositoryOutput"}, - "documentation":"

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

" + "documentation":"

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

" }, "CreateCompilationJob":{ "name":"CreateCompilationJob", @@ -69,7 +69,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS i an AWS Region in the AWS Identity and Access Management User Guide.

" + "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

" }, "CreateEndpointConfig":{ "name":"CreateEndpointConfig", @@ -110,7 +110,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

" + "documentation":"

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

" }, "CreateModel":{ "name":"CreateModel", @@ -146,7 +146,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook.

In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, Amazon SageMaker does the following:

  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Option) If you specified SubnetId, Amazon SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN).

After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.

For more information, see How It Works.

" + "documentation":"

Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook.

In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, Amazon SageMaker does the following:

  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Optional) If you specified SubnetId, Amazon SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it.

After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.

For more information, see How It Works.

" }, "CreateNotebookInstanceLifecycleConfig":{ "name":"CreateNotebookInstanceLifecycleConfig", @@ -183,7 +183,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

For more information about Amazon SageMaker, see How It Works.

" + "documentation":"

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

For more information about Amazon SageMaker, see How It Works.

" }, "CreateTransformJob":{ "name":"CreateTransformJob", @@ -1066,7 +1066,7 @@ "members":{ "AnnotationConsolidationLambdaArn":{ "shape":"LambdaFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of a Lambda function implements the logic for annotation consolidation.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

For more information, see Annotation Consolidation.

" + "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

For more information, see Annotation Consolidation.

" } }, "documentation":"

Configures how labels are consolidated across human workers.

" @@ -1102,6 +1102,10 @@ "SingleRecord" ] }, + "BillableTimeInSeconds":{ + "type":"integer", + "min":1 + }, "Boolean":{"type":"boolean"}, "BooleanOperator":{ "type":"string", @@ -1242,6 +1246,21 @@ "max":8, "min":1 }, + "CheckpointConfig":{ + "type":"structure", + "required":["S3Uri"], + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

Identifies the S3 path where you want Amazon SageMaker to store checkpoints. For example, s3://bucket-name/key-name-prefix.

" + }, + "LocalPath":{ + "shape":"DirectoryPath", + "documentation":"

(Optional) The local directory where checkpoints are written. The default directory is /opt/ml/checkpoints/.

" + } + }, + "documentation":"

Contains information about the output location for managed spot training checkpoint data.
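A hedged fragment of the new shape; the S3 URI is a placeholder.

# LocalPath may be omitted, in which case checkpoints are written to the
# /opt/ml/checkpoints/ default noted above.
checkpoint_config = {
    "S3Uri": "s3://my-training-bucket/checkpoints/job-01",
    "LocalPath": "/opt/ml/checkpoints/",
}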

" + }, "CodeRepositoryArn":{ "type":"string", "max":2048, @@ -1437,7 +1456,7 @@ "members":{ "ContainerHostname":{ "shape":"ContainerHostname", - "documentation":"

This parameter is ignored for models that contain only a PrimaryContainer.

When a ContainerDefinition is part of an inference pipeline, the value of ths parameter uniquely identifies the container for the purposes of logging and metrics. For information, see Use Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a ContainerDefinition that is part of an inference pipeline, a unique name is automatically assigned based on the position of the ContainerDefinition in the pipeline. If you specify a value for the ContainerHostName for any ContainerDefinition that is part of an inference pipeline, you must specify a value for the ContainerHostName parameter of every ContainerDefinition in that pipeline.

" + "documentation":"

This parameter is ignored for models that contain only a PrimaryContainer.

When a ContainerDefinition is part of an inference pipeline, the value of this parameter uniquely identifies the container for the purposes of logging and metrics. For information, see Use Logs and Metrics to Monitor an Inference Pipeline. If you don't specify a value for this parameter for a ContainerDefinition that is part of an inference pipeline, a unique name is automatically assigned based on the position of the ContainerDefinition in the pipeline. If you specify a value for the ContainerHostName for any ContainerDefinition that is part of an inference pipeline, you must specify a value for the ContainerHostName parameter of every ContainerDefinition in that pipeline.

" }, "Image":{ "shape":"Image", @@ -1445,7 +1464,7 @@ }, "ModelDataUrl":{ "shape":"Url", - "documentation":"

The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for Amazon SageMaker built-in algorithms, but not if you use your own algorithms. For more information on built-in algorithms, see Common Parameters.

If you provide a value for this parameter, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provide. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

If you use a built-in algorithm to create a model, Amazon SageMaker requires that you provide a S3 path to the model artifacts in ModelDataUrl.

" + "documentation":"

The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for Amazon SageMaker built-in algorithms, but not if you use your own algorithms. For more information on built-in algorithms, see Common Parameters.

If you provide a value for this parameter, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provide. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

If you use a built-in algorithm to create a model, Amazon SageMaker requires that you provide an S3 path to the model artifacts in ModelDataUrl.

" }, "Environment":{ "shape":"EnvironmentMap", @@ -1511,7 +1530,7 @@ }, "ScalingType":{ "shape":"HyperParameterScalingType", - "documentation":"

The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

Auto

Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

Linear

Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

Logarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

Logarithmic scaling works only for ranges that have only values greater than 0.

ReverseLogarithmic

Hyperparemeter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.

Reverse logarithmic scaling works only for ranges that are entirely within the range 0<=x<1.0.

" + "documentation":"

The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

Auto

Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

Linear

Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

Logarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

Logarithmic scaling works only for ranges that have only values greater than 0.

ReverseLogarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.

Reverse logarithmic scaling works only for ranges that are entirely within the range 0<=x<1.0.

" } }, "documentation":"

A list of continuous hyperparameters to tune.

" @@ -1673,7 +1692,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

" + "documentation":"

The Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

Nitro-based instances do not support encryption with AWS KMS. If any of the models that you specify in the ProductionVariants parameter use Nitro-based instances, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any Nitro-based instances, the call to CreateEndpointConfig fails.

For a list of Nitro-based instances, see Nitro-based Instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances.

For more information about storage volumes on Nitro-based instances, see Amazon EBS and NVMe on Linux Instances.

" } } }, @@ -1743,7 +1762,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see AWS Tagging Strategies.

Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches.

" + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see AWS Tagging Strategies.

Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches.

" } } }, @@ -1806,7 +1825,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" } } }, @@ -1933,11 +1952,11 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

When you send any requests to AWS resources from the notebook instance, Amazon SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so Amazon SageMaker can perform these tasks. The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

" + "documentation":"

When you send any requests to AWS resources from the notebook instance, Amazon SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so Amazon SageMaker can perform these tasks. The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

" }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the AWS Key Management Service Developer Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the AWS Key Management Service Developer Guide.

" }, "Tags":{ "shape":"TagList", @@ -1957,15 +1976,15 @@ }, "AcceleratorTypes":{ "shape":"NotebookInstanceAcceleratorTypes", - "documentation":"

A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

" + "documentation":"

A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

" }, "DefaultCodeRepository":{ "shape":"CodeRepositoryNameOrUrl", - "documentation":"

A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "AdditionalCodeRepositories":{ "shape":"AdditionalCodeRepositoryNamesOrUrls", - "documentation":"

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.
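A hedged sketch combining the repository parameters; the name, role, and repository URLs are placeholders. The default repository is the directory the instance opens in; the additional repositories are cloned alongside it.

import boto3

sagemaker = boto3.client("sagemaker")
sagemaker.create_notebook_instance(
    NotebookInstanceName="my-notebook",
    InstanceType="ml.t2.medium",
    RoleArn="arn:aws:iam::123456789012:role/SageMakerNotebookRole",
    DefaultCodeRepository="https://github.com/example/notebooks.git",
    AdditionalCodeRepositories=[
        "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/shared-utils"
    ],
)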

" }, "RootAccess":{ "shape":"RootAccess", @@ -2061,11 +2080,11 @@ }, "InputDataConfig":{ "shape":"InputDataConfig", - "documentation":"

An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location.

Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data. The configuration for each channel provides the S3 location where the input data is stored. It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format.

Depending on the input mode that the algorithm supports, Amazon SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams.

" + "documentation":"

An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location.

Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data. The configuration for each channel provides the S3, EFS, or FSx location where the input data is stored. It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format.

Depending on the input mode that the algorithm supports, Amazon SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams. For example, if you specify an EFS location, input data files will be made available as input streams. They do not need to be downloaded.

" }, "OutputDataConfig":{ "shape":"OutputDataConfig", - "documentation":"

Specifies the path to the S3 bucket where you want to store model artifacts. Amazon SageMaker creates subfolders for the artifacts.

" + "documentation":"

Specifies the path to the S3 location where you want to store model artifacts. Amazon SageMaker creates subfolders for the artifacts.

" }, "ResourceConfig":{ "shape":"ResourceConfig", @@ -2090,6 +2109,14 @@ "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", "documentation":"

To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training. For more information, see Protect Communications Between ML Compute Instances in a Distributed Training Job.

" + }, + "EnableManagedSpotTraining":{ + "shape":"Boolean", + "documentation":"

To train models using managed spot training, choose True. Managed spot training provides a fully managed and scalable infrastructure for training machine learning models. This option is useful when training jobs can be interrupted and when there is flexibility in when the training job is run.

The complete and intermediate results of jobs are stored in an Amazon S3 bucket, and can be used as a starting point to train models incrementally. Amazon SageMaker provides metrics and logs in CloudWatch. They can be used to see when managed spot training jobs are running, interrupted, resumed, or completed.
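A hedged sketch of a spot training request; the names, ARNs, image URI, and S3 paths are placeholders, and the assumption here is that StoppingCondition accepts a MaxWaitTimeInSeconds cap (at least MaxRuntimeInSeconds) when EnableManagedSpotTraining is set, consistent with the MaxWaitTimeExceeded status described later in this patch.

import boto3

sagemaker = boto3.client("sagemaker")
sagemaker.create_training_job(
    TrainingJobName="spot-training-demo",
    AlgorithmSpecification={
        "TrainingImage": "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-algo:latest",
        "TrainingInputMode": "File",
    },
    RoleArn="arn:aws:iam::123456789012:role/SageMakerTrainingRole",
    OutputDataConfig={"S3OutputPath": "s3://my-training-bucket/output"},
    ResourceConfig={
        "InstanceType": "ml.m5.xlarge",
        "InstanceCount": 1,
        "VolumeSizeInGB": 50,
    },
    # MaxWaitTimeInSeconds bounds spot waiting plus training time.
    StoppingCondition={"MaxRuntimeInSeconds": 3600, "MaxWaitTimeInSeconds": 7200},
    EnableManagedSpotTraining=True,
    CheckpointConfig={"S3Uri": "s3://my-training-bucket/checkpoints/spot-training-demo"},
)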

" + }, + "CheckpointConfig":{ + "shape":"CheckpointConfig", + "documentation":"

Contains information about the output location for managed spot training checkpoint data.

" } } }, @@ -2123,7 +2150,7 @@ }, "MaxConcurrentTransforms":{ "shape":"MaxConcurrentTransforms", - "documentation":"

The maximum number of parallel requests that can be sent to each instance in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, Amazon SageMaker checks the optional execution-parameters to determine the optimal settings for your chosen algorithm. If the execution-parameters endpoint is not enabled, the default value is 1. For more information on execution-parameters, see How Containers Serve Requests. For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms.

" + "documentation":"

The maximum number of parallel requests that can be sent to each instance in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, Amazon SageMaker checks the optional execution-parameters to determine the optimal settings for your chosen algorithm. If the execution-parameters endpoint is not enabled, the default value is 1. For more information on execution-parameters, see How Containers Serve Requests. For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms.

" }, "MaxPayloadInMB":{ "shape":"MaxPayloadInMB", @@ -2151,7 +2178,7 @@ }, "DataProcessing":{ "shape":"DataProcessing", - "documentation":"

The data structure used for combining the input data and inference in the output file. For more information, see Batch Transform I/O Join.

" + "documentation":"

The data structure used to specify the data to be used for inference in a batch transform job and to associate the data that is relevant to the prediction results in the output. The input filter provided allows you to exclude input data that is not needed for inference in a batch transform job. The output filter provided allows you to include input data relevant to interpreting the predictions in the output from the job. For more information, see Associate Prediction Results with their Corresponding Input Records.

" }, "Tags":{ "shape":"TagList", @@ -2183,7 +2210,7 @@ }, "MemberDefinitions":{ "shape":"MemberDefinitions", - "documentation":"

A list of MemberDefinition objects that contains objects that identify the Amazon Cognito user pool that makes up the work team. For more information, see Amazon Cognito User Pools.

All of the CognitoMemberDefinition objects that make up the member definition must have the same ClientId and UserPool values.

" + "documentation":"

A list of MemberDefinition objects that contains objects that identify the Amazon Cognito user pool that makes up the work team. For more information, see Amazon Cognito User Pools.

All of the CognitoMemberDefinition objects that make up the member definition must have the same ClientId and UserPool values.

" }, "Description":{ "shape":"String200", @@ -2195,7 +2222,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

" + "documentation":"

An array of key-value pairs.

For more information, see Resource Tag and Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" } } }, @@ -2220,18 +2247,18 @@ "members":{ "InputFilter":{ "shape":"JsonPath", - "documentation":"

A JSONPath expression used to select a portion of the input data to pass to the algorithm. Use the InputFilter parameter to exclude fields, such as an ID column, from the input. If you want Amazon SageMaker to pass the entire input dataset to the algorithm, accept the default value $.

Examples: \"$\", \"$[1:]\", \"$.features\"

" + "documentation":"

A JSONPath expression used to select a portion of the input data to pass to the algorithm. Use the InputFilter parameter to exclude fields, such as an ID column, from the input. If you want Amazon SageMaker to pass the entire input dataset to the algorithm, accept the default value $.

Examples: \"$\", \"$[1:]\", \"$.features\"

" }, "OutputFilter":{ "shape":"JsonPath", - "documentation":"

A JSONPath expression used to select a portion of the joined dataset to save in the output file for a batch transform job. If you want Amazon SageMaker to store the entire input dataset in the output file, leave the default value, $. If you specify indexes that aren't within the dimension size of the joined dataset, you get an error.

Examples: \"$\", \"$[0,5:]\", \"$.['id','SageMakerOutput']\"

" + "documentation":"

A JSONPath expression used to select a portion of the joined dataset to save in the output file for a batch transform job. If you want Amazon SageMaker to store the entire input dataset in the output file, leave the default value, $. If you specify indexes that aren't within the dimension size of the joined dataset, you get an error.

Examples: \"$\", \"$[0,5:]\", \"$['id','SageMakerOutput']\"

" }, "JoinSource":{ "shape":"JoinSource", - "documentation":"

Specifies the source of the data to join with the transformed data. The valid values are None and Input The default value is None which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource to Input. To join input and output, the batch transform job must satisfy the Requirements for Using Batch Transform I/O Join.

For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, Amazon SageMaker creates a new JSON file. In the new JSON file, and the input data is stored under the SageMakerInput key and the results are stored in SageMakerOutput.

For CSV files, Amazon SageMaker combines the transformed data with the input data at the end of the input data and stores it in the output file. The joined data has the joined input data followed by the transformed data and the output is a CSV file.

" + "documentation":"

Specifies the source of the data to join with the transformed data. The valid values are None and Input. The default value is None, which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource to Input.

For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, Amazon SageMaker creates a new JSON file. In the new JSON file, the input data is stored under the SageMakerInput key and the results are stored in SageMakerOutput.

For CSV files, Amazon SageMaker combines the transformed data with the input data at the end of the input data and stores it in the output file. The joined data has the joined input data followed by the transformed data and the output is a CSV file.

" } }, - "documentation":"

The data structure used to combine the input data and transformed data from the batch transform output into a joined dataset and to store it in an output file. It also contains information on how to filter the input data and the joined dataset. For more information, see Batch Transform I/O Join.

" + "documentation":"

The data structure used to specify the data to be used for inference in a batch transform job and to associate the data that is relevant to the prediction results in the output. The input filter provided allows you to exclude input data that is not needed for inference in a batch transform job. The output filter provided allows you to include input data relevant to interpreting the predictions in the output from the job. For more information, see Associate Prediction Results with their Corresponding Input Records.
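A hedged sketch of a transform job using these filters; the names and S3 paths are placeholders. InputFilter drops the first (ID) column before inference; JoinSource and OutputFilter then keep only the ID and the prediction in the output, as described above.

import boto3

sagemaker = boto3.client("sagemaker")
sagemaker.create_transform_job(
    TransformJobName="batch-with-join",
    ModelName="my-model",
    TransformInput={
        "DataSource": {
            "S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": "s3://my-bucket/input"}
        },
        "ContentType": "application/jsonlines",
    },
    TransformOutput={"S3OutputPath": "s3://my-bucket/output"},
    TransformResources={"InstanceType": "ml.m5.xlarge", "InstanceCount": 1},
    DataProcessing={
        "InputFilter": "$[1:]",
        "JoinSource": "Input",
        "OutputFilter": "$['id','SageMakerOutput']",
    },
)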

" }, "DataSource":{ "type":"structure", @@ -2239,6 +2266,10 @@ "S3DataSource":{ "shape":"S3DataSource", "documentation":"

The S3 location of the data source that is associated with a channel.

" + }, + "FileSystemDataSource":{ + "shape":"FileSystemDataSource", + "documentation":"

The file system that is associated with a channel.

" } }, "documentation":"

Describes the location of the channel data.

" @@ -2381,7 +2412,7 @@ "documentation":"

The date and time when the image path for the model resolved to the ResolvedImage.

" } }, - "documentation":"

Gets the Amazon EC2 Container Registry path of the docker image of the model that is hosted in this ProductionVariant.

If you used the registry/repository[:tag] form to specify the image path of the primary container when you created the model hosted in this ProductionVariant, the path resolves to a path of the form registry/repository[@digest]. A digest is a hash value that identifies a specific version of an image. For information about Amazon ECR paths, see Pulling an Image in the Amazon ECR User Guide.

" + "documentation":"

Gets the Amazon EC2 Container Registry path of the Docker image of the model that is hosted in this ProductionVariant.

If you used the registry/repository[:tag] form to specify the image path of the primary container when you created the model hosted in this ProductionVariant, the path resolves to a path of the form registry/repository[@digest]. A digest is a hash value that identifies a specific version of an image. For information about Amazon ECR paths, see Pulling an Image in the Amazon ECR User Guide.

" }, "DeployedImages":{ "type":"list", @@ -2843,7 +2874,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" }, "LabelingJobOutput":{ "shape":"LabelingJobOutput", @@ -3084,15 +3115,15 @@ }, "AcceleratorTypes":{ "shape":"NotebookInstanceAcceleratorTypes", - "documentation":"

A list of the Elastic Inference (EI) instance types associated with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

" + "documentation":"

A list of the Elastic Inference (EI) instance types associated with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

" }, "DefaultCodeRepository":{ "shape":"CodeRepositoryNameOrUrl", - "documentation":"

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "AdditionalCodeRepositories":{ "shape":"AdditionalCodeRepositoryNamesOrUrls", - "documentation":"

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "RootAccess":{ "shape":"RootAccess", @@ -3170,7 +3201,7 @@ }, "SecondaryStatus":{ "shape":"SecondaryStatus", - "documentation":"

Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition.

Amazon SageMaker provides primary statuses and secondary statuses that apply to each of them:

InProgress
  • Starting - Starting the training job.

  • Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes.

  • Training - Training is in progress.

  • Uploading - Training is complete and the model artifacts are being uploaded to the S3 location.

Completed
  • Completed - The training job has completed.

Failed
  • Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse.

Stopped
  • MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime.

  • Stopped - The training job has stopped.

Stopping
  • Stopping - Stopping the training job.

Valid values for SecondaryStatus are subject to change.

We no longer support the following secondary statuses:

" + "documentation":"

Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition.

Amazon SageMaker provides primary statuses and secondary statuses that apply to each of them:

InProgress
  • Starting - Starting the training job.

  • Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes.

  • Training - Training is in progress.

  • Interrupted - The job stopped because the managed spot training instances were interrupted.

  • Uploading - Training is complete and the model artifacts are being uploaded to the S3 location.

Completed
  • Completed - The training job has completed.

Failed
  • Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse.

Stopped
  • MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime.

  • MaxWaitTimeExceeded - The job stopped because it exceeded the maximum allowed wait time.

  • Stopped - The training job has stopped.

Stopping
  • Stopping - Stopping the training job.

Valid values for SecondaryStatus are subject to change.

We no longer support the following secondary statuses:

" }, "FailureReason":{ "shape":"FailureReason", @@ -3206,7 +3237,7 @@ }, "StoppingCondition":{ "shape":"StoppingCondition", - "documentation":"

Specifies a limit to how long a model training job can run. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

" + "documentation":"

Specifies a limit to how long a model training job can run. It also specifies the maximum time to wait for a spot instance. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

" }, "CreationTime":{ "shape":"Timestamp", @@ -3239,6 +3270,19 @@ "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", "documentation":"

To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

" + }, + "EnableManagedSpotTraining":{ + "shape":"Boolean", + "documentation":"

A Boolean indicating whether managed spot training is enabled (True) or not (False).

" + }, + "CheckpointConfig":{"shape":"CheckpointConfig"}, + "TrainingTimeInSeconds":{ + "shape":"TrainingTimeInSeconds", + "documentation":"

The training time in seconds.

" + }, + "BillableTimeInSeconds":{ + "shape":"BillableTimeInSeconds", + "documentation":"

The billable time in seconds.

You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings is 80%.
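As a quick illustration of that formula, here is a minimal Python sketch (not part of the service definition) that computes the managed spot savings from a DescribeTrainingJob response; the training job name is a hypothetical placeholder:

import boto3

sm = boto3.client("sagemaker")
# TrainingTimeInSeconds and BillableTimeInSeconds are returned for
# completed managed spot training jobs; the job name here is assumed.
job = sm.describe_training_job(TrainingJobName="my-spot-training-job")
training = job["TrainingTimeInSeconds"]    # e.g. 500
billable = job["BillableTimeInSeconds"]    # e.g. 100
savings = (1 - billable / training) * 100  # e.g. 80.0 (%)
print(f"Managed spot savings: {savings:.0f}%")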

" } } }, @@ -3278,7 +3322,7 @@ }, "FailureReason":{ "shape":"FailureReason", - "documentation":"

If the transform job failed, FailureReason describes why it failed. A transform job creates a log file, which includes error messages, and stores it as an Amazon S3 object. For more information, see Log Amazon SageMaker Events with Amazon CloudWatch.

" + "documentation":"

If the transform job failed, FailureReason describes why it failed. A transform job creates a log file, which includes error messages, and stores it as an Amazon S3 object. For more information, see Log Amazon SageMaker Events with Amazon CloudWatch.

" }, "ModelName":{ "shape":"ModelName", @@ -3400,6 +3444,11 @@ "Disabled" ] }, + "DirectoryPath":{ + "type":"string", + "max":4096, + "pattern":".*" + }, "DisassociateAdditionalCodeRepositories":{"type":"boolean"}, "DisassociateDefaultCodeRepository":{"type":"boolean"}, "DisassociateNotebookInstanceAcceleratorTypes":{"type":"boolean"}, @@ -3564,6 +3613,53 @@ "type":"string", "max":1024 }, + "FileSystemAccessMode":{ + "type":"string", + "enum":[ + "rw", + "ro" + ] + }, + "FileSystemDataSource":{ + "type":"structure", + "required":[ + "FileSystemId", + "FileSystemAccessMode", + "FileSystemType", + "DirectoryPath" + ], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The file system ID.

" + }, + "FileSystemAccessMode":{ + "shape":"FileSystemAccessMode", + "documentation":"

The access mode of the mount of the directory associated with the channel. A directory can be mounted either in ro (read-only) or rw (read-write) mode.

" + }, + "FileSystemType":{ + "shape":"FileSystemType", + "documentation":"

The file system type.

" + }, + "DirectoryPath":{ + "shape":"DirectoryPath", + "documentation":"

The full path to the directory to associate with the channel.

" + } + }, + "documentation":"

Specifies a file system data source for a channel.
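For illustration, a hedged sketch of an input channel that uses this shape, as it might appear in the InputDataConfig of a CreateTrainingJob request; the channel name, file system ID, and directory path are assumptions:

# One InputDataConfig entry: a training channel backed by an EFS file
# system, mounted read-only at the given directory path.
channel = {
    "ChannelName": "training",  # hypothetical channel name
    "DataSource": {
        "FileSystemDataSource": {
            "FileSystemId": "fs-0123456789abcdef0",  # assumed EFS ID
            "FileSystemAccessMode": "ro",            # "ro" or "rw"
            "FileSystemType": "EFS",                 # "EFS" or "FSxLustre"
            "DirectoryPath": "/training-data",       # full path to mount
        }
    },
}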

" + }, + "FileSystemId":{ + "type":"string", + "min":11, + "pattern":".*" + }, + "FileSystemType":{ + "type":"string", + "enum":[ + "EFS", + "FSxLustre" + ] + }, "Filter":{ "type":"structure", "required":["Name"], @@ -3620,7 +3716,7 @@ "FinalMetricDataList":{ "type":"list", "member":{"shape":"MetricData"}, - "max":20, + "max":40, "min":0 }, "Float":{"type":"float"}, @@ -3713,7 +3809,7 @@ }, "PreHumanTaskLambdaArn":{ "shape":"LambdaFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

US East (Northern Virginia) (us-east-1):

US East (Ohio) (us-east-2):

US West (Oregon) (us-west-2):

EU (Ireland) (eu-west-1):

Asia Pacific (Tokyo) (ap-northeast-1):

Asia Pacific (Sydney) (ap-southeast-1):

" + "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

US East (Northern Virginia) (us-east-1):

US East (Ohio) (us-east-2):

US West (Oregon) (us-west-2):

Canada (Central) (ca-central-1):

EU (Ireland) (eu-west-1):

EU (London) (eu-west-2):

EU (Frankfurt) (eu-central-1):

Asia Pacific (Tokyo) (ap-northeast-1):

Asia Pacific (Seoul) (ap-northeast-2):

Asia Pacific (Mumbai) (ap-south-1):

Asia Pacific (Singapore) (ap-southeast-1):

Asia Pacific (Sydney) (ap-southeast-2):

" }, "TaskKeywords":{ "shape":"TaskKeywords", @@ -3737,7 +3833,7 @@ }, "TaskAvailabilityLifetimeInSeconds":{ "shape":"TaskAvailabilityLifetimeInSeconds", - "documentation":"

The length of time that a task remains available for labelling by human workers.

" + "documentation":"

The length of time that a task remains available for labeling by human workers. If you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours (43,200 seconds). For private and vendor workforces, the maximum is as listed.

" }, "MaxConcurrentTaskCount":{ "shape":"MaxConcurrentTaskCount", @@ -3749,7 +3845,7 @@ }, "PublicWorkforceTaskPrice":{ "shape":"PublicWorkforceTaskPrice", - "documentation":"

The price that you pay for each task performed by a public worker.

" + "documentation":"

The price that you pay for each task performed by an Amazon Mechanical Turk worker.

" } }, "documentation":"

Information required for human workers to complete a labeling task.

" @@ -3870,7 +3966,7 @@ }, "StoppingCondition":{ "shape":"StoppingCondition", - "documentation":"

Specifies a limit to how long a model hyperparameter training job can run. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.

" + "documentation":"

Specifies a limit to how long a model hyperparameter training job can run. It also specifies how long you are willing to wait for a managed spot training job to complete. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.

" }, "EnableNetworkIsolation":{ "shape":"Boolean", @@ -3879,7 +3975,12 @@ "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", "documentation":"

To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

" - } + }, + "EnableManagedSpotTraining":{ + "shape":"Boolean", + "documentation":"

A Boolean indicating whether managed spot training is enabled (True) or not (False).

" + }, + "CheckpointConfig":{"shape":"CheckpointConfig"} }, "documentation":"

Defines the training jobs launched by a hyperparameter tuning job.

" }, @@ -3958,7 +4059,7 @@ "members":{ "Strategy":{ "shape":"HyperParameterTuningJobStrategyType", - "documentation":"

Specifies how hyperparameter tuning chooses the combinations of hyperparameter values to use for the training job it launches. To use the Bayesian search stategy, set this to Bayesian. To randomly search, set it to Random. For information about search strategies, see How Hyperparameter Tuning Works.

" + "documentation":"

Specifies how hyperparameter tuning chooses the combinations of hyperparameter values to use for the training job it launches. To use the Bayesian search strategy, set this to Bayesian. To randomly search, set it to Random. For information about search strategies, see How Hyperparameter Tuning Works.

" }, "HyperParameterTuningJobObjective":{ "shape":"HyperParameterTuningJobObjective", @@ -3974,7 +4075,7 @@ }, "TrainingJobEarlyStoppingType":{ "shape":"TrainingJobEarlyStoppingType", - "documentation":"

Specifies whether to use early stopping for training jobs launched by the hyperparameter tuning job. This can be one of the following values (the default value is OFF):

OFF

Training jobs launched by the hyperparameter tuning job do not use early stopping.

AUTO

Amazon SageMaker stops training jobs launched by the hyperparameter tuning job when they are unlikely to perform better than previously completed training jobs. For more information, see Stop Training Jobs Early.

" + "documentation":"

Specifies whether to use early stopping for training jobs launched by the hyperparameter tuning job. This can be one of the following values (the default value is OFF):

OFF

Training jobs launched by the hyperparameter tuning job do not use early stopping.

AUTO

Amazon SageMaker stops training jobs launched by the hyperparameter tuning job when they are unlikely to perform better than previously completed training jobs. For more information, see Stop Training Jobs Early.
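For illustration, a hedged sketch of a HyperParameterTuningJobConfig combining the strategy and early stopping settings described above; the objective metric, parameter range, and resource limits are illustrative assumptions (note that the botocore enum values are spelled "Off" and "Auto"):

# Hedged sketch of a HyperParameterTuningJobConfig; metric name, range,
# and limits are assumptions.
tuning_config = {
    "Strategy": "Bayesian",  # or "Random"
    "HyperParameterTuningJobObjective": {
        "Type": "Minimize",
        "MetricName": "validation:error",  # assumed metric name
    },
    "ResourceLimits": {
        "MaxNumberOfTrainingJobs": 50,
        "MaxParallelTrainingJobs": 5,
    },
    "ParameterRanges": {
        "ContinuousParameterRanges": [
            {"Name": "learning_rate", "MinValue": "0.001",
             "MaxValue": "0.1", "ScalingType": "Logarithmic"}
        ]
    },
    "TrainingJobEarlyStoppingType": "Auto",  # "Off" (default) or "Auto"
}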

" } }, "documentation":"

Configures a hyperparameter tuning job.

" @@ -4108,7 +4209,7 @@ "members":{ "ParentHyperParameterTuningJobs":{ "shape":"ParentHyperParameterTuningJobs", - "documentation":"

An array of hyperparameter tuning jobs that are used as the starting point for the new hyperparameter tuning job. For more information about warm starting a hyperparameter tuning job, see Using a Previous Hyperparameter Tuning Job as a Starting Point.

Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent jobs for warm start tuning jobs.

" + "documentation":"

An array of hyperparameter tuning jobs that are used as the starting point for the new hyperparameter tuning job. For more information about warm starting a hyperparameter tuning job, see Using a Previous Hyperparameter Tuning Job as a Starting Point.

Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent jobs for warm start tuning jobs.

" }, "WarmStartType":{ "shape":"HyperParameterTuningJobWarmStartType", @@ -4273,7 +4374,7 @@ }, "ScalingType":{ "shape":"HyperParameterScalingType", - "documentation":"

The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

Auto

Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

Linear

Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

Logarithmic

Hyperparemeter tuning searches the values in the hyperparameter range by using a logarithmic scale.

Logarithmic scaling works only for ranges that have only values greater than 0.

" + "documentation":"

The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

Auto

Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

Linear

Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

Logarithmic

Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

Logarithmic scaling works only for ranges that have only values greater than 0.

" } }, "documentation":"

For a hyperparameter of the integer type, specifies the range that a hyperparameter tuning job searches.

" @@ -4395,7 +4496,7 @@ "members":{ "LabelingJobAlgorithmSpecificationArn":{ "shape":"LabelingJobAlgorithmSpecificationArn", - "documentation":"

Specifies the Amazon Resource Name (ARN) of the algorithm used for auto-labeling. You must select one of the following ARNs:

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the algorithm used for auto-labeling. You must select one of the following ARNs:

" }, "InitialActiveLearningModelArn":{ "shape":"ModelArn", @@ -4529,7 +4630,7 @@ "members":{ "VolumeKmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service key ID for the key used to encrypt the output data, if any.

" + "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job. The VolumeKmsKeyId can be any of the following formats:

" } }, "documentation":"

Provides configuration information for labeling jobs.

" @@ -4616,7 +4717,7 @@ }, "AnnotationConsolidationLambdaArn":{ "shape":"LambdaFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of the Lambda function used to consolidate the annotations from individual workers into a label for a data object. For more information, see Annotation Consolidation.

" + "documentation":"

The Amazon Resource Name (ARN) of the Lambda function used to consolidate the annotations from individual workers into a label for a data object. For more information, see Annotation Consolidation.

" }, "FailureReason":{ "shape":"FailureReason", @@ -5628,6 +5729,10 @@ "type":"integer", "min":1 }, + "MaxWaitTimeInSeconds":{ + "type":"integer", + "min":1 + }, "MemberDefinition":{ "type":"structure", "members":{ @@ -5678,12 +5783,12 @@ "documentation":"

A regular expression that searches the output of a training job and gets the value of the metric. For more information about using regular expressions to define metrics, see Defining Objective Metrics.

" } }, - "documentation":"

Specifies a metric that the training algorithm writes to stderr or stdout. Amazon SageMakerhyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job.

" + "documentation":"

Specifies a metric that the training algorithm writes to stderr or stdout. Amazon SageMaker hyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job.
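For illustration, a hedged sketch of a metric definition list as it might appear in an AlgorithmSpecification; the metric names and regular expressions are assumptions about what an algorithm emits:

# Hedged sketch: metric definitions whose regexes are applied to the
# algorithm's stdout/stderr; names and patterns are illustrative.
metric_definitions = [
    {"Name": "train:loss",      "Regex": r"train-loss=([0-9\.]+)"},
    {"Name": "validation:loss", "Regex": r"valid-loss=([0-9\.]+)"},
]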

" }, "MetricDefinitionList":{ "type":"list", "member":{"shape":"MetricDefinition"}, - "max":20, + "max":40, "min":0 }, "MetricName":{ @@ -5949,7 +6054,7 @@ "members":{ "NestedPropertyName":{ "shape":"ResourcePropertyName", - "documentation":"

The name of the property to use in the nested filters. The value must match a listed property name, such as InputDataConfig.

" + "documentation":"

The name of the property to use in the nested filters. The value must match a listed property name, such as InputDataConfig.

" }, "Filters":{ "shape":"FilterList", @@ -6144,11 +6249,11 @@ }, "DefaultCodeRepository":{ "shape":"CodeRepositoryNameOrUrl", - "documentation":"

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "AdditionalCodeRepositories":{ "shape":"AdditionalCodeRepositoryNamesOrUrls", - "documentation":"

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" } }, "documentation":"

Provides summary information for an Amazon SageMaker notebook instance.

" @@ -6255,7 +6360,7 @@ "members":{ "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

" + "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

" }, "S3OutputPath":{ "shape":"S3Uri", @@ -6391,7 +6496,7 @@ }, "AcceleratorType":{ "shape":"ProductionVariantAcceleratorType", - "documentation":"

The size of the Elastic Inference (EI) instance to use for the production variant. EI instances provide on-demand GPU computing for inference. For more information, see Using Elastic Inference in Amazon SageMaker. For more information, see Using Elastic Inference in Amazon SageMaker.

" + "documentation":"

The size of the Elastic Inference (EI) instance to use for the production variant. EI instances provide on-demand GPU computing for inference. For more information, see Using Elastic Inference in Amazon SageMaker.

" } }, "documentation":"

Identifies a model that you want to host and the resources to deploy for hosting it. If you are deploying multiple models, tell Amazon SageMaker how to distribute traffic among the models by specifying variant weights.

" @@ -6438,12 +6543,25 @@ "ml.c5.2xlarge", "ml.c5.4xlarge", "ml.c5.9xlarge", - "ml.c5.18xlarge" + "ml.c5.18xlarge", + "ml.g4dn.xlarge", + "ml.g4dn.2xlarge", + "ml.g4dn.4xlarge", + "ml.g4dn.8xlarge", + "ml.g4dn.12xlarge", + "ml.g4dn.16xlarge", + "ml.r5.large", + "ml.r5.xlarge", + "ml.r5.2xlarge", + "ml.r5.4xlarge", + "ml.r5.12xlarge", + "ml.r5.24xlarge" ] }, "ProductionVariantList":{ "type":"list", "member":{"shape":"ProductionVariant"}, + "max":10, "min":1 }, "ProductionVariantSummary":{ @@ -6518,10 +6636,10 @@ "members":{ "AmountInUsd":{ "shape":"USD", - "documentation":"

Defines the amount of money paid to a worker in United States dollars.

" + "documentation":"

Defines the amount of money paid to an Amazon Mechanical Turk worker in United States dollars.

" } }, - "documentation":"

Defines the amount of money paid to an Amazon Mechanical Turk worker for each task performed.

Use one of the following prices for bounding box tasks. Prices are in US dollars.

Use one of the following prices for image classification, text classification, and custom tasks. Prices are in US dollars.

Use one of the following prices for semantic segmentation tasks. Prices are in US dollars.

" + "documentation":"

Defines the amount of money paid to an Amazon Mechanical Turk worker for each task performed.

Use one of the following prices for bounding box tasks. Prices are in US dollars and should be based on the complexity of the task; the longer it takes in your initial testing, the more you should offer.

Use one of the following prices for image classification, text classification, and custom tasks. Prices are in US dollars.

Use one of the following prices for semantic segmentation tasks. Prices are in US dollars.

" }, "RealtimeInferenceInstanceTypes":{ "type":"list", @@ -6865,7 +6983,9 @@ "Stopped", "MaxRuntimeExceeded", "Completed", - "Failed" + "Failed", + "Interrupted", + "MaxWaitTimeExceeded" ] }, "SecondaryStatusTransition":{ @@ -7064,9 +7184,13 @@ "MaxRuntimeInSeconds":{ "shape":"MaxRuntimeInSeconds", "documentation":"

The maximum length of time, in seconds, that the training or compilation job can run. If the job does not complete during this time, Amazon SageMaker ends the job. If this value is not specified, the default value is 1 day. The maximum value is 28 days.

" + }, + "MaxWaitTimeInSeconds":{ + "shape":"MaxWaitTimeInSeconds", + "documentation":"

The maximum length of time, in seconds, that you are willing to wait for a managed spot training job to complete. It is the amount of time spent waiting for Spot capacity plus the amount of time the training job runs. It must be equal to or greater than MaxRuntimeInSeconds.

" } }, - "documentation":"

Specifies a limit to how long a model training or compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the training or compilation job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by Amazon SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

" + "documentation":"

Specifies a limit to how long a model training or compilation job can run. It also specifies how long you are willing to wait for a managed spot training job to complete. When the job reaches the time limit, Amazon SageMaker ends the training or compilation job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by Amazon SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort, because the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.
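For illustration, a hedged sketch of the managed-spot fragment of a CreateTrainingJob request that this shape supports; the S3 checkpoint URI and time limits are assumptions:

# Hedged fragment of a CreateTrainingJob request enabling managed spot
# training; the checkpoint bucket and time limits are assumptions.
spot_settings = {
    "EnableManagedSpotTraining": True,
    "CheckpointConfig": {"S3Uri": "s3://my-bucket/checkpoints/"},  # assumed
    "StoppingCondition": {
        "MaxRuntimeInSeconds": 3600,   # cap on actual training time
        "MaxWaitTimeInSeconds": 7200,  # must be >= MaxRuntimeInSeconds
    },
}
# These keys are merged into the full CreateTrainingJob request alongside
# TrainingJobName, AlgorithmSpecification, RoleArn, and the data configs.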

" }, "String":{"type":"string"}, "String200":{ @@ -7187,7 +7311,10 @@ "deeplens", "rk3399", "rk3288", - "sbe_c" + "aisage", + "sbe_c", + "qcs605", + "qcs603" ] }, "TaskAvailabilityLifetimeInSeconds":{ @@ -7226,7 +7353,7 @@ "TaskTimeLimitInSeconds":{ "type":"integer", "max":28800, - "min":1 + "min":30 }, "TaskTitle":{ "type":"string", @@ -7281,6 +7408,7 @@ "ml.p3.2xlarge", "ml.p3.8xlarge", "ml.p3.16xlarge", + "ml.p3dn.24xlarge", "ml.c5.xlarge", "ml.c5.2xlarge", "ml.c5.4xlarge", @@ -7585,6 +7713,10 @@ }, "documentation":"

Defines how the algorithm is used for a training job.

" }, + "TrainingTimeInSeconds":{ + "type":"integer", + "min":1 + }, "TransformDataSource":{ "type":"structure", "required":["S3DataSource"], @@ -7811,7 +7943,7 @@ "members":{ "InstanceType":{ "shape":"TransformInstanceType", - "documentation":"

The ML compute instance type for the transform job. If you are using built-in algorithms to transform moderately sized datasets, we recommend using ml.m4.xlarge or ml.m5.largeinstance types.

" + "documentation":"

The ML compute instance type for the transform job. If you are using built-in algorithms to transform moderately sized datasets, we recommend using ml.m4.xlarge or ml.m5.large instance types.

" }, "InstanceCount":{ "shape":"TransformInstanceCount", @@ -7866,7 +7998,7 @@ "members":{ "UiTemplateS3Uri":{ "shape":"S3Uri", - "documentation":"

The Amazon S3 bucket location of the UI template. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

" + "documentation":"

The Amazon S3 bucket location of the UI template. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

" } }, "documentation":"

Provides configuration information for the worker UI for a labeling job.

" @@ -7990,15 +8122,15 @@ }, "DefaultCodeRepository":{ "shape":"CodeRepositoryNameOrUrl", - "documentation":"

The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "AdditionalCodeRepositories":{ "shape":"AdditionalCodeRepositoryNamesOrUrls", - "documentation":"

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "documentation":"

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" }, "AcceleratorTypes":{ "shape":"NotebookInstanceAcceleratorTypes", - "documentation":"

A list of the Elastic Inference (EI) instance types to associate with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

" + "documentation":"

A list of the Elastic Inference (EI) instance types to associate with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

" }, "DisassociateAcceleratorTypes":{ "shape":"DisassociateNotebookInstanceAcceleratorTypes", @@ -8028,11 +8160,11 @@ }, "OnCreate":{ "shape":"NotebookInstanceLifecycleConfigList", - "documentation":"

The shell script that runs only once, when you create a notebook instance

" + "documentation":"

The shell script that runs only once, when you create a notebook instance. The shell script must be a base64-encoded string.

" }, "OnStart":{ "shape":"NotebookInstanceLifecycleConfigList", - "documentation":"

The shell script that runs every time you start a notebook instance, including when you create the notebook instance.

" + "documentation":"

The shell script that runs every time you start a notebook instance, including when you create the notebook instance. The shell script must be a base64-encoded string.
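For illustration, a hedged sketch of supplying a base64-encoded OnStart script via UpdateNotebookInstanceLifecycleConfig; the config name and script body are assumptions:

import base64
import boto3

sm = boto3.client("sagemaker")
# Lifecycle hooks take base64-encoded shell scripts; the config name and
# script body here are assumptions.
on_start = "#!/bin/bash\necho 'notebook started' >> /tmp/lifecycle.log\n"
sm.update_notebook_instance_lifecycle_config(
    NotebookInstanceLifecycleConfigName="my-lifecycle-config",
    OnStart=[{"Content": base64.b64encode(on_start.encode()).decode()}],
)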

" } } }, @@ -8161,7 +8293,10 @@ "shape":"Timestamp", "documentation":"

The date and time that the work team was last updated (timestamp).

" }, - "NotificationConfiguration":{"shape":"NotificationConfiguration"} + "NotificationConfiguration":{ + "shape":"NotificationConfiguration", + "documentation":"

Configures SNS notifications of available or expiring work items for work teams.

" + } }, "documentation":"

Provides details about a labeling work team.

" }, diff --git a/botocore/data/securityhub/2018-10-26/service-2.json b/botocore/data/securityhub/2018-10-26/service-2.json index 7b1a0c7b..ea899ddf 100644 --- a/botocore/data/securityhub/2018-10-26/service-2.json +++ b/botocore/data/securityhub/2018-10-26/service-2.json @@ -641,6 +641,10 @@ "shapes":{ "AcceptInvitationRequest":{ "type":"structure", + "required":[ + "MasterId", + "InvitationId" + ], "members":{ "MasterId":{ "shape":"NonEmptyString", @@ -1523,6 +1527,7 @@ }, "DeclineInvitationsRequest":{ "type":"structure", + "required":["AccountIds"], "members":{ "AccountIds":{ "shape":"AccountIdList", @@ -1585,6 +1590,7 @@ }, "DeleteInvitationsRequest":{ "type":"structure", + "required":["AccountIds"], "members":{ "AccountIds":{ "shape":"AccountIdList", @@ -1655,7 +1661,9 @@ "members":{ "HubArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the Hub resource to retrieve.

" + "documentation":"

The ARN of the Hub resource to retrieve.

", + "location":"querystring", + "locationName":"HubArn" } } }, diff --git a/botocore/data/ses/2010-12-01/service-2.json b/botocore/data/ses/2010-12-01/service-2.json index 635bac83..5c376aa4 100644 --- a/botocore/data/ses/2010-12-01/service-2.json +++ b/botocore/data/ses/2010-12-01/service-2.json @@ -772,7 +772,7 @@ {"shape":"ConfigurationSetSendingPausedException"}, {"shape":"AccountSendingPausedException"} ], - "documentation":"

Composes an email message and immediately queues it for sending.

This operation is more flexible than the SendEmail API operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for example, when you want to send a multipart MIME email (such a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments.

The SendRawEmail operation has the following requirements:

Additionally, keep the following considerations in mind when using the SendRawEmail operation:

" + "documentation":"

Composes an email message and immediately queues it for sending.

This operation is more flexible than the SendEmail API operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for example, when you want to send a multipart MIME email (such a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments.

The SendRawEmail operation has the following requirements:

Additionally, keep the following considerations in mind when using the SendRawEmail operation:

" }, "SendTemplatedEmail":{ "name":"SendTemplatedEmail", @@ -822,7 +822,7 @@ "shape":"SetIdentityDkimEnabledResponse", "resultWrapper":"SetIdentityDkimEnabledResult" }, - "documentation":"

Enables or disables Easy DKIM signing of email sent from an identity:

For email addresses (for example, user@example.com), you can only enable Easy DKIM signing if the corresponding domain (in this case, example.com) has been set up for Easy DKIM using the AWS Console or the VerifyDomainDkim operation.

You can execute this operation no more than once per second.

For more information about Easy DKIM signing, go to the Amazon SES Developer Guide.

" + "documentation":"

Enables or disables Easy DKIM signing of email sent from an identity. If Easy DKIM signing is enabled for a domain, then Amazon SES uses DKIM to sign all email that it sends from addresses on that domain. If Easy DKIM signing is enabled for an email address, then Amazon SES uses DKIM to sign all email it sends from that address.

For email addresses (for example, user@example.com), you can only enable DKIM signing if the corresponding domain (in this case, example.com) has been set up to use Easy DKIM.

You can enable DKIM signing for an identity at any time after you start the verification process for the identity, even if the verification process isn't complete.

You can execute this operation no more than once per second.

For more information about Easy DKIM signing, go to the Amazon SES Developer Guide.
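For illustration, a hedged sketch of calling this operation to enable Easy DKIM for a domain identity; the domain is an assumption:

import boto3

ses = boto3.client("ses")
# Hedged example: enable Easy DKIM signing for a (verified or verifying)
# domain identity; the domain name is an assumption.
ses.set_identity_dkim_enabled(Identity="example.com", DkimEnabled=True)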

" }, "SetIdentityFeedbackForwardingEnabled":{ "name":"SetIdentityFeedbackForwardingEnabled", @@ -2048,15 +2048,15 @@ "members":{ "ToAddresses":{ "shape":"AddressList", - "documentation":"

The To: field(s) of the message.

" + "documentation":"

The recipients to place on the To: line of the message.

" }, "CcAddresses":{ "shape":"AddressList", - "documentation":"

The CC: field(s) of the message.

" + "documentation":"

The recipients to place on the CC: line of the message.

" }, "BccAddresses":{ "shape":"AddressList", - "documentation":"

The BCC: field(s) of the message.

" + "documentation":"

The recipients to place on the BCC: line of the message.

" } }, "documentation":"

Represents the destination of the message, consisting of To:, CC:, and BCC: fields.

Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a destination email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492.
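For illustration, a hedged sketch of a Destination value as passed to the SendEmail operation; the addresses are illustrative:

# Hedged sketch of a Destination; local parts must be 7-bit ASCII, and
# non-ASCII domain parts must be Punycode-encoded.
destination = {
    "ToAddresses": ["recipient@example.com"],
    "CcAddresses": ["cc@example.com"],
    "BccAddresses": [],  # may be omitted entirely
}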

" @@ -2836,7 +2836,7 @@ "documentation":"

The maximum number of custom verification email templates to return. This value must be at least 1 and less than or equal to 50. If you do not specify a value, or if you specify a value less than 1 or greater than 50, the operation will return up to 50 results.

" } }, - "documentation":"

Represents a request to list the existing custom verification email templates for your account.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

" + "documentation":"

Represents a request to list the existing custom verification email templates for your account.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

" }, "ListCustomVerificationEmailTemplatesResponse":{ "type":"structure", @@ -4056,7 +4056,7 @@ "members":{ "Scope":{ "shape":"StopScope", - "documentation":"

The name of the RuleSet that is being stopped.

" + "documentation":"

The scope of the StopAction. The only acceptable value is RuleSet.

" }, "TopicArn":{ "shape":"AmazonResourceName", diff --git a/botocore/data/sqs/2012-11-05/service-2.json b/botocore/data/sqs/2012-11-05/service-2.json index 7e0994bd..2a50ef13 100644 --- a/botocore/data/sqs/2012-11-05/service-2.json +++ b/botocore/data/sqs/2012-11-05/service-2.json @@ -283,7 +283,7 @@ "requestUri":"/" }, "input":{"shape":"TagQueueRequest"}, - "documentation":"

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "UntagQueue":{ "name":"UntagQueue", @@ -535,6 +535,11 @@ "shape":"QueueAttributeMap", "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:

The following attributes apply only to server-side-encryption:

The following attributes apply only to FIFO (first-in-first-out) queues:

", "locationName":"Attribute" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

To be able to tag a queue on creation, you must have the sqs:CreateQueue and sqs:TagQueue permissions.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.
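For illustration, a hedged sketch of creating and tagging a queue in one call, which requires both the sqs:CreateQueue and sqs:TagQueue permissions noted above; the queue name and tag values are assumptions:

import boto3

sqs = boto3.client("sqs")
# Hedged example: create a queue and apply a cost allocation tag in the
# same call; note the lowercase "tags" parameter name.
sqs.create_queue(
    QueueName="my-billing-queue",
    tags={"CostCenter": "analytics"},  # assumed tag key/value
)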

", + "locationName":"Tag" } }, "documentation":"

" @@ -906,6 +911,18 @@ }, "flattened":true }, + "MessageBodySystemAttributeMap":{ + "type":"map", + "key":{ + "shape":"MessageSystemAttributeNameForSends", + "locationName":"Name" + }, + "value":{ + "shape":"MessageSystemAttributeValue", + "locationName":"Value" + }, + "flattened":true + }, "MessageList":{ "type":"list", "member":{ @@ -948,9 +965,45 @@ "ApproximateFirstReceiveTimestamp", "SequenceNumber", "MessageDeduplicationId", - "MessageGroupId" + "MessageGroupId", + "AWSTraceHeader" ] }, + "MessageSystemAttributeNameForSends":{ + "type":"string", + "enum":["AWSTraceHeader"] + }, + "MessageSystemAttributeValue":{ + "type":"structure", + "required":["DataType"], + "members":{ + "StringValue":{ + "shape":"String", + "documentation":"

Strings are Unicode with UTF-8 binary encoding. For a list of code values, see ASCII Printable Characters.

" + }, + "BinaryValue":{ + "shape":"Binary", + "documentation":"

Binary type attributes can store any binary data, such as compressed data, encrypted data, or images.

" + }, + "StringListValues":{ + "shape":"StringList", + "documentation":"

Not implemented. Reserved for future use.

", + "flattened":true, + "locationName":"StringListValue" + }, + "BinaryListValues":{ + "shape":"BinaryList", + "documentation":"

Not implemented. Reserved for future use.

", + "flattened":true, + "locationName":"BinaryListValue" + }, + "DataType":{ + "shape":"String", + "documentation":"

Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue.

You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

" + } + }, + "documentation":"

The user-specified message system attribute value. For string data types, the Value attribute has the same restrictions on the content as the message body. For more information, see SendMessage.

Name, type, value and the message body must not be empty or null.

" + }, "OverLimit":{ "type":"structure", "members":{ @@ -1083,7 +1136,7 @@ }, "AttributeNames":{ "shape":"AttributeNameList", - "documentation":"

A list of attributes that need to be returned along with each message. These attributes include:

" + "documentation":"

A list of attributes that need to be returned along with each message. These attributes include:

" }, "MessageAttributeNames":{ "shape":"MessageAttributeNameList", @@ -1178,6 +1231,11 @@ "documentation":"

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

", "locationName":"MessageAttribute" }, + "MessageSystemAttributes":{ + "shape":"MessageBodySystemAttributeMap", + "documentation":"

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.
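For illustration, a hedged sketch of sending a message with the AWSTraceHeader message system attribute (the only name currently allowed); the queue URL and trace ID are assumptions:

import boto3

sqs = boto3.client("sqs")
# Hedged example: propagate an X-Ray trace header via the message system
# attributes; queue URL and trace ID are assumptions.
sqs.send_message(
    QueueUrl="https://sqs.us-east-1.amazonaws.com/123456789012/my-queue",
    MessageBody="hello",
    MessageSystemAttributes={
        "AWSTraceHeader": {
            "DataType": "String",
            "StringValue": "Root=1-5759e988-bd862e3fe1be46a994272793",
        }
    },
)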

", + "locationName":"MessageSystemAttribute" + }, "MessageDeduplicationId":{ "shape":"String", "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

" @@ -1239,6 +1297,10 @@ "shape":"String", "documentation":"

An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

" }, + "MD5OfMessageSystemAttributes":{ + "shape":"String", + "documentation":"

An MD5 digest of the non-URL-encoded message system attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

" + }, "SequenceNumber":{ "shape":"String", "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The large, non-consecutive number that Amazon SQS assigns to each message.

The length of SequenceNumber is 128 bits. SequenceNumber continues to increase for a particular MessageGroupId.

" @@ -1278,6 +1340,11 @@ "documentation":"

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

", "locationName":"MessageAttribute" }, + "MessageSystemAttributes":{ + "shape":"MessageBodySystemAttributeMap", + "documentation":"

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

", + "locationName":"MessageSystemAttribute" + }, "MessageDeduplicationId":{ "shape":"String", "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

" @@ -1300,6 +1367,10 @@ "shape":"String", "documentation":"

An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

" }, + "MD5OfMessageSystemAttributes":{ + "shape":"String", + "documentation":"

An MD5 digest of the non-URL-encoded message system attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest.

" + }, "MessageId":{ "shape":"String", "documentation":"

An attribute containing the MessageId of the message sent to the queue. For more information, see Queue and Message Identifiers in the Amazon Simple Queue Service Developer Guide.

" diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index de352520..c2c674da 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -1725,7 +1725,7 @@ {"shape":"InvalidAssociationVersion"}, {"shape":"AssociationVersionLimitExceeded"} ], - "documentation":"

Updates an association. You can update the association name and version, the document version, schedule, parameters, and Amazon S3 output.

When you update an association, the association immediately runs against the specified targets.

" + "documentation":"

Updates an association. You can update the association name and version, the document version, schedule, parameters, and Amazon S3 output.

In order to call this API action, your IAM user account, group, or role must be configured with permission to call the DescribeAssociation API action. If you don't have permission to call DescribeAssociation, then you receive the following error: An error occurred (AccessDeniedException) when calling the UpdateAssociation operation: User: <user_arn> is not authorized to perform: ssm:DescribeAssociation on resource: <resource_arn>

When you update an association, the association immediately runs against the specified targets.

" }, "UpdateAssociationStatus":{ "name":"UpdateAssociationStatus", @@ -3855,7 +3855,7 @@ }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The instance ID.

" + "documentation":"

The instance ID.

InstanceId has been deprecated. To specify an instance ID for an association, use the Targets parameter. If you use the parameter InstanceId, you cannot use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter.

" }, "Parameters":{ "shape":"Parameters", @@ -3863,7 +3863,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The targets (either instances or tags) for the association.

" + "documentation":"

The targets (either instances or tags) for the association. You must specify a value for Targets if you don't specify a value for InstanceId.

" }, "ScheduleExpression":{ "shape":"ScheduleExpression", @@ -5012,7 +5012,7 @@ }, "Filters":{ "shape":"PatchOrchestratorFilterList", - "documentation":"

Each entry in the array is a structure containing:

Key (string, between 1 and 128 characters)

Values (array of strings, each string between 1 and 256 characters)

" + "documentation":"

An array of structures. Each entry in the array is a structure containing a Key, Value combination. Valid values for Key are Classification | KBId | Severity | State.

" }, "NextToken":{ "shape":"NextToken", @@ -7386,7 +7386,7 @@ }, "IsLatestVersion":{ "shape":"Boolean", - "documentation":"

Indicates whether latest version of SSM Agent is running on your instance. Some older versions of Windows Server use the EC2Config service to process SSM requests. For this reason, this field does not indicate whether or not the latest version is installed on Windows managed instances.

", + "documentation":"

Indicates whether the latest version of SSM Agent is running on your Linux Managed Instance. This field does not indicate whether or not the latest version is installed on Windows managed instances, because some older versions of Windows Server use the EC2Config service to process SSM requests.

", "box":true }, "PlatformType":{ @@ -8509,7 +8509,7 @@ }, "ParameterVersion":{ "shape":"PSParameterVersion", - "documentation":"

The specific version of the parameter on which you want to attach one or more labels. If no version is specified, the system attaches the label to the latest version.)

", + "documentation":"

The specific version of the parameter on which you want to attach one or more labels. If no version is specified, the system attaches the label to the latest version.

", "box":true }, "Labels":{ @@ -8524,6 +8524,10 @@ "InvalidLabels":{ "shape":"ParameterLabelList", "documentation":"

The label does not meet the requirements. For information about parameter label requirements, see Labeling Parameters in the AWS Systems Manager User Guide.

" + }, + "ParameterVersion":{ + "shape":"PSParameterVersion", + "documentation":"

The version of the parameter that has been labeled.
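For illustration, a hedged sketch of labeling a specific parameter version and reading back the two response fields described above; the parameter name, version, and label are assumptions:

import boto3

ssm = boto3.client("ssm")
# Hedged example: attach a label to version 3 of a parameter; omitting
# ParameterVersion labels the latest version instead.
resp = ssm.label_parameter_version(
    Name="/app/database/password",   # assumed parameter name
    ParameterVersion=3,
    Labels=["Production"],           # assumed label
)
print(resp["InvalidLabels"])         # labels that failed validation, if any
print(resp.get("ParameterVersion"))  # the version that was labeled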

" } } }, @@ -10569,7 +10573,8 @@ "type":"string", "enum":[ "Standard", - "Advanced" + "Advanced", + "Intelligent-Tiering" ] }, "ParameterType":{ @@ -11082,7 +11087,7 @@ }, "PatchSourceConfiguration":{ "type":"string", - "max":512, + "max":1024, "min":1, "sensitive":true }, @@ -11293,7 +11298,7 @@ }, "Tier":{ "shape":"ParameterTier", - "documentation":"

Parameter Store offers a standard tier and an advanced tier for parameters. Standard parameters have a value limit of 4 KB and can't be configured to use parameter policies. You can create a maximum of 10,000 standard parameters per account and per Region. Standard parameters are offered at no additional cost.

Advanced parameters have a value limit of 8 KB and can be configured to use parameter policies. You can create a maximum of 100,000 advanced parameters per account and per Region. Advanced parameters incur a charge.

If you don't specify a parameter tier when you create a new parameter, the parameter defaults to using the standard tier. You can change a standard parameter to an advanced parameter at any time. But you can't revert an advanced parameter to a standard parameter. Reverting an advanced parameter to a standard parameter would result in data loss because the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting would also remove any policies attached to the parameter. Lastly, advanced parameters use a different form of encryption than standard parameters.

If you no longer need an advanced parameter, or if you no longer want to incur charges for an advanced parameter, you must delete it and recreate it as a new standard parameter. For more information, see About Advanced Parameters in the AWS Systems Manager User Guide.

" + "documentation":"

The parameter tier to assign to a parameter.

Parameter Store offers a standard tier and an advanced tier for parameters. Standard parameters have a content size limit of 4 KB and can't be configured to use parameter policies. You can create a maximum of 10,000 standard parameters for each Region in an AWS account. Standard parameters are offered at no additional cost.

Advanced parameters have a content size limit of 8 KB and can be configured to use parameter policies. You can create a maximum of 100,000 advanced parameters for each Region in an AWS account. Advanced parameters incur a charge. For more information, see About Advanced Parameters in the AWS Systems Manager User Guide.

You can change a standard parameter to an advanced parameter any time. But you can't revert an advanced parameter to a standard parameter. Reverting an advanced parameter to a standard parameter would result in data loss because the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting would also remove any policies attached to the parameter. Lastly, advanced parameters use a different form of encryption than standard parameters.

If you no longer need an advanced parameter, or if you no longer want to incur charges for an advanced parameter, you must delete it and recreate it as a new standard parameter.

Using the Default Tier Configuration

In PutParameter requests, you can specify the tier to create the parameter in. Whenever you specify a tier in the request, Parameter Store creates or updates the parameter according to that request. However, if you do not specify a tier in a request, Parameter Store assigns the tier based on the current Parameter Store default tier configuration.

The default tier when you begin using Parameter Store is the standard-parameter tier. If you use the advanced-parameter tier, you can specify one of the following as the default:

Options that require an advanced parameter include the following:

For more information about configuring the default tier option, see Specifying a Default Parameter Tier in the AWS Systems Manager User Guide.

" }, "Policies":{ "shape":"ParameterPolicies", @@ -11307,6 +11312,10 @@ "Version":{ "shape":"PSParameterVersion", "documentation":"

The new version number of a parameter. If you edit a parameter value, Parameter Store automatically creates a new version and assigns this new version a unique ID. You can reference a parameter version ID in API actions or in Systems Manager documents (SSM documents). By default, if you don't specify a specific version, the system returns the latest parameter value when a parameter is called.

" + }, + "Tier":{ + "shape":"ParameterTier", + "documentation":"

The tier assigned to the parameter.

" } } }, @@ -13030,7 +13039,7 @@ }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The version of the document that you want to update.

" + "documentation":"

(Required) The version of the document that you want to update.

" }, "DocumentFormat":{ "shape":"DocumentFormat", diff --git a/botocore/data/stepfunctions/2016-11-23/service-2.json b/botocore/data/stepfunctions/2016-11-23/service-2.json index bed06f47..a715453d 100644 --- a/botocore/data/stepfunctions/2016-11-23/service-2.json +++ b/botocore/data/stepfunctions/2016-11-23/service-2.json @@ -26,7 +26,7 @@ {"shape":"InvalidName"}, {"shape":"TooManyTags"} ], - "documentation":"

Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to AWS Step Functions. Activities must poll Step Functions using the GetActivityTask API action and respond using SendTask* API actions. This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

", + "documentation":"

Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to AWS Step Functions. Activities must poll Step Functions using the GetActivityTask API action and respond using SendTask* API actions. This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

CreateActivity is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateActivity's idempotency check is based on the activity name. If a subsequent request has different tag values, Step Functions ignores these differences and treats it as an idempotent request of the previous call. In this case, tags are not updated, even if they are different.

", "idempotent":true }, "CreateStateMachine":{ @@ -46,7 +46,7 @@ {"shape":"StateMachineLimitExceeded"}, {"shape":"TooManyTags"} ], - "documentation":"

Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

", + "documentation":"

Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language.

This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

CreateStateMachine is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name and definition. If a subsequent request has a different roleArn or tags, Step Functions ignores these differences and treats it as an idempotent request of the previous call. In this case, roleArn and tags are not updated, even if they are different.

", "idempotent":true }, "DeleteActivity":{ @@ -214,7 +214,7 @@ {"shape":"InvalidArn"}, {"shape":"ResourceNotFound"} ], - "documentation":"

List tags for a given resource.

" + "documentation":"

List tags for a given resource.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

" }, "SendTaskFailure":{ "name":"SendTaskFailure", @@ -229,7 +229,7 @@ {"shape":"InvalidToken"}, {"shape":"TaskTimedOut"} ], - "documentation":"

Used by workers to report that the task identified by the taskToken failed.

" + "documentation":"

Used by activity workers and task states using the callback pattern to report that the task identified by the taskToken failed.

" }, "SendTaskHeartbeat":{ "name":"SendTaskHeartbeat", @@ -244,7 +244,7 @@ {"shape":"InvalidToken"}, {"shape":"TaskTimedOut"} ], - "documentation":"

Used by workers to report to the service that the task represented by the specified taskToken is still making progress. This action resets the Heartbeat clock. The Heartbeat threshold is specified in the state machine's Amazon States Language definition. This action does not in itself create an event in the execution history. However, if the task times out, the execution history contains an ActivityTimedOut event.

The Timeout of a task, defined in the state machine's Amazon States Language definition, is its maximum allowed duration, regardless of the number of SendTaskHeartbeat requests received.

This operation is only useful for long-lived tasks to report the liveliness of the task.

" + "documentation":"

Used by activity workers and task states using the callback pattern to report to Step Functions that the task represented by the specified taskToken is still making progress. This action resets the Heartbeat clock. The Heartbeat threshold is specified in the state machine's Amazon States Language definition (HeartbeatSeconds). This action does not in itself create an event in the execution history. However, if the task times out, the execution history contains an ActivityTimedOut entry for activities, or a TaskTimedOut entry for tasks using the job run or callback pattern.

The Timeout of a task, defined in the state machine's Amazon States Language definition, is its maximum allowed duration, regardless of the number of SendTaskHeartbeat requests received. Use HeartbeatSeconds to configure the timeout interval for heartbeats.

" }, "SendTaskSuccess":{ "name":"SendTaskSuccess", @@ -260,7 +260,7 @@ {"shape":"InvalidToken"}, {"shape":"TaskTimedOut"} ], - "documentation":"

Used by workers to report that the task identified by the taskToken completed successfully.

" + "documentation":"

Used by activity workers and task states using the callback pattern to report that the task identified by the taskToken completed successfully.

" }, "StartExecution":{ "name":"StartExecution", @@ -309,7 +309,7 @@ {"shape":"ResourceNotFound"}, {"shape":"TooManyTags"} ], - "documentation":"

Add a tag to a Step Functions resource.

" + "documentation":"

Add a tag to a Step Functions resource.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

" }, "UntagResource":{ "name":"UntagResource", @@ -393,7 +393,7 @@ }, "name":{ "shape":"Name", - "documentation":"

The name of the activity.

A name must not contain:

" + "documentation":"

The name of the activity.

A name must not contain:

" }, "creationDate":{ "shape":"Timestamp", @@ -500,11 +500,11 @@ "members":{ "name":{ "shape":"Name", - "documentation":"

The name of the activity to create. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

" + "documentation":"

The name of the activity to create. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

" }, "tags":{ "shape":"TagList", - "documentation":"

The list of tags to add to a resource.

" + "documentation":"

The list of tags to add to a resource.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

" } } }, @@ -535,7 +535,7 @@ "members":{ "name":{ "shape":"Name", - "documentation":"

The name of the state machine.

A name must not contain:

" + "documentation":"

The name of the state machine.

A name must not contain:

" }, "definition":{ "shape":"Definition", @@ -547,7 +547,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

Tags to be added when creating a state machine.

" + "documentation":"

Tags to be added when creating a state machine.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

" } } }, @@ -628,7 +628,7 @@ }, "name":{ "shape":"Name", - "documentation":"

The name of the activity.

A name must not contain:

" + "documentation":"

The name of the activity.

A name must not contain:

" }, "creationDate":{ "shape":"Timestamp", @@ -666,7 +666,7 @@ }, "name":{ "shape":"Name", - "documentation":"

The name of the execution.

A name must not contain:

" + "documentation":"

The name of the execution.

A name must not contain:

" }, "status":{ "shape":"ExecutionStatus", @@ -758,7 +758,7 @@ }, "name":{ "shape":"Name", - "documentation":"

The name of the state machine.

A name must not contain:

" + "documentation":"

The name of the state machine.

A name must not contain:

" }, "status":{ "shape":"StateMachineStatus", @@ -856,7 +856,7 @@ }, "name":{ "shape":"Name", - "documentation":"

The name of the execution.

A name must not contain:

" + "documentation":"

The name of the execution.

A name must not contain:

" }, "status":{ "shape":"ExecutionStatus", @@ -1054,6 +1054,26 @@ "executionSucceededEventDetails":{"shape":"ExecutionSucceededEventDetails"}, "executionAbortedEventDetails":{"shape":"ExecutionAbortedEventDetails"}, "executionTimedOutEventDetails":{"shape":"ExecutionTimedOutEventDetails"}, + "mapStateStartedEventDetails":{ + "shape":"MapStateStartedEventDetails", + "documentation":"

Contains details about a Map state that was started.

" + }, + "mapIterationStartedEventDetails":{ + "shape":"MapIterationEventDetails", + "documentation":"

Contains details about an iteration of a Map state that was started.

" + }, + "mapIterationSucceededEventDetails":{ + "shape":"MapIterationEventDetails", + "documentation":"

Contains details about an iteration of a Map state that succeeded.

" + }, + "mapIterationFailedEventDetails":{ + "shape":"MapIterationEventDetails", + "documentation":"

Contains details about an iteration of a Map state that failed.

" + }, + "mapIterationAbortedEventDetails":{ + "shape":"MapIterationEventDetails", + "documentation":"

Contains details about an iteration of a Map state that was aborted.

" + }, "lambdaFunctionFailedEventDetails":{"shape":"LambdaFunctionFailedEventDetails"}, "lambdaFunctionScheduleFailedEventDetails":{"shape":"LambdaFunctionScheduleFailedEventDetails"}, "lambdaFunctionScheduledEventDetails":{"shape":"LambdaFunctionScheduledEventDetails"}, @@ -1080,47 +1100,57 @@ "type":"string", "enum":[ "ActivityFailed", - "ActivityScheduleFailed", "ActivityScheduled", + "ActivityScheduleFailed", "ActivityStarted", "ActivitySucceeded", "ActivityTimedOut", "ChoiceStateEntered", "ChoiceStateExited", - "TaskFailed", - "TaskScheduled", - "TaskStartFailed", - "TaskStarted", - "TaskSubmitFailed", - "TaskSubmitted", - "TaskSucceeded", - "TaskTimedOut", + "ExecutionAborted", "ExecutionFailed", "ExecutionStarted", "ExecutionSucceeded", - "ExecutionAborted", "ExecutionTimedOut", "FailStateEntered", "LambdaFunctionFailed", - "LambdaFunctionScheduleFailed", "LambdaFunctionScheduled", - "LambdaFunctionStartFailed", + "LambdaFunctionScheduleFailed", "LambdaFunctionStarted", + "LambdaFunctionStartFailed", "LambdaFunctionSucceeded", "LambdaFunctionTimedOut", - "SucceedStateEntered", - "SucceedStateExited", - "TaskStateAborted", - "TaskStateEntered", - "TaskStateExited", - "PassStateEntered", - "PassStateExited", + "MapIterationAborted", + "MapIterationFailed", + "MapIterationStarted", + "MapIterationSucceeded", + "MapStateAborted", + "MapStateEntered", + "MapStateExited", + "MapStateFailed", + "MapStateStarted", + "MapStateSucceeded", "ParallelStateAborted", "ParallelStateEntered", "ParallelStateExited", "ParallelStateFailed", "ParallelStateStarted", "ParallelStateSucceeded", + "PassStateEntered", + "PassStateExited", + "SucceedStateEntered", + "SucceedStateExited", + "TaskFailed", + "TaskScheduled", + "TaskStarted", + "TaskStartFailed", + "TaskStateAborted", + "TaskStateEntered", + "TaskStateExited", + "TaskSubmitFailed", + "TaskSubmitted", + "TaskSucceeded", + "TaskTimedOut", "WaitStateAborted", "WaitStateEntered", "WaitStateExited" @@ -1308,7 +1338,7 @@ "documentation":"

The maximum number of results that are returned per call. You can use nextToken to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.

This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.

" }, "nextToken":{ - "shape":"PageToken", + "shape":"ListExecutionsPageToken", "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" } } @@ -1322,11 +1352,16 @@ "documentation":"

The list of matching executions.

" }, "nextToken":{ - "shape":"PageToken", + "shape":"ListExecutionsPageToken", "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" } } }, + "ListExecutionsPageToken":{ + "type":"string", + "max":3096, + "min":1 + }, "ListStateMachinesInput":{ "type":"structure", "members":{ @@ -1370,6 +1405,30 @@ } } }, + "MapIterationEventDetails":{ + "type":"structure", + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the iteration’s parent Map state.

" + }, + "index":{ + "shape":"UnsignedInteger", + "documentation":"

The index of the array belonging to the Map state iteration.

" + } + }, + "documentation":"

Contains details about an iteration of a Map state.

" + }, + "MapStateStartedEventDetails":{ + "type":"structure", + "members":{ + "length":{ + "shape":"UnsignedInteger", + "documentation":"

The size of the array for Map state iterations.

" + } + }, + "documentation":"

Details about a Map state that was started.

" + }, "MissingRequiredParameter":{ "type":"structure", "members":{ @@ -1399,7 +1458,7 @@ "message":{"shape":"ErrorMessage"}, "resourceName":{"shape":"Arn"} }, - "documentation":"

Could not fine the referenced resource. Only state machine and activity ARNs are supported.

", + "documentation":"

Could not find the referenced resource. Only state machine and activity ARNs are supported.

", "exception":true }, "ReverseOrder":{"type":"boolean"}, @@ -1409,7 +1468,7 @@ "members":{ "taskToken":{ "shape":"TaskToken", - "documentation":"

The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTask::taskToken).

" + "documentation":"

The token that represents this task. Task tokens are generated by Step Functions when tasks are assigned to a worker, or in the context object when a workflow enters a task state. See GetActivityTaskOutput$taskToken.

" }, "error":{ "shape":"SensitiveError", @@ -1432,7 +1491,7 @@ "members":{ "taskToken":{ "shape":"TaskToken", - "documentation":"

The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTaskOutput$taskToken).

" + "documentation":"

The token that represents this task. Task tokens are generated by Step Functions when tasks are assigned to a worker, or in the context object when a workflow enters a task state. See GetActivityTaskOutput$taskToken.

" } } }, @@ -1450,7 +1509,7 @@ "members":{ "taskToken":{ "shape":"TaskToken", - "documentation":"

The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTaskOutput$taskToken).

" + "documentation":"

The token that represents this task. Task tokens are generated by Step Functions when tasks are assigned to a worker, or in the context object when a workflow enters a task state. See GetActivityTaskOutput$taskToken.

" }, "output":{ "shape":"SensitiveData", @@ -1495,7 +1554,7 @@ }, "name":{ "shape":"Name", - "documentation":"

The name of the execution. This name must be unique for your AWS account, region, and state machine for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

" + "documentation":"

The name of the execution. This name must be unique for your AWS account, region, and state machine for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

" }, "input":{ "shape":"SensitiveData", @@ -1541,7 +1600,7 @@ "members":{ "name":{ "shape":"Name", - "documentation":"

The name of the state.

A name must not contain:

" + "documentation":"

The name of the state.

A name must not contain:

" }, "output":{ "shape":"SensitiveData", @@ -1600,7 +1659,7 @@ }, "name":{ "shape":"Name", - "documentation":"

The name of the state machine.

A name must not contain:

" + "documentation":"

The name of the state machine.

A name must not contain:

" }, "creationDate":{ "shape":"Timestamp", @@ -1656,7 +1715,7 @@ "documentation":"

The value of a tag.

" } }, - "documentation":"

Tags are key-value pairs that can be associated with Step Functions state machines and activities.

" + "documentation":"

Tags are key-value pairs that can be associated with Step Functions state machines and activities.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

" }, "TagKey":{ "type":"string", @@ -1684,7 +1743,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The list of tags to add to a resource.

Tags may only contain unicode letters, digits, whitespace, or these symbols: _ . : / = + - @.

" + "documentation":"

The list of tags to add to a resource.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

" } } }, @@ -1927,6 +1986,10 @@ "documentation":"

You've exceeded the number of tags allowed for a resource. See the Limits Topic in the AWS Step Functions Developer Guide.

", "exception":true }, + "UnsignedInteger":{ + "type":"integer", + "min":0 + }, "UntagResourceInput":{ "type":"structure", "required":[ diff --git a/botocore/data/storagegateway/2013-06-30/service-2.json b/botocore/data/storagegateway/2013-06-30/service-2.json index e733c733..f7ee221e 100644 --- a/botocore/data/storagegateway/2013-06-30/service-2.json +++ b/botocore/data/storagegateway/2013-06-30/service-2.json @@ -24,7 +24,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the region you want to use for storing snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.

You must turn on the gateway VM before you can activate your gateway.

" + "documentation":"

Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the AWS Region that you want to use for storing snapshots or tapes, the time zone for scheduled snapshots, the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.

You must turn on the gateway VM before you can activate your gateway.

" }, "AddCache":{ "name":"AddCache", @@ -164,7 +164,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. This operation is only supported for file gateways.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" + "documentation":"

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. This operation is only supported for file gateways.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in the AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" }, "CreateSMBFileShare":{ "name":"CreateSMBFileShare", @@ -670,7 +670,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Lists gateways owned by an AWS account in a region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).

By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.

If you have more gateways than are returned in a response (that is, the response returns only a truncated list of your gateways), the response contains a marker that you can specify in your next request to fetch the next page of gateways.

" + "documentation":"

Lists gateways owned by an AWS account in an AWS Region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).

By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.

If you have more gateways than are returned in a response (that is, the response returns only a truncated list of your gateways), the response contains a marker that you can specify in your next request to fetch the next page of gateways.

" }, "ListLocalDisks":{ "name":"ListLocalDisks", @@ -768,7 +768,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Sends you notification through CloudWatch Events when all files written to your NFS file share have been uploaded to Amazon S3.

AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the NFS file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.

For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).

" + "documentation":"

Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.

AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you a notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or an AWS Lambda function. This operation is only supported for file gateways.

For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).

" }, "RefreshCache":{ "name":"RefreshCache", @@ -1006,7 +1006,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates the SMB security strategy on a file gateway. This action is only supported in file gateways.

" + "documentation":"

Updates the SMB security strategy on a file gateway. This action is only supported in file gateways.

This API is called Security level in the User Guide.

A higher security level can affect performance of the gateway.

" }, "UpdateSnapshotSchedule":{ "name":"UpdateSnapshotSchedule", @@ -1061,7 +1061,7 @@ }, "GatewayRegion":{ "shape":"RegionId", - "documentation":"

A value that indicates the region where you want to store your data. The gateway region specified must be the same region as the region in your Host header in the request. For more information about available regions and endpoints for AWS Storage Gateway, see Regions and Endpoints in the Amazon Web Services Glossary.

Valid Values: See AWS Storage Gateway Regions and Endpoints in the AWS General Reference.

" + "documentation":"

A value that indicates the AWS Region where you want to store your data. The gateway AWS Region specified must be the same AWS Region as the AWS Region in your Host header in the request. For more information about available AWS Regions and endpoints for AWS Storage Gateway, see Regions and Endpoints in the Amazon Web Services Glossary.

Valid Values: See AWS Storage Gateway Regions and Endpoints in the AWS General Reference.

" }, "GatewayType":{ "shape":"GatewayType", @@ -1077,7 +1077,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

A list of up to 50 tags that can be assigned to the gateway. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256.

" + "documentation":"

A list of up to 50 tags that you can assign to the gateway. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers that can be represented in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256 characters.

" } }, "documentation":"

A JSON object containing one or more of the following fields:

" @@ -1087,7 +1087,7 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated gateway. It is a string made of information such as your account, gateway name, and region. This ARN is used to reference the gateway in other API operations as well as resource-based authorization.

For gateways activated prior to September 02, 2015, the gateway ARN contains the gateway name rather than the gateway ID. Changing the name of the gateway has no effect on the gateway ARN.

" + "documentation":"

AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated gateway. It is a string made of information such as your account, gateway name, and AWS Region. This ARN is used to reference the gateway in other API operations as well as resource-based authorization.

For gateways activated prior to September 02, 2015, the gateway ARN contains the gateway name rather than the gateway ID. Changing the name of the gateway has no effect on the gateway ARN.

" }, "ActivationKey":{ "type":"string", @@ -1421,6 +1421,10 @@ "max":100, "min":5 }, + "CloudWatchLogGroupARN":{ + "type":"string", + "max":562 + }, "CreateCachediSCSIVolumeInput":{ "type":"structure", "required":[ @@ -1466,7 +1470,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

A list of up to 50 tags that can be assigned to a cached volume. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256.

" + "documentation":"

A list of up to 50 tags that you can assign to a cached volume. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers that you can represent in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256 characters.

" } } }, @@ -1624,7 +1628,7 @@ }, "AdminUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users in the Active Directory that will be granted administrator privileges on the file share. These users can do all file operations as the super-user.

Use this option very carefully, because any user in this list can do anything they like on the file share, regardless of file permissions.

" }, "ValidUserList":{ "shape":"FileShareUserList", @@ -1669,6 +1673,10 @@ "SnapshotDescription":{ "shape":"SnapshotDescription", "documentation":"

Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

A list of up to 50 tags that can be assigned to a snapshot. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256.

" } } }, @@ -1799,7 +1807,7 @@ "members":{ "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tape with. Use the ListGateways operation to return a list of gateways for your account and region.

" + "documentation":"

The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tape with. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

" }, "TapeSizeInBytes":{ "shape":"TapeSize", @@ -1850,7 +1858,7 @@ "members":{ "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tapes with. Use the ListGateways operation to return a list of gateways for your account and region.

" + "documentation":"

The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tapes with. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

" }, "TapeSizeInBytes":{ "shape":"TapeSize", @@ -2051,7 +2059,7 @@ "members":{ "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The unique Amazon Resource Name (ARN) of the gateway that the virtual tape to delete is associated with. Use the ListGateways operation to return a list of gateways for your account and region.

" + "documentation":"

The unique Amazon Resource Name (ARN) of the gateway that the virtual tape to delete is associated with. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

" }, "TapeARN":{ "shape":"TapeARN", @@ -2251,6 +2259,10 @@ "VPCEndpoint":{ "shape":"string", "documentation":"

The configuration settings for the virtual private cloud (VPC) endpoint for your gateway.

" + }, + "CloudWatchLogGroupARN":{ + "shape":"CloudWatchLogGroupARN", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was used to monitor and log events in the gateway.

" } }, "documentation":"

A JSON object containing the following fields:

" @@ -2353,7 +2365,7 @@ }, "SMBSecurityStrategy":{ "shape":"SMBSecurityStrategy", - "documentation":"

The type of security strategy that was specified for file gateway.

ClientSpecified: SMBv1 is enabled, SMB signing is offered but not required, SMB encryption is offered but not required.

MandatorySigning: SMBv1 is disabled, SMB signing is required, SMB encryption is offered but not required.

MandatoryEncryption: SMBv1 is disabled, SMB signing is offered but not required, SMB encryption is required.

" + "documentation":"

The type of security strategy that was specified for file gateway.

ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" } } }, @@ -2390,6 +2402,10 @@ "Timezone":{ "shape":"GatewayTimezone", "documentation":"

A value that indicates the time zone of the gateway.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

A list of up to 50 tags assigned to the snapshot schedule, sorted alphabetically by key name. Each tag is a key-value pair. For a gateway with more than 10 tags assigned, you can view all tags using the ListTagsForResource API operation.

" } } }, @@ -2901,7 +2917,7 @@ }, "GatewayARN":{ "type":"string", - "documentation":"

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.

", + "documentation":"

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

", "max":500, "min":50 }, @@ -2919,7 +2935,7 @@ }, "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.

" + "documentation":"

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

" }, "GatewayType":{ "shape":"GatewayType", @@ -3052,7 +3068,7 @@ "members":{ "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.

" + "documentation":"

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

" }, "DomainName":{ "shape":"DomainName", @@ -3060,7 +3076,7 @@ }, "OrganizationalUnit":{ "shape":"OrganizationalUnit", - "documentation":"

The organizational unit (OU) is a container with an Active Directory that can hold users, groups, computers, and other OUs and this parameter specifies the OU that the gateway will join within the AD domain.

" + "documentation":"

The organizational unit (OU) is a container in an Active Directory that can hold users, groups, computers, and other OUs; this parameter specifies the OU that the gateway will join within the AD domain.

" }, "DomainControllers":{ "shape":"Hosts", @@ -3596,7 +3612,7 @@ }, "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual tape to. Use the ListGateways operation to return a list of gateways for your account and region.

You retrieve archived virtual tapes to only one gateway and the gateway must be a tape gateway.

" + "documentation":"

The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual tape to. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

You retrieve archived virtual tapes to only one gateway and the gateway must be a tape gateway.

" } }, "documentation":"

RetrieveTapeArchiveInput

" @@ -4088,7 +4104,7 @@ }, "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.

" + "documentation":"

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

" }, "PoolId":{ "shape":"PoolId", @@ -4221,6 +4237,10 @@ "GatewayTimezone":{ "shape":"GatewayTimezone", "documentation":"

A value that indicates the time zone of the gateway.

" + }, + "CloudWatchLogGroupARN":{ + "shape":"CloudWatchLogGroupARN", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use to monitor and log events in the gateway.

For more information, see What Is Amazon CloudWatch Logs?.

" } } }, @@ -4388,7 +4408,7 @@ }, "AdminUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "ValidUserList":{ "shape":"FileShareUserList", @@ -4421,7 +4441,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "SMBSecurityStrategy":{ "shape":"SMBSecurityStrategy", - "documentation":"

Specifies the type of security strategy.

ClientSpecified: SMBv1 is enabled, SMB signing is offered but not required, SMB encryption is offered but not required.

MandatorySigning: SMBv1 is disabled, SMB signing is required, SMB encryption is offered but not required.

MandatoryEncryption: SMBv1 is disabled, SMB signing is offered but not required, SMB encryption is required.

" + "documentation":"

Specifies the type of security strategy.

ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" } } }, @@ -4669,5 +4689,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" + "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" } diff --git a/botocore/data/transcribe/2017-10-26/service-2.json b/botocore/data/transcribe/2017-10-26/service-2.json index c7bb047f..fbd5f99a 100644 --- a/botocore/data/transcribe/2017-10-26/service-2.json +++ b/botocore/data/transcribe/2017-10-26/service-2.json @@ -311,6 +311,12 @@ "exception":true, "fault":true }, + "KMSKeyId":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$" + }, "LanguageCode":{ "type":"string", "enum":[ @@ -327,7 +333,9 @@ "es-ES", "en-IN", "hi-IN", - "ar-SA" + "ar-SA", + "ru-RU", + "zh-CN" ] }, "LimitExceededException":{ @@ -450,7 +458,8 @@ }, "NextToken":{ "type":"string", - "max":8192 + "max":8192, + "pattern":".+" }, "NotFoundException":{ "type":"structure", @@ -462,6 +471,7 @@ }, "OutputBucketName":{ "type":"string", + "max":64, "pattern":"[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]" }, "OutputLocationType":{ @@ -474,7 +484,8 @@ "Phrase":{ "type":"string", "max":256, - "min":0 + "min":0, + "pattern":".+" }, "Phrases":{ "type":"list", @@ -507,7 +518,6 @@ "required":[ "TranscriptionJobName", "LanguageCode", - "MediaFormat", "Media" ], "members":{ @@ -521,7 +531,7 @@ }, "MediaSampleRateHertz":{ "shape":"MediaSampleRateHertz", - "documentation":"

The sample rate, in Hertz, of the audio track in the input media file.

" + "documentation":"

The sample rate, in Hertz, of the audio track in the input media file.

If you do not specify the media sample rate, Amazon Transcribe determines the sample rate. If you specify the sample rate, it must match the sample rate detected by Amazon Transcribe. In most cases, you should leave the MediaSampleRateHertz field blank and let Amazon Transcribe determine the sample rate.

" }, "MediaFormat":{ "shape":"MediaFormat", @@ -533,8 +543,9 @@ }, "OutputBucketName":{ "shape":"OutputBucketName", - "documentation":"

The location where the transcription is stored.

If you set the OutputBucketName, Amazon Transcribe puts the transcription in the specified S3 bucket. When you call the GetTranscriptionJob operation, the operation returns this location in the TranscriptFileUri field. The S3 bucket must have permissions that allow Amazon Transcribe to put files in the bucket. For more information, see Permissions Required for IAM User Roles.

Amazon Transcribe uses the default Amazon S3 key for server-side encryption of transcripts that are placed in your S3 bucket. You can't specify your own encryption key.

If you don't set the OutputBucketName, Amazon Transcribe generates a pre-signed URL, a shareable URL that provides secure access to your transcription, and returns it in the TranscriptFileUri field. Use this URL to download the transcription.

" + "documentation":"

The location where the transcription is stored.

If you set the OutputBucketName, Amazon Transcribe puts the transcription in the specified S3 bucket. When you call the GetTranscriptionJob operation, the operation returns this location in the TranscriptFileUri field. The S3 bucket must have permissions that allow Amazon Transcribe to put files in the bucket. For more information, see Permissions Required for IAM User Roles.

Amazon Transcribe uses the default Amazon S3 key for server-side encryption of transcripts that are placed in your S3 bucket. You can't specify your own encryption key.

If you don't set the OutputBucketName, Amazon Transcribe generates a pre-signed URL, a shareable URL that provides secure access to your transcription, and returns it in the TranscriptFileUri field. Use this URL to download the transcription.

" }, + "OutputEncryptionKMSKeyId":{"shape":"KMSKeyId"}, "Settings":{ "shape":"Settings", "documentation":"

A Settings object that provides optional settings for a transcription job.

" @@ -661,7 +672,7 @@ "documentation":"

Indicates the location of the output of the transcription job.

If the value is CUSTOMER_BUCKET then the location is the S3 bucket specified in the outputBucketName field when the transcription job was started with the StartTranscriptionJob operation.

If the value is SERVICE_BUCKET then the output is stored by Amazon Transcribe and can be retrieved using the URI in the GetTranscriptionJob response's TranscriptFileUri field.

" } }, - "documentation":"

Provides a summary of information about a transcription job. .

" + "documentation":"

Provides a summary of information about a transcription job.

" }, "UpdateVocabularyRequest":{ "type":"structure", @@ -712,7 +723,8 @@ "Uri":{ "type":"string", "max":2000, - "min":1 + "min":1, + "pattern":"(s3://|http(s*)://).+" }, "Vocabularies":{ "type":"list", diff --git a/botocore/data/transfer/2018-11-05/service-2.json b/botocore/data/transfer/2018-11-05/service-2.json index d460c3f5..858e6e62 100644 --- a/botocore/data/transfer/2018-11-05/service-2.json +++ b/botocore/data/transfer/2018-11-05/service-2.json @@ -28,7 +28,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceExistsException"} ], - "documentation":"

Instantiates an autoscaling virtual server based on Secure File Transfer Protocol (SFTP) in AWS. The call returns the ServerId property assigned by the service to the newly created server. Reference this ServerId property when you make updates to your server, or work with users.

The response returns the ServerId value for the newly created server.

" + "documentation":"

Instantiates an autoscaling virtual server based on Secure File Transfer Protocol (SFTP) in AWS. When you make updates to your server or when you work with users, use the service-generated ServerId property that is assigned to the newly created server.

" }, "CreateUser":{ "name":"CreateUser", @@ -45,7 +45,7 @@ {"shape":"ResourceExistsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds a user and associate them with an existing Secure File Transfer Protocol (SFTP) server. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's AWS Identity and Access Management (IAM) role. You can also optionally add a scope-down policy, and assign metadata with tags that can be used to group and search for users.

The response returns the UserName and ServerId values of the new user for that server.

" + "documentation":"

Creates a user and associates them with an existing Secure File Transfer Protocol (SFTP) server. You can only create and associate users with SFTP servers that have the IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's AWS Identity and Access Management (IAM) role. You can also optionally add a scope-down policy, and assign metadata with tags that can be used to group and search for users.

" }, "DeleteServer":{ "name":"DeleteServer", @@ -60,7 +60,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes the Secure File Transfer Protocol (SFTP) server that you specify. If you used SERVICE_MANAGED as your IdentityProviderType, you need to delete all users associated with this server before deleting the server itself

No response returns from this call.

" + "documentation":"

Deletes the Secure File Transfer Protocol (SFTP) server that you specify.

No response returns from this operation.

" }, "DeleteSshPublicKey":{ "name":"DeleteSshPublicKey", @@ -75,7 +75,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes a user's Secure Shell (SSH) public key.

No response is returned from this call.

" + "documentation":"

Deletes a user's Secure Shell (SSH) public key.

No response is returned from this operation.

" }, "DeleteUser":{ "name":"DeleteUser", @@ -90,7 +90,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes the user belonging to the server you specify.

No response returns from this call.

When you delete a user from a server, the user's information is lost.

" + "documentation":"

Deletes the user belonging to the server you specify.

No response returns from this operation.

When you delete a user from a server, the user's information is lost.

" }, "DescribeServer":{ "name":"DescribeServer", @@ -218,7 +218,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Changes the state of an SFTP server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server such as server and user properties are not affected by stopping your server. Stopping a server will not reduce or impact your Secure File Transfer Protocol (SFTP) endpoint billing.

The states of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.

No response is returned from this call.

" + "documentation":"

Changes the state of an SFTP server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server such as server and user properties are not affected by stopping your server. Stopping a server will not reduce or impact your Secure File Transfer Protocol (SFTP) endpoint billing.

The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.

No response is returned from this operation.
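A short boto3 sketch of stopping a server and polling for the intermediate STOPPING state described above; the server ID is a placeholder:

```python
import time

import boto3

transfer = boto3.client("transfer")
server_id = "s-0123456789abcdef0"  # placeholder

transfer.stop_server(ServerId=server_id)

# Poll until the server leaves the intermediate STOPPING state.
state = "STOPPING"
while state == "STOPPING":
    time.sleep(5)
    state = transfer.describe_server(ServerId=server_id)["Server"]["State"]

if state == "STOP_FAILED":
    raise RuntimeError("server failed to stop")
```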

" }, "TagResource":{ "name":"TagResource", @@ -248,7 +248,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

If the IdentityProviderType of the server is API_Gateway, tests whether your API Gateway is set up successfully. We highly recommend that you call this method to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the API Gateway integration to ensure that your users can successfully use the service.

" + "documentation":"

If the IdentityProviderType of the server is API_GATEWAY, tests whether your API Gateway is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the API Gateway integration to ensure that your users can successfully use the service.
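A minimal boto3 sketch of this test call against an API_GATEWAY server; all values shown are placeholders:

```python
import boto3

transfer = boto3.client("transfer")

result = transfer.test_identity_provider(
    ServerId="s-0123456789abcdef0",  # placeholder API_GATEWAY server
    UserName="alice",
    UserPassword="example-password",  # placeholder
)

# StatusCode and Response come from your API Gateway; Message summarizes
# whether the test succeeded, and Url is the endpoint that was invoked.
print(result["StatusCode"], result.get("Message"), result["Url"])
```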

" }, "UntagResource":{ "name":"UntagResource", @@ -308,23 +308,23 @@ "members":{ "EndpointDetails":{ "shape":"EndpointDetails", - "documentation":"

The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server.

" + "documentation":"

The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server. This parameter is required when you specify a value for the EndpointType parameter.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of VPC endpoint that you want your SFTP server connect to. If you connect to a VPC endpoint, your SFTP server isn't accessible over the public internet.

" + "documentation":"

The type of VPC endpoint that you want your SFTP server to connect to. If you connect to a VPC endpoint, your SFTP server isn't accessible over the public internet.

" }, "HostKey":{ "shape":"HostKey", - "documentation":"

The RSA private key as generated by ssh-keygen -N \"\" -f my-new-server-key command.

If you aren't planning to migrate existing users from an existing SFTP server to a new AWS SFTP server, don't update the host key. Accidentally changing a server's host key can be disruptive. For more information, see change-host-key in the AWS SFTP User Guide.

" + "documentation":"

The RSA private key as generated by the ssh-keygen -N \"\" -f my-new-server-key command.

If you aren't planning to migrate existing users from an existing SFTP server to a new AWS SFTP server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see \"https://docs.aws.amazon.com/transfer/latest/userguide/change-host-key\" in the AWS SFTP User Guide.
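A hedged sketch of rotating a host key with boto3 after generating it with the ssh-keygen command above; the key path and server ID are placeholders:

```python
import boto3

# Generated beforehand with: ssh-keygen -N "" -f my-new-server-key
with open("my-new-server-key") as f:
    host_key = f.read()  # the RSA *private* key

transfer = boto3.client("transfer")

# Only rotate the host key when migrating users from an existing SFTP
# server; changing it otherwise is disruptive. ServerId is a placeholder.
transfer.update_server(
    ServerId="s-0123456789abcdef0",
    HostKey=host_key,
)
```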

" }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", - "documentation":"

An array containing all of the information required to call a customer-supplied authentication API. This parameter is not required when the IdentityProviderType value of server that is created uses the SERVICE_MANAGED authentication method.

" + "documentation":"

This parameter is required when the IdentityProviderType is set to API_GATEWAY. Accepts an array containing all of the information required to call a customer-supplied authentication API, including the API Gateway URL. This property is not required when the IdentityProviderType is set to SERVICE_MANAGED.

" }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

The mode of authentication enabled for this service. The default value is SERVICE_MANAGED, which allows you to store and access SFTP user credentials within the service. An IdentityProviderType value of API_GATEWAY indicates that user authentication requires a call to an API Gateway endpoint URL provided by you to integrate an identity provider of your choice.

" + "documentation":"

Specifies the mode of authentication for the SFTP server. The default value is SERVICE_MANAGED, which allows you to store and access SFTP user credentials within the AWS Transfer for SFTP service. Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an API Gateway endpoint URL to call for authentication using the IdentityProviderDetails parameter.

" }, "LoggingRole":{ "shape":"Role", @@ -360,7 +360,7 @@ }, "Policy":{ "shape":"Policy", - "documentation":"

A scope-down policy for your user so you can use the same IAM role across multiple users. This policy scopes down user access to portions of their Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

" + "documentation":"

A scope-down policy for your user so you can use the same IAM role across multiple users. This policy scopes down user access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a Scope-Down Policy at \"https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\".

For more information, see \"https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html\" in the AWS Security Token Service API Reference.
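For example, a sketch of passing a scope-down policy as a JSON blob; the statement shown is illustrative only, and the server ID and role ARN are placeholders:

```python
import json

import boto3

# The policy travels as a JSON blob, not as a managed-policy ARN. The
# ${Transfer:*} variables are resolved per user at request time.
scope_down_policy = json.dumps({
    "Version": "2012-10-17",
    "Statement": [{
        "Sid": "HomeDirOnly",
        "Effect": "Allow",
        "Action": ["s3:GetObject", "s3:PutObject"],
        "Resource": "arn:aws:s3:::${Transfer:HomeBucket}/home/${Transfer:UserName}/*",
    }],
})

boto3.client("transfer").create_user(
    ServerId="s-0123456789abcdef0",  # placeholder
    UserName="alice",
    Role="arn:aws:iam::123456789012:role/sftp-access",  # placeholder
    Policy=scope_down_policy,
)
```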

" }, "Role":{ "shape":"Role", @@ -372,7 +372,7 @@ }, "SshPublicKeyBody":{ "shape":"SshPublicKeyBody", - "documentation":"

The public portion of the Secure Shall (SSH) key used to authenticate the user to the SFTP server.

" + "documentation":"

The public portion of the Secure Shell (SSH) key used to authenticate the user to the SFTP server.

" }, "Tags":{ "shape":"Tags", @@ -484,7 +484,7 @@ }, "UserName":{ "shape":"UserName", - "documentation":"

The name of the user assigned to one or more servers. User names are part of the sign-in credentials to use the AWS Transfer service and perform file transfer tasks.

" + "documentation":"

The name of the user assigned to one or more servers. User names are part of the sign-in credentials to use the AWS Transfer for SFTP service and perform file transfer tasks.

" } } }, @@ -523,7 +523,7 @@ }, "HostKeyFingerprint":{ "shape":"HostKeyFingerprint", - "documentation":"

This value contains the Message-Digest Algorithm (MD5) hash of the server's host key. This value is equivalent to the output of ssh-keygen -l -E md5 -f my-new-server-key command.

" + "documentation":"

This value contains the message-digest algorithm (MD5) hash of the server's host key. This value is equivalent to the output of the ssh-keygen -l -E md5 -f my-new-server-key command.

" }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", @@ -531,19 +531,19 @@ }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

This property defines the mode of authentication method enabled for this service. A value of SERVICE_MANAGED, means that you are using this Server to store and access SFTP user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

" + "documentation":"

This property defines the mode of authentication enabled for this service. A value of SERVICE_MANAGED means that you are using this server to store and access SFTP user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

" }, "LoggingRole":{ "shape":"Role", - "documentation":"

This property is an AWS Identity and Access Management (IAM) entity that allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be view in your CloudWatch logs.

" + "documentation":"

This property is an AWS Identity and Access Management (IAM) entity that allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be viewed in your CloudWatch logs.

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

This property is a unique system assigned identifier for the SFTP server that you instantiate.

" + "documentation":"

This property is a unique system-assigned identifier for the SFTP server that you instantiate.

" }, "State":{ "shape":"State", - "documentation":"

The condition of the SFTP server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicated that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" + "documentation":"

The condition of the SFTP server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" }, "Tags":{ "shape":"Tags", @@ -554,7 +554,7 @@ "documentation":"

The number of users that are assigned to the SFTP server you specified with the ServerId.

" } }, - "documentation":"

Describe the properties of the server that was specified. Information returned includes: the server Amazon Resource Name (ARN), the authentication configuration and type, the logging role, server Id and state, and assigned tags or metadata.

" + "documentation":"

Describes the properties of the server that was specified. Information returned includes the following: the server Amazon Resource Name (ARN), the authentication configuration and type, the logging role, the server ID and state, and assigned tags or metadata.

" }, "DescribedUser":{ "type":"structure", @@ -566,7 +566,7 @@ }, "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

This property specifies the landing directory (or folder) which is the location that files are written to or read from in an Amazon S3 bucket for the described user. An example would be: /bucket_name/home/username .

" + "documentation":"

This property specifies the landing directory (or folder), which is the location that files are written to or read from in an Amazon S3 bucket for the described user. An example is /bucket_name/home/username .

" }, "Policy":{ "shape":"Policy", @@ -589,7 +589,7 @@ "documentation":"

This property is the name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your SFTP server.

" } }, - "documentation":"

Returns properties of the user that you wish to describe.

" + "documentation":"

Returns properties of the user that you want to describe.

" }, "EndpointDetails":{ "type":"structure", @@ -615,7 +615,8 @@ }, "HostKey":{ "type":"string", - "max":4096 + "max":4096, + "sensitive":true }, "HostKeyFingerprint":{"type":"string"}, "IdentityProviderDetails":{ @@ -623,18 +624,18 @@ "members":{ "Url":{ "shape":"Url", - "documentation":"

The IdentityProviderDetail parameter contains the location of the service endpoint used to authenticate users.

" + "documentation":"

The Url parameter provides the location of the service endpoint used to authenticate users.

" }, "InvocationRole":{ "shape":"Role", - "documentation":"

The Role parameter provides the type of InvocationRole used to authenticate the user account.

" + "documentation":"

The InvocationRole parameter provides the IAM role that is used to authenticate the user account.

" } }, - "documentation":"

Returns information related to the type of user authentication that is in use for a server's users. A server can only have one method of authentication.

" + "documentation":"

Returns information related to the type of user authentication that is in use for a server's users. A server can have only one method of authentication.

" }, "IdentityProviderType":{ "type":"string", - "documentation":"

Returns information related to the type of user authentication that is in use for a server's users. For SERVICE_MANAGED authentication, the Secure Shell (SSH) public keys are stored with a user on an SFTP server instance. For API_GATEWAY authentication, your custom authentication method is implemented by using an API call. A server can only have one method of authentication.

", + "documentation":"

Returns information related to the type of user authentication that is in use for a server's users. For SERVICE_MANAGED authentication, the Secure Shell (SSH) public keys are stored with a user on an SFTP server instance. For API_GATEWAY authentication, your custom authentication method is implemented by using an API call. A server can have only one method of authentication.

", "enum":[ "SERVICE_MANAGED", "API_GATEWAY" @@ -683,7 +684,7 @@ "documentation":"

A user name assigned to the ServerID value that you specified.

" } }, - "documentation":"

This response identifies the user, server they belong to, and the identifier of the SSH public key associated with that user. A user can have more than one key on each server that they are associate with.

" + "documentation":"

This response identifies the user, the server they belong to, and the identifier of the SSH public key associated with that user. A user can have more than one key on each server that they are associated with.

" }, "InternalServiceError":{ "type":"structure", @@ -722,7 +723,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional servers.

" + "documentation":"

When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional servers.

" } } }, @@ -754,7 +755,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When you request additional results from the ListTagsForResource call, a NextToken parameter is returned in the input. You can then pass in a subsequent command the NextToken parameter to continue listing additional tags.

" + "documentation":"

When you request additional results from the ListTagsForResource operation, a NextToken parameter is returned in the input. You can then pass the NextToken parameter in a subsequent command to continue listing additional tags.

" } } }, @@ -767,11 +768,11 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When you can get additional results from the ListTagsForResource call, a NextToken parameter is returned in the output. You can then pass in a subsequent command the NextToken parameter to continue listing additional tags.

" + "documentation":"

When you can get additional results from the ListTagsForResource call, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional tags.

" }, "Tags":{ "shape":"Tags", - "documentation":"

Key-value pairs that are assigned to a resource, usually for the purpose of grouping and searching for items. Tags are metadata that you define that you can use for any purpose.

" + "documentation":"

Key-value pairs that are assigned to a resource, usually for the purpose of grouping and searching for items. Tags are metadata that you define.

" } } }, @@ -785,11 +786,11 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When you can get additional results from the ListUsers call, a NextToken parameter is returned in the output. You can then pass in a subsequent command the NextToken parameter to continue listing additional users.

" + "documentation":"

When you can get additional results from the ListUsers call, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional users.

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) server that has users are assigned to it.

" + "documentation":"

A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) server that has users assigned to it.

" } } }, @@ -802,7 +803,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

When you can get additional results from the ListUsers call, a NextToken parameter is returned in the output. You can then pass in a subsequent command the NextToken parameter to continue listing additional users.

" + "documentation":"

When you can get additional results from the ListUsers call, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional users.
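A minimal NextToken pagination loop over ListUsers, as described above; the server ID is a placeholder:

```python
import boto3

transfer = boto3.client("transfer")

users, token = [], None
while True:
    kwargs = {"ServerId": "s-0123456789abcdef0"}  # placeholder
    if token:
        kwargs["NextToken"] = token
    page = transfer.list_users(**kwargs)
    users.extend(page["Users"])
    token = page.get("NextToken")
    if token is None:
        break

print([u["UserName"] for u in users])
```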

" }, "ServerId":{ "shape":"ServerId", @@ -824,7 +825,7 @@ }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

The authentication method used to validate a user for the server that was specified. listed. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

" + "documentation":"

The authentication method used to validate a user for the server that was specified. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

" }, "EndpointType":{ "shape":"EndpointType", @@ -840,7 +841,7 @@ }, "State":{ "shape":"State", - "documentation":"

This property describes the condition of the SFTP server for the server that was described. A value of ONLINE> indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicated that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" + "documentation":"

This property describes the condition of the SFTP server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" }, "UserCount":{ "shape":"UserCount", @@ -859,7 +860,7 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

This property is the unique Amazon Resource Name (ARN) for the user that you wish to learn about.

" + "documentation":"

This property is the unique Amazon Resource Name (ARN) for the user that you want to learn about.

" }, "HomeDirectory":{ "shape":"HomeDirectory", @@ -867,7 +868,7 @@ }, "Role":{ "shape":"Role", - "documentation":"

The role in use by this user. A role is an AWS Identity and Access Management (IAM) entity that in this case allows the SFTP server to act on a user's behalf. It allows the server to inherit the trust relationship that enables that user to perform file operations to their Amazon S3 bucket.

" + "documentation":"

The role in use by this user. A role is an AWS Identity and Access Management (IAM) entity that, in this case, allows the SFTP server to act on a user's behalf. It allows the server to inherit the trust relationship that enables that user to perform file operations to their Amazon S3 bucket.

" }, "SshPublicKeyCount":{ "shape":"SshPublicKeyCount", @@ -932,6 +933,7 @@ "exception":true }, "ResourceType":{"type":"string"}, + "Response":{"type":"string"}, "Role":{ "type":"string", "pattern":"arn:.*role/.*" @@ -1080,11 +1082,11 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system assigned identifier for a specific server. That server's user authentication method is tested with a user name and password.

" + "documentation":"

A system-assigned identifier for a specific server. That server's user authentication method is tested with a user name and password.

" }, "UserName":{ "shape":"UserName", - "documentation":"

This request parameter is name of the user account to be tested.

" + "documentation":"

This request parameter is the name of the user account to be tested.

" }, "UserPassword":{ "shape":"UserPassword", @@ -1099,14 +1101,18 @@ "Url" ], "members":{ - "Message":{ - "shape":"Message", - "documentation":"

The result of the authorization test as a message.

" + "Response":{ + "shape":"Response", + "documentation":"

The response that is returned from your API Gateway.

" }, "StatusCode":{ "shape":"StatusCode", "documentation":"

The HTTP status code that is the response from your API Gateway.

" }, + "Message":{ + "shape":"Message", + "documentation":"

A message that indicates whether the test was successful or not.

" + }, "Url":{ "shape":"Url", "documentation":"

The endpoint of the service used to authenticate a user.

" @@ -1144,7 +1150,7 @@ }, "HostKey":{ "shape":"HostKey", - "documentation":"

The RSA private key as generated by ssh-keygen -N \"\" -f my-new-server-key.

If you aren't planning to migrate existing users from an existing SFTP server to a new AWS SFTP server, don't update the host key. Accidentally changing a server's host key can be disruptive. For more information, see change-host-key in the AWS SFTP User Guide.

" + "documentation":"

The RSA private key as generated by ssh-keygen -N \"\" -f my-new-server-key.

If you aren't planning to migrate existing users from an existing SFTP server to a new AWS SFTP server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see \"https://docs.aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key\" in the AWS SFTP User Guide.

" }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", @@ -1179,11 +1185,11 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

The HomeDirectory parameter specifies the landing directory (folder) for a user when they log in to the server using their client. An example would be: /home/username .

" + "documentation":"

A parameter that specifies the landing directory (folder) for a user when they log in to the server using their client. An example is /home/username .

" }, "Policy":{ "shape":"Policy", - "documentation":"

Allows you to supply a scope-down policy for your user so you can use the same AWS Identity and Access Management (IAM) role across multiple users. The policy scopes down users access to portions of your Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

" + "documentation":"

Allows you to supply a scope-down policy for your user so you can use the same AWS Identity and Access Management (IAM) role across multiple users. The policy scopes down user access to portions of your Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a Scope-Down Policy at \"https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\".

For more information, see \"https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html\" in the AWS Security Token Service API Reference.

" }, "Role":{ "shape":"Role", @@ -1195,7 +1201,7 @@ }, "UserName":{ "shape":"UserName", - "documentation":"

A unique string that identifies a user and is associated with a server as specified by the ServerId. This is the string that will be used by your user when they log in to your SFTP server. This user name is a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

" + "documentation":"

A unique string that identifies a user and is associated with a server as specified by the ServerId. This is the string that will be used by your user when they log in to your SFTP server. This user name is a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

" } } }, @@ -1223,11 +1229,14 @@ "type":"string", "pattern":"^[a-zA-Z0-9_][a-zA-Z0-9_-]{2,31}$" }, - "UserPassword":{"type":"string"}, + "UserPassword":{ + "type":"string", + "sensitive":true + }, "VpcEndpointId":{ "type":"string", "pattern":"^vpce-[0-9a-f]{17}$" } }, - "documentation":"

AWS Transfer for SFTP is a fully managed service that enables the transfer of files directly into and out of Amazon S3 using the Secure File Transfer Protocol (SFTP)—also known as Secure Shell (SSH) File Transfer Protocol. AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer for SFTP—by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53—so nothing changes for your customers and partners, or their applications. With your data in S3, you can use it with AWS services for processing, analytics, machine learning, and archiving. Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no infrastructure to buy and setup.

" + "documentation":"

AWS Transfer for SFTP is a fully managed service that enables the transfer of files directly into and out of Amazon S3 using the Secure File Transfer Protocol (SFTP)—also known as Secure Shell (SSH) File Transfer Protocol. AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer for SFTP—by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53—so nothing changes for your customers and partners, or their applications. With your data in S3, you can use it with AWS services for processing, analytics, machine learning, and archiving. Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no infrastructure to buy and set up.

" } diff --git a/botocore/data/waf-regional/2016-11-28/service-2.json b/botocore/data/waf-regional/2016-11-28/service-2.json index a22d38a4..58ae7c2f 100644 --- a/botocore/data/waf-regional/2016-11-28/service-2.json +++ b/botocore/data/waf-regional/2016-11-28/service-2.json @@ -1059,7 +1059,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFServiceLinkedRoleErrorException"} ], - "documentation":"

Associates a LoggingConfiguration with a specified web ACL.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose .

    Create the data firehose with a PUT source and in the region that you are operating. However, if you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" + "documentation":"

Associates a LoggingConfiguration with a specified web ACL.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the region in which you are operating. However, if you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose with your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.
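A hedged boto3 sketch of step 2; both ARNs are placeholders, and the delivery stream from step 1 is assumed to already exist with a PUT source:

```python
import boto3

waf = boto3.client("waf-regional")  # use "waf" for CloudFront web ACLs

# Both ARNs are placeholders; the firehose must live in your operating
# region (or us-east-1 when logging for CloudFront).
waf.put_logging_configuration(
    LoggingConfiguration={
        "ResourceArn": "arn:aws:waf-regional:us-west-2:123456789012:webacl/example-id",
        "LogDestinationConfigs": [
            "arn:aws:firehose:us-west-2:123456789012:deliverystream/aws-waf-logs-example",
        ],
        # Redact sensitive fields from the logs; Type must be one of
        # URI, QUERY_STRING, HEADER, or METHOD.
        "RedactedFields": [{"Type": "HEADER", "Data": "authorization"}],
    }
)
```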

" }, "PutPermissionPolicy":{ "name":"PutPermissionPolicy", @@ -3163,7 +3163,7 @@ "documentation":"

The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) that web requests originate from. If the WebACL is associated with a CloudFront distribution and the viewer did not use an HTTP proxy or a load balancer to send the request, this is the value of the c-ip field in the CloudFront access logs.

" } }, - "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/31. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
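A minimal boto3 sketch of inserting a single address as a /32 descriptor; the IPSetId is a placeholder, and the same call exists on the classic waf client:

```python
import boto3

waf = boto3.client("waf-regional")  # the classic "waf" client is identical

# WAF writes require a fresh change token; IPSetId is a placeholder.
token = waf.get_change_token()["ChangeToken"]
waf.update_ip_set(
    IPSetId="example-ip-set-id",
    ChangeToken=token,
    Updates=[{
        "Action": "INSERT",
        "IPSetDescriptor": {
            "Type": "IPV4",
            "Value": "192.0.2.0/32",  # a single address as a /32 range
        },
    }],
)
```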

" }, "IPSetDescriptor":{ "type":"structure", @@ -3328,7 +3328,7 @@ "members":{ "NextMarker":{ "shape":"NextMarker", - "documentation":"

If you specify a value for Limit and you have more IPSets than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of IPSets. For the second and subsequent ListIPSets requests, specify the value of NextMarker from the previous response to get information about another batch of IPSets.

" + "documentation":"

AWS WAF returns a NextMarker value in the response that allows you to list another group of IPSets. For the second and subsequent ListIPSets requests, specify the value of NextMarker from the previous response to get information about another batch of IPSets.

" }, "Limit":{ "shape":"PaginationLimit", @@ -3341,7 +3341,7 @@ "members":{ "NextMarker":{ "shape":"NextMarker", - "documentation":"

If you have more IPSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more IPSet objects, submit another ListIPSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

" + "documentation":"

To list more IPSet objects, submit another ListIPSets request, and in the next request use the NextMarker response value as the NextMarker value.
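A minimal NextMarker pagination loop over ListIPSets, following the pattern described above:

```python
import boto3

waf = boto3.client("waf-regional")

ip_sets, marker = [], None
while True:
    kwargs = {"Limit": 100}
    if marker:
        kwargs["NextMarker"] = marker
    page = waf.list_ip_sets(**kwargs)
    ip_sets.extend(page.get("IPSets", []))
    # No NextMarker in the response means there are no more results.
    marker = page.get("NextMarker")
    if not marker:
        break

print(len(ip_sets), "IP sets")
```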

" }, "IPSets":{ "shape":"IPSetSummaries", @@ -3835,7 +3835,7 @@ "members":{ "LoggingConfiguration":{ "shape":"LoggingConfiguration", - "documentation":"

The Amazon Kinesis Data Firehose that contains the inspected traffic information, the redacted fields details, and the Amazon Resource Name (ARN) of the web ACL to monitor.

" + "documentation":"

The Amazon Kinesis Data Firehose that contains the inspected traffic information, the redacted fields details, and the Amazon Resource Name (ARN) of the web ACL to monitor.

When specifying Type in RedactedFields, you must use one of the following values: URI, QUERY_STRING, HEADER, or METHOD.

" } } }, @@ -3913,7 +3913,7 @@ "RateLimit":{ "type":"long", "max":2000000000, - "min":2000 + "min":100 }, "RedactedFields":{ "type":"list", diff --git a/botocore/data/waf/2015-08-24/service-2.json b/botocore/data/waf/2015-08-24/service-2.json index 7c50b596..e3abe896 100644 --- a/botocore/data/waf/2015-08-24/service-2.json +++ b/botocore/data/waf/2015-08-24/service-2.json @@ -993,7 +993,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFServiceLinkedRoleErrorException"} ], - "documentation":"

Associates a LoggingConfiguration with a specified web ACL.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose .

    Create the data firehose with a PUT source and in the region that you are operating. However, if you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" + "documentation":"

Associates a LoggingConfiguration with a specified web ACL.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the region in which you are operating. However, if you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose with your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" }, "PutPermissionPolicy":{ "name":"PutPermissionPolicy", @@ -3041,7 +3041,7 @@ "documentation":"

The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) that web requests originate from. If the WebACL is associated with a CloudFront distribution and the viewer did not use an HTTP proxy or a load balancer to send the request, this is the value of the c-ip field in the CloudFront access logs.

" } }, - "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/31. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" }, "IPSetDescriptor":{ "type":"structure", @@ -3206,7 +3206,7 @@ "members":{ "NextMarker":{ "shape":"NextMarker", - "documentation":"

If you specify a value for Limit and you have more IPSets than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of IPSets. For the second and subsequent ListIPSets requests, specify the value of NextMarker from the previous response to get information about another batch of IPSets.

" + "documentation":"

AWS WAF returns a NextMarker value in the response that allows you to list another group of IPSets. For the second and subsequent ListIPSets requests, specify the value of NextMarker from the previous response to get information about another batch of IPSets.

" }, "Limit":{ "shape":"PaginationLimit", @@ -3219,7 +3219,7 @@ "members":{ "NextMarker":{ "shape":"NextMarker", - "documentation":"

If you have more IPSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more IPSet objects, submit another ListIPSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

" + "documentation":"

To list more IPSet objects, submit another ListIPSets request, and in the next request use the NextMarker response value as the NextMarker value.

" }, "IPSets":{ "shape":"IPSetSummaries", @@ -3690,7 +3690,7 @@ "members":{ "LoggingConfiguration":{ "shape":"LoggingConfiguration", - "documentation":"

The Amazon Kinesis Data Firehose that contains the inspected traffic information, the redacted fields details, and the Amazon Resource Name (ARN) of the web ACL to monitor.

" + "documentation":"

The Amazon Kinesis Data Firehose that contains the inspected traffic information, the redacted fields details, and the Amazon Resource Name (ARN) of the web ACL to monitor.

When specifying Type in RedactedFields, you must use one of the following values: URI, QUERY_STRING, HEADER, or METHOD.

" } } }, @@ -3768,7 +3768,7 @@ "RateLimit":{ "type":"long", "max":2000000000, - "min":2000 + "min":100 }, "RedactedFields":{ "type":"list", diff --git a/botocore/data/workmailmessageflow/2019-05-01/paginators-1.json b/botocore/data/workmailmessageflow/2019-05-01/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/workmailmessageflow/2019-05-01/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/workmailmessageflow/2019-05-01/service-2.json b/botocore/data/workmailmessageflow/2019-05-01/service-2.json new file mode 100644 index 00000000..6d4d4fda --- /dev/null +++ b/botocore/data/workmailmessageflow/2019-05-01/service-2.json @@ -0,0 +1,74 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-05-01", + "endpointPrefix":"workmailmessageflow", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon WorkMail Message Flow", + "serviceId":"WorkMailMessageFlow", + "signatureVersion":"v4", + "uid":"workmailmessageflow-2019-05-01" + }, + "operations":{ + "GetRawMessageContent":{ + "name":"GetRawMessageContent", + "http":{ + "method":"GET", + "requestUri":"/messages/{messageId}" + }, + "input":{"shape":"GetRawMessageContentRequest"}, + "output":{"shape":"GetRawMessageContentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves the raw content of an in-transit email message, in MIME format.
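A minimal boto3 sketch of this call; the message ID is a placeholder (it would normally arrive via a WorkMail Lambda invocation), and parsing uses the standard library email module since the payload is MIME:

```python
import email

import boto3

mmf = boto3.client("workmailmessageflow")

# Placeholder message ID; real IDs come from WorkMail itself.
resp = mmf.get_raw_message_content(messageId="a1b2c3d4-example")
raw = resp["messageContent"].read()  # streaming blob, MIME format

msg = email.message_from_bytes(raw)
print(msg["Subject"], msg["From"])
```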

" + } + }, + "shapes":{ + "GetRawMessageContentRequest":{ + "type":"structure", + "required":["messageId"], + "members":{ + "messageId":{ + "shape":"messageIdType", + "documentation":"

The identifier of the email message to retrieve.

", + "location":"uri", + "locationName":"messageId" + } + } + }, + "GetRawMessageContentResponse":{ + "type":"structure", + "required":["messageContent"], + "members":{ + "messageContent":{ + "shape":"messageContentBlob", + "documentation":"

The raw content of the email message, in MIME format.

" + } + }, + "payload":"messageContent" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

The requested email message is not found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "errorMessage":{"type":"string"}, + "messageContentBlob":{ + "type":"blob", + "streaming":true + }, + "messageIdType":{ + "type":"string", + "max":120, + "min":1, + "pattern":"[a-z0-9\\-]*" + } + }, + "documentation":"

The WorkMail Message Flow API provides access to email messages as they are being sent and received by a WorkMail organization.

" +} diff --git a/botocore/data/workspaces/2015-04-08/service-2.json b/botocore/data/workspaces/2015-04-08/service-2.json index 87582130..8ede9185 100644 --- a/botocore/data/workspaces/2015-04-08/service-2.json +++ b/botocore/data/workspaces/2015-04-08/service-2.json @@ -264,6 +264,21 @@ ], "documentation":"

Retrieves a list that describes one or more specified images, if the image identifiers are provided. Otherwise, all images in the account are described.

" }, + "DescribeWorkspaceSnapshots":{ + "name":"DescribeWorkspaceSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkspaceSnapshotsRequest"}, + "output":{"shape":"DescribeWorkspaceSnapshotsResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Describes the snapshots for the specified WorkSpace.

" + }, "DescribeWorkspaces":{ "name":"DescribeWorkspaces", "http":{ @@ -425,6 +440,21 @@ "output":{"shape":"RebuildWorkspacesResult"}, "documentation":"

Rebuilds the specified WorkSpace.

You cannot rebuild a WorkSpace unless its state is AVAILABLE, ERROR, or UNHEALTHY.

Rebuilding a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Rebuild a WorkSpace.

This operation is asynchronous and returns before the WorkSpaces have been completely rebuilt.

" }, + "RestoreWorkspace":{ + "name":"RestoreWorkspace", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreWorkspaceRequest"}, + "output":{"shape":"RestoreWorkspaceResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Restores the specified WorkSpace to its last known healthy state.

You cannot restore a WorkSpace unless its state is AVAILABLE, ERROR, or UNHEALTHY.

Restoring a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Restore a WorkSpace.

This operation is asynchronous and returns before the WorkSpace is completely restored.
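A hedged boto3 sketch combining DescribeWorkspaceSnapshots and RestoreWorkspace; the WorkSpace ID is a placeholder:

```python
import boto3

workspaces = boto3.client("workspaces")
workspace_id = "ws-0123456789abc"  # placeholder

# A restore snapshot covers both the root volume and the user volume.
snaps = workspaces.describe_workspace_snapshots(WorkspaceId=workspace_id)
restore_points = snaps.get("RestoreSnapshots", [])
if restore_points:
    print("restoring to snapshot from", restore_points[0]["SnapshotTime"])
    # Asynchronous: the WorkSpace passes through the RESTORING state.
    workspaces.restore_workspace(WorkspaceId=workspace_id)
```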

" + }, "RevokeIpRules":{ "name":"RevokeIpRules", "http":{ @@ -1071,6 +1101,29 @@ } } }, + "DescribeWorkspaceSnapshotsRequest":{ + "type":"structure", + "required":["WorkspaceId"], + "members":{ + "WorkspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The identifier of the WorkSpace.

" + } + } + }, + "DescribeWorkspaceSnapshotsResult":{ + "type":"structure", + "members":{ + "RebuildSnapshots":{ + "shape":"SnapshotList", + "documentation":"

Information about the snapshots that can be used to rebuild a WorkSpace. These snapshots include the root volume.

" + }, + "RestoreSnapshots":{ + "shape":"SnapshotList", + "documentation":"

Information about the snapshots that can be used to restore a WorkSpace. These snapshots include both the root volume and the user volume.

" + } + } + }, "DescribeWorkspacesConnectionStatusRequest":{ "type":"structure", "members":{ @@ -1245,7 +1298,7 @@ "documentation":"

The text of the error message that is returned if the WorkSpace cannot be rebooted.

" } }, - "documentation":"

Describes a WorkSpace that could not be rebooted. (RebootWorkspaces), rebuilt (RebuildWorkspaces), terminated (TerminateWorkspaces), started (StartWorkspaces), or stopped (StopWorkspaces).

" + "documentation":"

Describes a WorkSpace that could not be rebooted (RebootWorkspaces), rebuilt (RebuildWorkspaces), restored (RestoreWorkspace), terminated (TerminateWorkspaces), started (StartWorkspaces), or stopped (StopWorkspaces).

" }, "ImportWorkspaceImageRequest":{ "type":"structure", @@ -1706,6 +1759,21 @@ "documentation":"

The specified resource is not available.

", "exception":true }, + "RestoreWorkspaceRequest":{ + "type":"structure", + "required":["WorkspaceId"], + "members":{ + "WorkspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The identifier of the WorkSpace.

" + } + } + }, + "RestoreWorkspaceResult":{ + "type":"structure", + "members":{ + } + }, "RevokeIpRulesRequest":{ "type":"structure", "required":[ @@ -1751,6 +1819,20 @@ "type":"string", "pattern":"^(sg-[0-9a-f]{8})$" }, + "Snapshot":{ + "type":"structure", + "members":{ + "SnapshotTime":{ + "shape":"Timestamp", + "documentation":"

The time when the snapshot was created.

" + } + }, + "documentation":"

Describes a snapshot.

" + }, + "SnapshotList":{ + "type":"list", + "member":{"shape":"Snapshot"} + }, "StartRequest":{ "type":"structure", "members":{ @@ -2341,6 +2423,7 @@ "REBOOTING", "STARTING", "REBUILDING", + "RESTORING", "MAINTENANCE", "ADMIN_MAINTENANCE", "TERMINATING", diff --git a/docs/source/conf.py b/docs/source/conf.py index 4eec6874..e486b386 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ copyright = u'2013, Mitch Garnaat' # The short X.Y version. version = '1.12.2' # The full version, including alpha/beta/rc tags. -release = '1.12.208' +release = '1.12.241' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/requirements.txt b/requirements.txt index ebf2d72c..0ad64b08 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,6 @@ tox>=2.5.0,<3.0.0 nose==1.3.7 mock==1.3.0 wheel==0.24.0 -docutils>=0.10,<0.15 +docutils>=0.10,<0.16 behave==1.2.5 jsonschema==2.5.1 diff --git a/setup.cfg b/setup.cfg index 60669cc0..8aa88d51 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,7 @@ requires-dist = python-dateutil>=2.1,<2.7.0; python_version=="2.6" python-dateutil>=2.1,<3.0.0; python_version>="2.7" jmespath>=0.7.1,<1.0.0 - docutils>=0.10,<0.15 + docutils>=0.10,<0.16 ordereddict==1.1; python_version=="2.6" simplejson==3.3.0; python_version=="2.6" urllib3>=1.20,<1.23; python_version=="3.3" diff --git a/setup.py b/setup.py index 7a45204f..f4b1b13b 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ def find_version(*file_paths): requires = ['jmespath>=0.7.1,<1.0.0', - 'docutils>=0.10,<0.15'] + 'docutils>=0.10,<0.16'] if sys.version_info[:2] == (2, 6): diff --git a/tests/functional/test_waiter_config.py b/tests/functional/test_waiter_config.py index 4c5812b0..4dfa2f7e 100644 --- a/tests/functional/test_waiter_config.py +++ b/tests/functional/test_waiter_config.py @@ -156,7 +156,7 @@ def _validate_acceptor(acceptor, op_model, waiter_name): # JMESPath expression against the output. We'll then # check a few things about this returned search result. search_result = _search_jmespath_expression(expression, op_model) - if not search_result: + if search_result is None: raise AssertionError("JMESPath expression did not match " "anything for waiter '%s': %s" % (waiter_name, expression)) diff --git a/tests/integration/test_ec2.py b/tests/integration/test_ec2.py index 4968e7e9..6a3feed8 100644 --- a/tests/integration/test_ec2.py +++ b/tests/integration/test_ec2.py @@ -64,9 +64,9 @@ class TestEC2Pagination(unittest.TestCase): self.assertEqual(len(results), 3) for parsed in results: reserved_inst_offer = parsed['ReservedInstancesOfferings'] - # There should only be one reserved instance offering on each - # page. - self.assertEqual(len(reserved_inst_offer), 1) + # There should be no more than one reserved instance + # offering on each page. + self.assertLessEqual(len(reserved_inst_offer), 1) def test_can_fall_back_to_old_starting_token(self): # Using an operation that we know will paginate.