diff --git a/PKG-INFO b/PKG-INFO
index a9e1bcbb..dfc8b841 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: botocore
-Version: 1.13.37
+Version: 1.14.14
 Summary: Low-level, data-driven core of boto 3.
 Home-page: https://github.com/boto/botocore
 Author: Amazon Web Services
@@ -22,7 +22,7 @@ Description: botocore
         `boto3 `__.

         On 10/09/2019 support for Python 2.6 and Python 3.3 was deprecated and support
-        will be dropped on 01/10/2020. To avoid disruption, customers using Botocore
+        was dropped on 01/10/2020. To avoid disruption, customers using Botocore
         on Python 2.6 or 3.3 will need to upgrade their version of Python or pin the
         version of Botocore in use prior to 01/10/2020. For more information, see this
         `blog post `__.
@@ -54,11 +54,10 @@ Classifier: Natural Language :: English
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
 Classifier: Programming Language :: Python :: 2.7
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
 Classifier: Programming Language :: Python :: 3.4
 Classifier: Programming Language :: Python :: 3.5
 Classifier: Programming Language :: Python :: 3.6
 Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
diff --git a/README.rst b/README.rst
index 9b41fa18..4241d77c 100644
--- a/README.rst
+++ b/README.rst
@@ -14,7 +14,7 @@ botocore package is the foundation for the
 `boto3 `__.

 On 10/09/2019 support for Python 2.6 and Python 3.3 was deprecated and support
-will be dropped on 01/10/2020. To avoid disruption, customers using Botocore
+was dropped on 01/10/2020. To avoid disruption, customers using Botocore
 on Python 2.6 or 3.3 will need to upgrade their version of Python or pin the
 version of Botocore in use prior to 01/10/2020. For more information, see this
 `blog post `__.
diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO
index a9e1bcbb..dfc8b841 100644
--- a/botocore.egg-info/PKG-INFO
+++ b/botocore.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: botocore
-Version: 1.13.37
+Version: 1.14.14
 Summary: Low-level, data-driven core of boto 3.
 Home-page: https://github.com/boto/botocore
 Author: Amazon Web Services
@@ -22,7 +22,7 @@ Description: botocore
         `boto3 `__.

         On 10/09/2019 support for Python 2.6 and Python 3.3 was deprecated and support
-        will be dropped on 01/10/2020. To avoid disruption, customers using Botocore
+        was dropped on 01/10/2020. To avoid disruption, customers using Botocore
         on Python 2.6 or 3.3 will need to upgrade their version of Python or pin the
         version of Botocore in use prior to 01/10/2020. For more information, see this
         `blog post `__.
@@ -54,11 +54,10 @@ Classifier: Natural Language :: English
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
 Classifier: Programming Language :: Python :: 2.7
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
 Classifier: Programming Language :: Python :: 3.4
 Classifier: Programming Language :: Python :: 3.5
 Classifier: Programming Language :: Python :: 3.6
 Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt
index 75e6e1df..9d80c0d9 100644
--- a/botocore.egg-info/SOURCES.txt
+++ b/botocore.egg-info/SOURCES.txt
@@ -207,6 +207,8 @@ botocore/data/codeguruprofiler/2019-07-18/service-2.json
 botocore/data/codepipeline/2015-07-09/examples-1.json
 botocore/data/codepipeline/2015-07-09/paginators-1.json
 botocore/data/codepipeline/2015-07-09/service-2.json
+botocore/data/codestar-connections/2019-12-01/paginators-1.json
+botocore/data/codestar-connections/2019-12-01/service-2.json
 botocore/data/codestar-notifications/2019-10-15/paginators-1.json
 botocore/data/codestar-notifications/2019-10-15/service-2.json
 botocore/data/codestar/2017-04-19/paginators-1.json
@@ -242,6 +244,8 @@ botocore/data/datasync/2018-11-09/paginators-1.json
 botocore/data/datasync/2018-11-09/service-2.json
 botocore/data/dax/2017-04-19/paginators-1.json
 botocore/data/dax/2017-04-19/service-2.json
+botocore/data/detective/2018-10-26/paginators-1.json
+botocore/data/detective/2018-10-26/service-2.json
 botocore/data/devicefarm/2015-06-23/examples-1.json
 botocore/data/devicefarm/2015-06-23/paginators-1.json
 botocore/data/devicefarm/2015-06-23/service-2.json
@@ -303,6 +307,7 @@ botocore/data/ec2/2016-11-15/waiters-2.json
 botocore/data/ecr/2015-09-21/examples-1.json
 botocore/data/ecr/2015-09-21/paginators-1.json
 botocore/data/ecr/2015-09-21/service-2.json
+botocore/data/ecr/2015-09-21/waiters-2.json
 botocore/data/ecs/2014-11-13/examples-1.json
 botocore/data/ecs/2014-11-13/paginators-1.json
 botocore/data/ecs/2014-11-13/service-2.json
diff --git a/botocore.egg-info/requires.txt b/botocore.egg-info/requires.txt
index a1eadc8d..ecdc4d6c 100644
--- a/botocore.egg-info/requires.txt
+++ b/botocore.egg-info/requires.txt
@@ -1,8 +1,4 @@
 jmespath<1.0.0,>=0.7.1
 docutils<0.16,>=0.10
-python-dateutil<2.8.1,>=2.1
+python-dateutil<3.0.0,>=2.1
 urllib3<1.26,>=1.20
-
-[:python_version=="2.6"]
-ordereddict==1.1
-simplejson==3.3.0
diff --git a/botocore/__init__.py b/botocore/__init__.py
index 30201d2e..b2f3b9a0 100644
--- a/botocore/__init__.py
+++ b/botocore/__init__.py
@@ -16,7 +16,7 @@ import os
 import re
 import logging

-__version__ = '1.13.37'
+__version__ = '1.14.14'


 class NullHandler(logging.Handler):
diff --git a/botocore/args.py b/botocore/args.py
index d60fecaf..bbf236ea 100644
--- a/botocore/args.py
+++ b/botocore/args.py
@@ -31,7 +31,7 @@ from botocore.endpoint import EndpointCreator

 logger = logging.getLogger(__name__)

-VALID_STS_REGIONAL_ENDPOINTS_CONFIG = [
+VALID_REGIONAL_ENDPOINTS_CONFIG = [
     'legacy',
     'regional',
 ]
@@ -83,10 +83,6 @@ class ClientArgsCreator(object):

         signing_region = endpoint_config['signing_region']
         endpoint_region_name = endpoint_config['region_name']
-        if signing_region is None and endpoint_region_name is None:
-            signing_region, endpoint_region_name = \
-                self._get_default_s3_region(service_name, endpoint_bridge)
-            config_kwargs['region_name'] = endpoint_region_name

         event_emitter = copy.copy(self._event_emitter)
         signer = RequestSigner(
@@ -147,12 +143,14 @@ class ClientArgsCreator(object):
         if client_config.user_agent_extra is not None:
             user_agent += ' %s' % client_config.user_agent_extra

+        s3_config = self.compute_s3_config(client_config)
         endpoint_config = self._compute_endpoint_config(
             service_name=service_name, region_name=region_name,
             endpoint_url=endpoint_url, is_secure=is_secure,
             endpoint_bridge=endpoint_bridge,
+            s3_config=s3_config,
         )
         # Create a new client config to be passed to the client based
         # on the final values. We do not want the user to be able
@@ -203,18 +201,76 @@ class ClientArgsCreator(object):
         return s3_configuration

     def _compute_endpoint_config(self, service_name, region_name, endpoint_url,
-                                 is_secure, endpoint_bridge):
-        endpoint_config = endpoint_bridge.resolve(
-            service_name, region_name, endpoint_url, is_secure)
-        if self._should_set_global_sts_endpoint(
-                service_name, region_name, endpoint_url):
-            self._set_global_sts_endpoint(endpoint_config, is_secure)
+                                 is_secure, endpoint_bridge, s3_config):
+        resolve_endpoint_kwargs = {
+            'service_name': service_name,
+            'region_name': region_name,
+            'endpoint_url': endpoint_url,
+            'is_secure': is_secure,
+            'endpoint_bridge': endpoint_bridge,
+        }
+        if service_name == 's3':
+            return self._compute_s3_endpoint_config(
+                s3_config=s3_config, **resolve_endpoint_kwargs)
+        if service_name == 'sts':
+            return self._compute_sts_endpoint_config(**resolve_endpoint_kwargs)
+        return self._resolve_endpoint(**resolve_endpoint_kwargs)
+
+    def _compute_s3_endpoint_config(self, s3_config,
+                                    **resolve_endpoint_kwargs):
+        force_s3_global = self._should_force_s3_global(
+            resolve_endpoint_kwargs['region_name'], s3_config)
+        if force_s3_global:
+            resolve_endpoint_kwargs['region_name'] = None
+        endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
+        self._set_region_if_custom_s3_endpoint(
+            endpoint_config, resolve_endpoint_kwargs['endpoint_bridge'])
+        # For backwards compatibility reasons, we want to make sure the
+        # client.meta.region_name will remain us-east-1 if we forced the
+        # endpoint to be the global region. Specifically, if this value
+        # changes to aws-global, it breaks logic where a user is checking
+        # for us-east-1 as the global endpoint such as in creating buckets.
+        if force_s3_global and endpoint_config['region_name'] == 'aws-global':
+            endpoint_config['region_name'] = 'us-east-1'
         return endpoint_config

-    def _should_set_global_sts_endpoint(self, service_name, region_name,
-                                        endpoint_url):
-        if service_name != 'sts':
-            return False
+    def _should_force_s3_global(self, region_name, s3_config):
+        s3_regional_config = 'legacy'
+        if s3_config and 'us_east_1_regional_endpoint' in s3_config:
+            s3_regional_config = s3_config['us_east_1_regional_endpoint']
+        self._validate_s3_regional_config(s3_regional_config)
+        return (
+            s3_regional_config == 'legacy' and
+            region_name in ['us-east-1', None]
+        )
+
+    def _validate_s3_regional_config(self, config_val):
+        if config_val not in VALID_REGIONAL_ENDPOINTS_CONFIG:
+            raise botocore.exceptions.\
+                InvalidS3UsEast1RegionalEndpointConfigError(
+                    s3_us_east_1_regional_endpoint_config=config_val)
+
+    def _set_region_if_custom_s3_endpoint(self, endpoint_config,
+                                          endpoint_bridge):
+        # If a user is providing a custom URL, the endpoint resolver will
+        # refuse to infer a signing region. If we want to default to s3v4,
+        # we have to account for this.
+        if endpoint_config['signing_region'] is None \
+                and endpoint_config['region_name'] is None:
+            endpoint = endpoint_bridge.resolve('s3')
+            endpoint_config['signing_region'] = endpoint['signing_region']
+            endpoint_config['region_name'] = endpoint['region_name']
+
+    def _compute_sts_endpoint_config(self, **resolve_endpoint_kwargs):
+        endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
+        if self._should_set_global_sts_endpoint(
+                resolve_endpoint_kwargs['region_name'],
+                resolve_endpoint_kwargs['endpoint_url']):
+            self._set_global_sts_endpoint(
+                endpoint_config, resolve_endpoint_kwargs['is_secure'])
+        return endpoint_config
+
+    def _should_set_global_sts_endpoint(self, region_name, endpoint_url):
         if endpoint_url:
             return False
         return (
@@ -228,7 +284,7 @@ class ClientArgsCreator(object):
         if not sts_regional_endpoints_config:
             sts_regional_endpoints_config = 'legacy'
         if sts_regional_endpoints_config not in \
-                VALID_STS_REGIONAL_ENDPOINTS_CONFIG:
+                VALID_REGIONAL_ENDPOINTS_CONFIG:
             raise botocore.exceptions.InvalidSTSRegionalEndpointsConfigError(
                 sts_regional_endpoints_config=sts_regional_endpoints_config)
         return sts_regional_endpoints_config
@@ -238,14 +294,10 @@ class ClientArgsCreator(object):
         endpoint_config['endpoint_url'] = '%s://sts.amazonaws.com' % scheme
         endpoint_config['signing_region'] = 'us-east-1'

-    def _get_default_s3_region(self, service_name, endpoint_bridge):
-        # If a user is providing a custom URL, the endpoint resolver will
-        # refuse to infer a signing region. If we want to default to s3v4,
-        # we have to account for this.
-        if service_name == 's3':
-            endpoint = endpoint_bridge.resolve('s3')
-            return endpoint['signing_region'], endpoint['region_name']
-        return None, None
+    def _resolve_endpoint(self, service_name, region_name,
+                          endpoint_url, is_secure, endpoint_bridge):
+        return endpoint_bridge.resolve(
+            service_name, region_name, endpoint_url, is_secure)

     def _compute_socket_options(self, scoped_config):
         # This disables Nagle's algorithm and is the default socket options
diff --git a/botocore/awsrequest.py b/botocore/awsrequest.py
index f2fd925e..535b91be 100644
--- a/botocore/awsrequest.py
+++ b/botocore/awsrequest.py
@@ -15,7 +15,6 @@ import sys
 import logging
 import functools
 import socket
-import collections

 import urllib3.util
 from urllib3.connection import VerifiedHTTPSConnection
@@ -26,7 +25,7 @@ from urllib3.connectionpool import HTTPSConnectionPool
 import botocore.utils
 from botocore.compat import six
 from botocore.compat import HTTPHeaders, HTTPResponse, urlunsplit, urlsplit, \
-    urlencode
+    urlencode, MutableMapping
 from botocore.exceptions import UnseekableStreamError


@@ -82,38 +81,6 @@ class AWSConnection(object):
         self._expect_header_set = False
         self.response_class = self._original_response_cls

-    def _tunnel(self):
-        # Works around a bug in py26 which is fixed in later versions of
-        # python. Bug involves hitting an infinite loop if readline() returns
-        # nothing as opposed to just ``\r\n``.
-        # As much as I don't like having if py2: code blocks, this seems
-        # the cleanest way to handle this workaround. Fortunately, the
-        # difference from py26 to py3 is very minimal. We're essentially
-        # just overriding the while loop.
-        if sys.version_info[:2] != (2, 6):
-            return super(AWSConnection, self)._tunnel()
-
-        # Otherwise we workaround the issue.
-        self._set_hostport(self._tunnel_host, self._tunnel_port)
-        self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port))
-        for header, value in self._tunnel_headers.iteritems():
-            self.send("%s: %s\r\n" % (header, value))
-        self.send("\r\n")
-        response = self.response_class(self.sock, strict=self.strict,
-                                       method=self._method)
-        (version, code, message) = response._read_status()
-
-        if code != 200:
-            self.close()
-            raise socket.error("Tunnel connection failed: %d %s" %
-                               (code, message.strip()))
-        while True:
-            line = response.fp.readline()
-            if not line:
-                break
-            if line in (b'\r\n', b'\n', b''):
-                break
-
     def _send_request(self, method, url, body, headers, *args, **kwargs):
         self._response_received = False
         if headers.get('Expect', b'') == b'100-continue':
@@ -621,7 +588,7 @@ class _HeaderKey(object):
         return repr(self._key)


-class HeadersDict(collections.MutableMapping):
+class HeadersDict(MutableMapping):
     """A case-insenseitive dictionary to represent HTTP headers. """
     def __init__(self, *args, **kwargs):
         self._dict = {}
diff --git a/botocore/client.py b/botocore/client.py
index cc45cc9b..a10fbcb6 100644
--- a/botocore/client.py
+++ b/botocore/client.py
@@ -194,9 +194,13 @@ class ClientCreator(object):
         # Check to see if the region is a region that we know about. If we
         # don't know about a region, then we can safely assume it's a new
         # region that is sigv4 only, since all new S3 regions only allow sigv4.
+        # The only exception is aws-global. This is a pseudo-region for the
+        # global endpoint, we should respect the signature versions it
+        # supports, which includes v2.
         regions = self._endpoint_resolver.get_available_endpoints(
             's3', client_meta.partition)
-        if client_meta.region_name not in regions:
+        if client_meta.region_name != 'aws-global' and \
+                client_meta.region_name not in regions:
             return

         # If it is a region we know about, we want to default to sigv2, so here
@@ -331,13 +335,14 @@ class ClientEndpointBridge(object):

     def _create_endpoint(self, resolved, service_name, region_name,
                          endpoint_url, is_secure):
+        explicit_region = region_name is not None
         region_name, signing_region = self._pick_region_values(
             resolved, region_name, endpoint_url)
         if endpoint_url is None:
             if self._is_s3_dualstack_mode(service_name):
                 endpoint_url = self._create_dualstack_endpoint(
                     service_name, region_name,
-                    resolved['dnsSuffix'], is_secure)
+                    resolved['dnsSuffix'], is_secure, explicit_region)
             else:
                 # Use the sslCommonName over the hostname for Python 2.6 compat.
                 hostname = resolved.get('sslCommonName', resolved.get('hostname'))
@@ -373,7 +378,12 @@ class ClientEndpointBridge(object):
         return False

     def _create_dualstack_endpoint(self, service_name, region_name,
-                                   dns_suffix, is_secure):
+                                   dns_suffix, is_secure, explicit_region):
+        if not explicit_region and region_name == 'aws-global':
+            # If the region_name passed was not explicitly set, default to
+            # us-east-1 instead of the modeled default aws-global. Dualstack
+            # does not support aws-global
+            region_name = 'us-east-1'
         hostname = '{service}.dualstack.{region}.{dns_suffix}'.format(
             service=service_name, region=region_name,
             dns_suffix=dns_suffix)
diff --git a/botocore/compat.py b/botocore/compat.py
index 47e1c813..4c8424cb 100644
--- a/botocore/compat.py
+++ b/botocore/compat.py
@@ -141,42 +141,13 @@ else:
             return s
         raise ValueError("Expected str or unicode, received %s." % type(s))

-try:
-    from collections import OrderedDict
-except ImportError:
-    # Python2.6 we use the 3rd party back port.
-    from ordereddict import OrderedDict
+
+from collections import OrderedDict


-if sys.version_info[:2] == (2, 6):
-    import simplejson as json
-    # In py26, invalid xml parsed by element tree
-    # will raise a plain old SyntaxError instead of
-    # a real exception, so we need to abstract this change.
-    XMLParseError = SyntaxError
-
-    # Handle https://github.com/shazow/urllib3/issues/497 for py2.6. In
-    # python2.6, there is a known issue where sometimes we cannot read the SAN
-    # from an SSL cert (http://bugs.python.org/issue13034). However, newer
-    # versions of urllib3 will warn you when there is no SAN. While we could
-    # just turn off this warning in urllib3 altogether, we _do_ want warnings
-    # when they're legitimate warnings. This method tries to scope the warning
-    # filter to be as specific as possible.
-    def filter_ssl_san_warnings():
-        warnings.filterwarnings(
-            'ignore',
-            message="Certificate has no.*subjectAltName.*",
-            category=exceptions.SecurityWarning,
-            module=r".*urllib3\.connection")
-else:
-    import xml.etree.cElementTree
-    XMLParseError = xml.etree.cElementTree.ParseError
-    import json
-
-    def filter_ssl_san_warnings():
-        # Noop for non-py26 versions. We will parse the SAN
-        # appropriately.
-        pass
+import xml.etree.cElementTree
+XMLParseError = xml.etree.cElementTree.ParseError
+import json


 def filter_ssl_warnings():
@@ -186,7 +157,6 @@ def filter_ssl_warnings():
         message="A true SSLContext object is not available.*",
         category=exceptions.InsecurePlatformWarning,
         module=r".*urllib3\.util\.ssl_")
-    filter_ssl_san_warnings()


 @classmethod
@@ -210,19 +180,9 @@ HTTPHeaders.from_pairs = from_pairs

 def copy_kwargs(kwargs):
     """
-    There is a bug in Python versions < 2.6.5 that prevents you
-    from passing unicode keyword args (#4978). This function
-    takes a dictionary of kwargs and returns a copy. If you are
-    using Python < 2.6.5, it also encodes the keys to avoid this bug.
-    Oh, and version_info wasn't a namedtuple back then, either!
+    This used to be a compat shim for 2.6 but is now just an alias.
     """
-    vi = sys.version_info
-    if vi[0] == 2 and vi[1] <= 6 and vi[3] < 5:
-        copy_kwargs = {}
-        for key in kwargs:
-            copy_kwargs[key.encode('utf-8')] = kwargs[key]
-    else:
-        copy_kwargs = copy.copy(kwargs)
+    copy_kwargs = copy.copy(kwargs)
     return copy_kwargs


@@ -230,22 +190,12 @@ def total_seconds(delta):
     """
     Returns the total seconds in a ``datetime.timedelta``.

-    Python 2.6 does not have ``timedelta.total_seconds()``, so we have
-    to calculate this ourselves. On 2.7 or better, we'll take advantage of the
-    built-in method.
-
-    The math was pulled from the ``datetime`` docs
-    (http://docs.python.org/2.7/library/datetime.html#datetime.timedelta.total_seconds).
+    This used to be a compat shim for 2.6 but is now just an alias.

     :param delta: The timedelta object
     :type delta: ``datetime.timedelta``
     """
-    if sys.version_info[:2] != (2, 6):
-        return delta.total_seconds()
-
-    day_in_seconds = delta.days * 24 * 3600.0
-    micro_in_seconds = delta.microseconds / 10.0**6
-    return day_in_seconds + delta.seconds + micro_in_seconds
+    return delta.total_seconds()


 # Checks to see if md5 is available on this system. A given system might not
@@ -377,3 +327,9 @@ def _windows_shell_split(s):
         components.append(''.join(buff))

     return components
+
+
+try:
+    from collections.abc import MutableMapping
+except ImportError:
+    from collections import MutableMapping
diff --git a/botocore/config.py b/botocore/config.py
index 038d17eb..e80da122 100644
--- a/botocore/config.py
+++ b/botocore/config.py
@@ -91,6 +91,18 @@ class Config(object):
           * path -- Addressing style is always by path. Endpoints will be
             addressed as such: s3.amazonaws.com/mybucket

+        * 'us_east_1_regional_endpoint' - Refers to what S3 endpoint to use
+          when the region is configured to be us-east-1. Values must be a
+          string that equals:
+
+          * regional -- Use the us-east-1.amazonaws.com endpoint if the
+            client is configured to use the us-east-1 region.
+
+          * legacy -- Use the s3.amazonaws.com endpoint if the client is
+            configured to use the us-east-1 region. This is the default if
+            the configuration option is not specified.
+
+
     :type retries: dict
     :param retries:
         A dictionary for retry specific configurations. Valid keys are:
diff --git a/botocore/configprovider.py b/botocore/configprovider.py
index c9d2032e..bde7b8d6 100644
--- a/botocore/configprovider.py
+++ b/botocore/configprovider.py
@@ -109,6 +109,11 @@ DEFAULT_S3_CONFIG_VARS = {
         ['s3_use_arn_region',
          ('s3', 'use_arn_region')],
         'AWS_S3_USE_ARN_REGION', None, utils.ensure_boolean
+    ),
+    'us_east_1_regional_endpoint': (
+        ['s3_us_east_1_regional_endpoint',
+         ('s3', 'us_east_1_regional_endpoint')],
+        'AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', None, None
     )
 }

diff --git a/botocore/data/accessanalyzer/2019-11-01/service-2.json b/botocore/data/accessanalyzer/2019-11-01/service-2.json
index b95f372c..fffbbbe0 100644
--- a/botocore/data/accessanalyzer/2019-11-01/service-2.json
+++ b/botocore/data/accessanalyzer/2019-11-01/service-2.json
@@ -29,7 +29,7 @@
         {"shape":"ThrottlingException"},
         {"shape":"AccessDeniedException"}
       ],
-      "documentation":"

Creates an analyzer with a zone of trust set to your account.

", + "documentation":"

Creates an analyzer for your account.

", "idempotent":true }, "CreateArchiveRule":{ @@ -49,7 +49,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Creates an archive rule for the specified analyzer.

", + "documentation":"

Creates an archive rule for the specified analyzer. Archive rules automatically archive findings that meet the criteria you define when you create the rule.

", "idempotent":true }, "DeleteAnalyzer":{ @@ -104,7 +104,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves information about an analyzed resource.

" + "documentation":"

Retrieves information about a resource that was analyzed.

" }, "GetAnalyzer":{ "name":"GetAnalyzer", @@ -176,7 +176,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves a list of resources that have been analyzed.

" + "documentation":"

Retrieves a list of resources of the specified type that have been analyzed by the specified analyzer..

" }, "ListAnalyzers":{ "name":"ListAnalyzers", @@ -263,7 +263,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Starts a scan of the policies applied to the specified resource.

" + "documentation":"

Immediately starts a scan of the policies applied to the specified resource.

" }, "TagResource":{ "name":"TagResource", @@ -318,7 +318,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Updates the specified archive rule.

", + "documentation":"

Updates the criteria and values for the specified archive rule.

", "idempotent":true }, "UpdateFindings":{ @@ -336,7 +336,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Updates findings with the new values provided in the request.

", + "documentation":"

Updates the status for the specified findings.

", "idempotent":true } }, @@ -561,7 +561,7 @@ }, "archiveRules":{ "shape":"InlineArchiveRulesList", - "documentation":"

Specifies the archive rules to add for the analyzer.

" + "documentation":"

Specifies the archive rules to add for the analyzer. Archive rules automatically archive findings that meet the criteria you define for the rule.

" }, "clientToken":{ "shape":"String", @@ -574,7 +574,7 @@ }, "type":{ "shape":"Type", - "documentation":"

The zone of trust for the analyzer. You can create only one analyzer per account per Region.

" + "documentation":"

The type of analyzer to create. Only ACCOUNT analyzers are supported. You can create only one analyzer per account per Region.

" } }, "documentation":"

Creates an analyzer.

" @@ -639,7 +639,7 @@ "documentation":"

A \"not equals\" operator to match for the filter used to create the rule.

" } }, - "documentation":"

The criteria to use in the filter that defines the rule.

" + "documentation":"

The criteria to use in the filter that defines the archive rule.

" }, "DeleteAnalyzerRequest":{ "type":"structure", @@ -670,7 +670,7 @@ "members":{ "analyzerName":{ "shape":"Name", - "documentation":"

The name of the analyzer that was deleted.

", + "documentation":"

The name of the analyzer that associated with the archive rule to delete.

", "location":"uri", "locationName":"analyzerName" }, @@ -961,7 +961,7 @@ "documentation":"

A finding object that contains finding details.

" } }, - "documentation":"

The resposne to the request.

" + "documentation":"

The response to the request.

" }, "InlineArchiveRule":{ "type":"structure", @@ -972,14 +972,14 @@ "members":{ "filter":{ "shape":"FilterCriteriaMap", - "documentation":"

The criteria for the rule.

" + "documentation":"

The condition and values for a criterion.

" }, "ruleName":{ "shape":"Name", "documentation":"

The name of the rule.

" } }, - "documentation":"

An inline archive rule.

" + "documentation":"

An criterion statement in an archive rule. Each archive rule may have multiple criteria.

" }, "InlineArchiveRulesList":{ "type":"list", @@ -1061,7 +1061,7 @@ }, "type":{ "shape":"Type", - "documentation":"

The type of analyzer, which corresponds to the zone of trust selected when the analyzer was created.

", + "documentation":"

The type of analyzer.

", "location":"querystring", "locationName":"type" } @@ -1163,7 +1163,7 @@ "documentation":"

A token used for pagination of results returned.

" } }, - "documentation":"

The resposne to the request.

" + "documentation":"

The response to the request.

" }, "ListTagsForResourceRequest":{ "type":"structure", @@ -1287,7 +1287,7 @@ "documentation":"

The sort order, ascending or descending.

" } }, - "documentation":"

The sort criteria.

" + "documentation":"

The criteria used to sort.

" }, "StartResourceScanRequest":{ "type":"structure", @@ -1362,7 +1362,10 @@ }, "exception":true }, - "Timestamp":{"type":"timestamp"}, + "Timestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "Token":{"type":"string"}, "Type":{ "type":"string", @@ -1394,7 +1397,7 @@ "type":"structure", "members":{ }, - "documentation":"

The response tot he request.

" + "documentation":"

The response to the request.

" }, "UpdateArchiveRuleRequest":{ "type":"structure", @@ -1521,5 +1524,5 @@ "min":1 } }, - "documentation":"

AWS IAM Access Analyzer API Reference

" + "documentation":"

AWS IAM Access Analyzer helps identify potential resource-access risks by enabling you to identify any policies that grant access to an external principal. It does this by using logic-based reasoning to analyze resource-based policies in your AWS environment. An external principal can be another AWS account, a root user, an IAM user or role, a federated user, an AWS service, or an anonymous user. This guide describes the AWS IAM Access Analyzer operations that you can call programmatically. For general information about Access Analyzer, see the AWS IAM Access Analyzer section of the IAM User Guide.

To start using Access Analyzer, you first need to create an analyzer.

" } diff --git a/botocore/data/alexaforbusiness/2017-11-09/service-2.json b/botocore/data/alexaforbusiness/2017-11-09/service-2.json index 1cd738ae..868cf9aa 100644 --- a/botocore/data/alexaforbusiness/2017-11-09/service-2.json +++ b/botocore/data/alexaforbusiness/2017-11-09/service-2.json @@ -1753,6 +1753,7 @@ "OFFLINE" ] }, + "ConnectionStatusUpdatedTime":{"type":"timestamp"}, "Contact":{ "type":"structure", "members":{ @@ -2731,10 +2732,15 @@ "DeviceStatusInfo":{ "shape":"DeviceStatusInfo", "documentation":"

Detailed information about a device's status.

" + }, + "CreatedTime":{ + "shape":"DeviceDataCreatedTime", + "documentation":"

The time (in epoch) when the device data was created.

" } }, "documentation":"

Device attributes.

" }, + "DeviceDataCreatedTime":{"type":"timestamp"}, "DeviceDataList":{ "type":"list", "member":{"shape":"DeviceData"} @@ -2873,6 +2879,10 @@ "ConnectionStatus":{ "shape":"ConnectionStatus", "documentation":"

The latest available information about the connection status of a device.

" + }, + "ConnectionStatusUpdatedTime":{ + "shape":"ConnectionStatusUpdatedTime", + "documentation":"

The time (in epoch) when the device connection status changed.

" } }, "documentation":"

Detailed information about a device's status.

" diff --git a/botocore/data/application-insights/2018-11-25/service-2.json b/botocore/data/application-insights/2018-11-25/service-2.json index 509c01eb..fd3f072a 100644 --- a/botocore/data/application-insights/2018-11-25/service-2.json +++ b/botocore/data/application-insights/2018-11-25/service-2.json @@ -259,6 +259,21 @@ ], "documentation":"

Lists the auto-grouped, standalone, and custom components of the application.

" }, + "ListConfigurationHistory":{ + "name":"ListConfigurationHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListConfigurationHistoryRequest"}, + "output":{"shape":"ListConfigurationHistoryResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists the INFO, WARN, and ERROR events for periodic configuration updates performed by Application Insights. Examples of events represented are:

  • INFO: creating a new alarm or updating an alarm threshold.

  • WARN: alarm not created due to insufficient data points used to predict thresholds.

  • ERROR: alarm not created due to permission errors or exceeding quotas.

" + }, "ListLogPatternSets":{ "name":"ListLogPatternSets", "http":{ @@ -464,7 +479,7 @@ }, "Remarks":{ "shape":"Remarks", - "documentation":"

The issues on the user side that block Application Insights from successfully monitoring an application.

" + "documentation":"

The issues on the user side that block Application Insights from successfully monitoring an application. Example remarks include:

  • “Configuring application, detected 1 Errors, 3 Warnings”

  • “Configuring application, detected 1 Unconfigured Components”

" } }, "documentation":"

Describes the status of the application.

" @@ -487,6 +502,60 @@ "min":1 }, "ComponentName":{"type":"string"}, + "ConfigurationEvent":{ + "type":"structure", + "members":{ + "MonitoredResourceARN":{ + "shape":"ConfigurationEventMonitoredResourceARN", + "documentation":"

The resource monitored by Application Insights.

" + }, + "EventStatus":{ + "shape":"ConfigurationEventStatus", + "documentation":"

The status of the configuration update event. Possible values include INFO, WARN, and ERROR.

" + }, + "EventResourceType":{ + "shape":"ConfigurationEventResourceType", + "documentation":"

The resource type that Application Insights attempted to configure, for example, CLOUDWATCH_ALARM.

" + }, + "EventTime":{ + "shape":"ConfigurationEventTime", + "documentation":"

The timestamp of the event.

" + }, + "EventDetail":{ + "shape":"ConfigurationEventDetail", + "documentation":"

The details of the event in plain text.

" + }, + "EventResourceName":{ + "shape":"ConfigurationEventResourceName", + "documentation":"

The name of the resource Application Insights attempted to configure.

" + } + }, + "documentation":"

The event information.

" + }, + "ConfigurationEventDetail":{"type":"string"}, + "ConfigurationEventList":{ + "type":"list", + "member":{"shape":"ConfigurationEvent"} + }, + "ConfigurationEventMonitoredResourceARN":{"type":"string"}, + "ConfigurationEventResourceName":{"type":"string"}, + "ConfigurationEventResourceType":{ + "type":"string", + "enum":[ + "CLOUDWATCH_ALARM", + "CLOUDFORMATION", + "SSM_ASSOCIATION" + ] + }, + "ConfigurationEventStatus":{ + "type":"string", + "enum":[ + "INFO", + "WARN", + "ERROR" + ] + }, + "ConfigurationEventTime":{"type":"timestamp"}, "CreateApplicationRequest":{ "type":"structure", "required":["ResourceGroupName"], @@ -946,6 +1015,48 @@ } } }, + "ListConfigurationHistoryRequest":{ + "type":"structure", + "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"

Resource group to which the application belongs.

" + }, + "StartTime":{ + "shape":"StartTime", + "documentation":"

The start time of the event.

" + }, + "EndTime":{ + "shape":"EndTime", + "documentation":"

The end time of the event.

" + }, + "EventStatus":{ + "shape":"ConfigurationEventStatus", + "documentation":"

The status of the configuration update event. Possible values include INFO, WARN, and ERROR.

" + }, + "MaxResults":{ + "shape":"MaxEntities", + "documentation":"

The maximum number of results returned by ListConfigurationHistory in paginated output. When this parameter is used, ListConfigurationHistory returns only MaxResults in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another ListConfigurationHistory request with the returned NextToken value. If this parameter is not used, then ListConfigurationHistory returns all results.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

The NextToken value returned from a previous paginated ListConfigurationHistory request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

" + } + } + }, + "ListConfigurationHistoryResponse":{ + "type":"structure", + "members":{ + "EventList":{ + "shape":"ConfigurationEventList", + "documentation":"

The list of configuration events and their corresponding details.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

The NextToken value to include in a future ListConfigurationHistory request. When the results of a ListConfigurationHistory request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, "ListLogPatternSetsRequest":{ "type":"structure", "required":["ResourceGroupName"], diff --git a/botocore/data/appsync/2017-07-25/service-2.json b/botocore/data/appsync/2017-07-25/service-2.json index f014c882..b2dfb371 100644 --- a/botocore/data/appsync/2017-07-25/service-2.json +++ b/botocore/data/appsync/2017-07-25/service-2.json @@ -760,7 +760,8 @@ "shape":"ApiCacheStatus", "documentation":"

The cache instance status.

  • AVAILABLE: The instance is available for use.

  • CREATING: The instance is currently creating.

  • DELETING: The instance is currently deleting.

  • MODIFYING: The instance is currently modifying.

  • FAILED: The instance has failed creation.

" } - } + }, + "documentation":"

The ApiCache object.

" }, "ApiCacheStatus":{ "type":"string", @@ -1182,6 +1183,10 @@ "additionalAuthenticationProviders":{ "shape":"AdditionalAuthenticationProviders", "documentation":"

A list of additional authentication providers for the GraphqlApi API.

" + }, + "xrayEnabled":{ + "shape":"Boolean", + "documentation":"

A flag indicating whether to enable X-Ray tracing for the GraphqlApi.

" } } }, @@ -1688,7 +1693,10 @@ "GetApiCacheResponse":{ "type":"structure", "members":{ - "apiCache":{"shape":"ApiCache"} + "apiCache":{ + "shape":"ApiCache", + "documentation":"

The ApiCache object.

" + } }, "documentation":"

Represents the output of a GetApiCache operation.

" }, @@ -1960,6 +1968,10 @@ "additionalAuthenticationProviders":{ "shape":"AdditionalAuthenticationProviders", "documentation":"

A list of additional authentication providers for the GraphqlApi API.

" + }, + "xrayEnabled":{ + "shape":"Boolean", + "documentation":"

A flag representing whether X-Ray tracing is enabled for this GraphqlApi.

" } }, "documentation":"

Describes a GraphQL API.

" @@ -1999,7 +2011,8 @@ "shape":"String", "documentation":"

The Arn for the Lambda function to use as the Conflict Handler.

" } - } + }, + "documentation":"

The LambdaConflictHandlerConfig object when configuring LAMBDA as the Conflict Handler.

" }, "LambdaDataSourceConfig":{ "type":"structure", @@ -2942,6 +2955,10 @@ "additionalAuthenticationProviders":{ "shape":"AdditionalAuthenticationProviders", "documentation":"

A list of additional authentication providers for the GraphqlApi API.

" + }, + "xrayEnabled":{ + "shape":"Boolean", + "documentation":"

A flag indicating whether to enable X-Ray tracing for the GraphqlApi.

" } } }, diff --git a/botocore/data/backup/2018-11-15/service-2.json b/botocore/data/backup/2018-11-15/service-2.json index 54c4c0ca..a80b024a 100644 --- a/botocore/data/backup/2018-11-15/service-2.json +++ b/botocore/data/backup/2018-11-15/service-2.json @@ -26,7 +26,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Backup plans are documents that contain information that AWS Backup uses to schedule tasks that create recovery points of resources.

If you call CreateBackupPlan with a plan that already exists, the existing backupPlanId is returned.

", + "documentation":"

Backup plans are documents that contain information that AWS Backup uses to schedule tasks that create recovery points of resources.

If you call CreateBackupPlan with a plan that already exists, an AlreadyExistsException is returned.

", "idempotent":true }, "CreateBackupSelection":{ @@ -44,7 +44,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates a JSON document that specifies a set of resources to assign to a backup plan. Resources can be included by specifying patterns for a ListOfTags and selected Resources.

For example, consider the following patterns:

  • Resources: \"arn:aws:ec2:region:account-id:volume/volume-id\"

  • ConditionKey:\"department\"

    ConditionValue:\"finance\"

    ConditionType:\"StringEquals\"

  • ConditionKey:\"importance\"

    ConditionValue:\"critical\"

    ConditionType:\"StringEquals\"

Using these patterns would back up all Amazon Elastic Block Store (Amazon EBS) volumes that are tagged as \"department=finance\", \"importance=critical\", in addition to an EBS volume with the specified volume Id.

Resources and conditions are additive in that all resources that match the pattern are selected. This shouldn't be confused with a logical AND, where all conditions must match. The matching patterns are logically 'put together using the OR operator. In other words, all patterns that match are selected for backup.

", + "documentation":"

Creates a JSON document that specifies a set of resources to assign to a backup plan. Resources can be included by specifying patterns for a ListOfTags and selected Resources.

For example, consider the following patterns:

  • Resources: \"arn:aws:ec2:region:account-id:volume/volume-id\"

  • ConditionKey:\"department\"

    ConditionValue:\"finance\"

    ConditionType:\"STRINGEQUALS\"

  • ConditionKey:\"importance\"

    ConditionValue:\"critical\"

    ConditionType:\"STRINGEQUALS\"

Using these patterns would back up all Amazon Elastic Block Store (Amazon EBS) volumes that are tagged as \"department=finance\", \"importance=critical\", in addition to an EBS volume with the specified volume Id.

Resources and conditions are additive in that all resources that match the pattern are selected. This shouldn't be confused with a logical AND, where all conditions must match. The matching patterns are logically 'put together using the OR operator. In other words, all patterns that match are selected for backup.

", "idempotent":true }, "CreateBackupVault":{ @@ -197,6 +197,23 @@ "documentation":"

Returns metadata about a backup vault specified by its name.

", "idempotent":true }, + "DescribeCopyJob":{ + "name":"DescribeCopyJob", + "http":{ + "method":"GET", + "requestUri":"/copy-jobs/{copyJobId}" + }, + "input":{"shape":"DescribeCopyJobInput"}, + "output":{"shape":"DescribeCopyJobOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns metadata associated with creating a copy of a resource.

", + "idempotent":true + }, "DescribeProtectedResource":{ "name":"DescribeProtectedResource", "http":{ @@ -380,7 +397,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns two sets of metadata key-value pairs. The first set lists the metadata that the recovery point was created with. The second set lists the metadata key-value pairs that are required to restore the recovery point.

These sets can be the same, or the restore metadata set can contain different values if the target service to be restored has changed since the recovery point was created and now requires additional or different information in order to be restored.

", + "documentation":"

Returns a set of metadata key-value pairs that were used to create the backup.

", "idempotent":true }, "GetSupportedResourceTypes":{ @@ -405,6 +422,7 @@ "output":{"shape":"ListBackupJobsOutput"}, "errors":[ {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidRequestException"}, {"shape":"ServiceUnavailableException"} ], "documentation":"

Returns metadata about your backup jobs.

", @@ -494,6 +512,20 @@ "documentation":"

Returns a list of recovery point storage containers along with information about them.

", "idempotent":true }, + "ListCopyJobs":{ + "name":"ListCopyJobs", + "http":{ + "method":"GET", + "requestUri":"/copy-jobs/" + }, + "input":{"shape":"ListCopyJobsInput"}, + "output":{"shape":"ListCopyJobsOutput"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns metadata about your copy jobs.

" + }, "ListProtectedResources":{ "name":"ListProtectedResources", "http":{ @@ -627,6 +659,24 @@ "documentation":"

Starts a job to create a one-time backup of the specified resource.

", "idempotent":true }, + "StartCopyJob":{ + "name":"StartCopyJob", + "http":{ + "method":"PUT", + "requestUri":"/copy-jobs" + }, + "input":{"shape":"StartCopyJobInput"}, + "output":{"shape":"StartCopyJobOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Starts a job to create a one-time copy of the specified resource.

", + "idempotent":true + }, "StartRestoreJob":{ "name":"StartRestoreJob", "http":{ @@ -985,6 +1035,10 @@ "RuleId":{ "shape":"string", "documentation":"

Uniquely identifies a rule that is used to schedule the backup of a selection of resources.

" + }, + "CopyActions":{ + "shape":"CopyActions", + "documentation":"

An array of CopyAction objects, which contains the details of the copy operation.

" } }, "documentation":"

Specifies a scheduled task used to back up a selection of resources.

" @@ -1023,6 +1077,10 @@ "RecoveryPointTags":{ "shape":"Tags", "documentation":"

To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair.

" + }, + "CopyActions":{ + "shape":"CopyActions", + "documentation":"

An array of CopyAction objects, which contains the details of the copy operation.

" } }, "documentation":"

Specifies a scheduled task used to back up a selection of resources.

" @@ -1056,11 +1114,11 @@ }, "Resources":{ "shape":"ResourceArns", - "documentation":"

An array of strings that either contain Amazon Resource Names (ARNs) or match patterns such as \"arn:aws:ec2:us-east-1:123456789012:volume/*\" of resources to assign to a backup plan.

" + "documentation":"

An array of strings that contain Amazon Resource Names (ARNs) of resources to assign to a backup plan.

" }, "ListOfTags":{ "shape":"ListOfTags", - "documentation":"

An array of conditions used to specify a set of resources to assign to a backup plan; for example, \"StringEquals\": {\"ec2:ResourceTag/Department\": \"accounting\".

" + "documentation":"

An array of conditions used to specify a set of resources to assign to a backup plan; for example, \"STRINGEQUALS\": {\"ec2:ResourceTag/Department\": \"accounting\".

" } }, "documentation":"

Used to specify a set of resources to a backup plan.

" @@ -1108,8 +1166,16 @@ "enum":[ "BACKUP_JOB_STARTED", "BACKUP_JOB_COMPLETED", + "BACKUP_JOB_SUCCESSFUL", + "BACKUP_JOB_FAILED", + "BACKUP_JOB_EXPIRED", "RESTORE_JOB_STARTED", "RESTORE_JOB_COMPLETED", + "RESTORE_JOB_SUCCESSFUL", + "RESTORE_JOB_FAILED", + "COPY_JOB_STARTED", + "COPY_JOB_SUCCESSFUL", + "COPY_JOB_FAILED", "RECOVERY_POINT_MODIFIED", "BACKUP_PLAN_CREATED", "BACKUP_PLAN_MODIFIED" @@ -1182,7 +1248,7 @@ "members":{ "ConditionType":{ "shape":"ConditionType", - "documentation":"

An operation, such as StringEquals, that is applied to a key-value pair used to filter resources in a selection.

" + "documentation":"

An operation, such as STRINGEQUALS, that is applied to a key-value pair used to filter resources in a selection.

" }, "ConditionKey":{ "shape":"ConditionKey", @@ -1193,7 +1259,7 @@ "documentation":"

The value in a key-value pair. For example, in \"ec2:ResourceTag/Department\": \"accounting\", \"accounting\" is the value.

" } }, - "documentation":"

Contains an array of triplets made up of a condition type (such as StringEquals), a key, and a value. Conditions are used to filter resources in a selection that is assigned to a backup plan.

" + "documentation":"

Contains an array of triplets made up of a condition type (such as STRINGEQUALS), a key, and a value. Conditions are used to filter resources in a selection that is assigned to a backup plan.

" }, "ConditionKey":{"type":"string"}, "ConditionType":{ @@ -1201,6 +1267,94 @@ "enum":["STRINGEQUALS"] }, "ConditionValue":{"type":"string"}, + "CopyAction":{ + "type":"structure", + "required":["DestinationBackupVaultArn"], + "members":{ + "Lifecycle":{"shape":"Lifecycle"}, + "DestinationBackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup. For example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + } + }, + "documentation":"

The details of the copy operation.

" + }, + "CopyActions":{ + "type":"list", + "member":{"shape":"CopyAction"} + }, + "CopyJob":{ + "type":"structure", + "members":{ + "CopyJobId":{ + "shape":"string", + "documentation":"

Uniquely identifies a request to AWS Backup to copy a resource.

" + }, + "SourceBackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a source copy vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "SourceRecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a source recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "DestinationBackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a destination copy vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "DestinationRecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a destination recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "ResourceArn":{ + "shape":"ARN", + "documentation":"

The type of AWS resource to be copied; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time a copy job is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CompletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time a job to create a copy job is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "State":{ + "shape":"CopyJobState", + "documentation":"

The current state of a resource recovery point.

" + }, + "StatusMessage":{ + "shape":"string", + "documentation":"

A detailed message explaining the status of the job that to copy a resource.

" + }, + "BackupSizeInBytes":{ + "shape":"Long", + "documentation":"

The size, in bytes, of a copy job.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

Specifies the IAM role ARN used to copy the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "CreatedBy":{"shape":"RecoveryPointCreator"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource to be copied; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + } + }, + "documentation":"

Contains detailed information about a copy job.

" + }, + "CopyJobState":{ + "type":"string", + "enum":[ + "CREATED", + "RUNNING", + "COMPLETED", + "FAILED" + ] + }, + "CopyJobsList":{ + "type":"list", + "member":{"shape":"CopyJob"} + }, "CreateBackupPlanInput":{ "type":"structure", "required":["BackupPlan"], @@ -1255,7 +1409,7 @@ }, "BackupSelection":{ "shape":"BackupSelection", - "documentation":"

Specifies the body of a request to assign a set of resources to a backup plan.

It includes an array of resources, an optional array of patterns to exclude resources, an optional role to provide access to the AWS service the resource belongs to, and an optional array of tags used to identify a set of resources.

" + "documentation":"

Specifies the body of a request to assign a set of resources to a backup plan.

" }, "CreatorRequestId":{ "shape":"string", @@ -1577,6 +1731,27 @@ } } }, + "DescribeCopyJobInput":{ + "type":"structure", + "required":["CopyJobId"], + "members":{ + "CopyJobId":{ + "shape":"string", + "documentation":"

Uniquely identifies a request to AWS Backup to copy a resource.

", + "location":"uri", + "locationName":"copyJobId" + } + } + }, + "DescribeCopyJobOutput":{ + "type":"structure", + "members":{ + "CopyJob":{ + "shape":"CopyJob", + "documentation":"

Contains detailed information about a copy job.

" + } + } + }, "DescribeProtectedResourceInput":{ "type":"structure", "required":["ResourceArn"], @@ -1903,7 +2078,7 @@ "members":{ "BackupSelection":{ "shape":"BackupSelection", - "documentation":"

Specifies the body of a request to assign a set of resources to a backup plan.

It includes an array of resources, an optional array of patterns to exclude resources, an optional role to provide access to the AWS service that the resource belongs to, and an optional array of tags used to identify a set of resources.

" + "documentation":"

Specifies the body of a request to assign a set of resources to a backup plan.

" }, "SelectionId":{ "shape":"string", @@ -2019,7 +2194,7 @@ }, "RestoreMetadata":{ "shape":"Metadata", - "documentation":"

A set of metadata key-value pairs that lists the metadata key-value pairs that are required to restore the recovery point.

" + "documentation":"

The set of metadata key-value pairs that describes the original configuration of the backed-up resource. These values vary depending on the service that is being restored.

" } } }, @@ -2028,7 +2203,7 @@ "members":{ "ResourceTypes":{ "shape":"ResourceTypes", - "documentation":"

Contains a string with the supported AWS resource types:

  • EBS for Amazon Elastic Block Store

  • SGW for AWS Storage Gateway

  • RDS for Amazon Relational Database Service

  • DDB for Amazon DynamoDB

  • EFS for Amazon Elastic File System

" + "documentation":"

Contains a string with the supported AWS resource types:

  • EBS for Amazon Elastic Block Store

  • Storage Gateway for AWS Storage Gateway

  • RDS for Amazon Relational Database Service

  • DDB for Amazon DynamoDB

  • EFS for Amazon Elastic File System

" } } }, @@ -2077,10 +2252,10 @@ }, "DeleteAfterDays":{ "shape":"Long", - "documentation":"

Specifies the number of days after creation that a recovery point is deleted. Must be greater than MoveToColdStorageAfterDays.

" + "documentation":"

Specifies the number of days after creation that a recovery point is deleted. Must be greater than 90 days plus MoveToColdStorageAfterDays.

" } }, - "documentation":"

Contains an array of Transition objects specifying how long in days before a recovery point transitions to cold storage or is deleted.

" + "documentation":"

Contains an array of Transition objects specifying how long in days before a recovery point transitions to cold storage or is deleted.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, on the console, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" }, "LimitExceededException":{ "type":"structure", @@ -2146,7 +2321,7 @@ }, "ByResourceType":{ "shape":"ResourceType", - "documentation":"

Returns only backup jobs for the specified resources:

  • EBS for Amazon Elastic Block Store

  • SGW for AWS Storage Gateway

  • RDS for Amazon Relational Database Service

  • DDB for Amazon DynamoDB

  • EFS for Amazon Elastic File System

", + "documentation":"

Returns only backup jobs for the specified resources:

  • DynamoDB for Amazon DynamoDB

  • EBS for Amazon Elastic Block Store

  • EFS for Amazon Elastic File System

  • RDS for Amazon Relational Database Service

  • Storage Gateway for AWS Storage Gateway

", "location":"querystring", "locationName":"resourceType" } @@ -2335,6 +2510,72 @@ } } }, + "ListCopyJobsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "ByResourceArn":{ + "shape":"ARN", + "documentation":"

Returns only copy jobs that match the specified resource Amazon Resource Name (ARN).

", + "location":"querystring", + "locationName":"resourceArn" + }, + "ByState":{ + "shape":"CopyJobState", + "documentation":"

Returns only copy jobs that are in the specified state.

", + "location":"querystring", + "locationName":"state" + }, + "ByCreatedBefore":{ + "shape":"timestamp", + "documentation":"

Returns only copy jobs that were created before the specified date.

", + "location":"querystring", + "locationName":"createdBefore" + }, + "ByCreatedAfter":{ + "shape":"timestamp", + "documentation":"

Returns only copy jobs that were created after the specified date.

", + "location":"querystring", + "locationName":"createdAfter" + }, + "ByResourceType":{ + "shape":"ResourceType", + "documentation":"

Returns only backup jobs for the specified resources:

  • DynamoDB for Amazon DynamoDB

  • EBS for Amazon Elastic Block Store

  • EFS for Amazon Elastic File System

  • RDS for Amazon Relational Database Service

  • Storage Gateway for AWS Storage Gateway

", + "location":"querystring", + "locationName":"resourceType" + }, + "ByDestinationVaultArn":{ + "shape":"string", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault to copy to; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

", + "location":"querystring", + "locationName":"destinationVaultArn" + } + } + }, + "ListCopyJobsOutput":{ + "type":"structure", + "members":{ + "CopyJobs":{ + "shape":"CopyJobsList", + "documentation":"

An array of structures containing metadata about your copy jobs returned in JSON format.

" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.
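A hedged sketch of how the new ListCopyJobs operation could be paged with the boto3 client generated from this model; the snake_case method name follows botocore's usual transformation, and the filter value and printed fields are illustrative assumptions.

```python
import boto3

backup = boto3.client("backup")

# Page through copy jobs, filtering by resource type; values here are placeholders
# and the printed keys are assumed members of the CopyJob structure.
kwargs = {"MaxResults": 100, "ByResourceType": "EBS"}
while True:
    page = backup.list_copy_jobs(**kwargs)
    for job in page.get("CopyJobs", []):
        print(job.get("CopyJobId"), job.get("State"))
    token = page.get("NextToken")
    if not token:
        break
    kwargs["NextToken"] = token
```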

" + } + } + }, "ListOfTags":{ "type":"list", "member":{"shape":"Condition"} @@ -2549,7 +2790,8 @@ "Metadata":{ "type":"map", "key":{"shape":"MetadataKey"}, - "value":{"shape":"MetadataValue"} + "value":{"shape":"MetadataValue"}, + "sensitive":true }, "MetadataKey":{"type":"string"}, "MetadataValue":{"type":"string"}, @@ -2905,7 +3147,7 @@ }, "IdempotencyToken":{ "shape":"string", - "documentation":"

A customer chosen string that can be used to distinguish between calls to StartBackupJob. Idempotency tokens time out after one hour. Therefore, if you call StartBackupJob multiple times with the same idempotency token within one hour, AWS Backup recognizes that you are requesting only one backup job and initiates only one. If you change the idempotency token for each call, AWS Backup recognizes that you are requesting to start multiple backups.

" + "documentation":"

A customer chosen string that can be used to distinguish between calls to StartBackupJob.

" }, "StartWindowMinutes":{ "shape":"WindowMinutes", @@ -2942,6 +3184,51 @@ } } }, + "StartCopyJobInput":{ + "type":"structure", + "required":[ + "RecoveryPointArn", + "SourceBackupVaultName", + "DestinationBackupVaultArn", + "IamRoleArn" + ], + "members":{ + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point to use for the copy job; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "SourceBackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical source container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "DestinationBackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a destination backup vault to copy to; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

Specifies the IAM role ARN used to copy the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + }, + "IdempotencyToken":{ + "shape":"string", + "documentation":"

A customer chosen string that can be used to distinguish between calls to StartCopyJob.

" + }, + "Lifecycle":{"shape":"Lifecycle"} + } + }, + "StartCopyJobOutput":{ + "type":"structure", + "members":{ + "CopyJobId":{ + "shape":"string", + "documentation":"

Uniquely identifies a request to AWS Backup to copy a resource.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a copy job is started, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
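A minimal, hypothetical example of calling the new StartCopyJob operation via boto3, assuming the generated start_copy_job method; every ARN, vault name, and token below is a placeholder.

```python
import uuid
import boto3

backup = boto3.client("backup")

# All ARNs and names below are placeholders.
response = backup.start_copy_job(
    RecoveryPointArn="arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45",
    SourceBackupVaultName="aBackupVault",
    DestinationBackupVaultArn="arn:aws:backup:us-west-2:123456789012:vault:aDestinationVault",
    IamRoleArn="arn:aws:iam::123456789012:role/S3Access",
    IdempotencyToken=str(uuid.uuid4()),  # repeated calls with the same token refer to the same request
)
print(response["CopyJobId"], response["CreationDate"])
```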

" + } + } + }, "StartRestoreJobInput":{ "type":"structure", "required":[ @@ -2956,7 +3243,7 @@ }, "Metadata":{ "shape":"Metadata", - "documentation":"

A set of metadata key-value pairs. Lists the metadata that the recovery point was created with.

" + "documentation":"

A set of metadata key-value pairs. Contains information, such as a resource name, required to restore a recovery point.

You can get configuration metadata about a resource at the time it was backed-up by calling GetRecoveryPointRestoreMetadata. However, values in addition to those provided by GetRecoveryPointRestoreMetadata might be required to restore a resource. For example, you might need to provide a new resource name if the original already exists.

You need to specify specific metadata to restore an Amazon Elastic File System (Amazon EFS) instance:

  • file-system-id: ID of the Amazon EFS file system that is backed up by AWS Backup. Returned in GetRecoveryPointRestoreMetadata.

  • Encrypted: A Boolean value that, if true, specifies that the file system is encrypted. If KmsKeyId is specified, Encrypted must be set to true.

  • KmsKeyId: Specifies the AWS KMS key that is used to encrypt the restored file system.

  • PerformanceMode: Specifies the throughput mode of the file system.

  • CreationToken: A user-supplied value that ensures the uniqueness (idempotency) of the request.

  • newFileSystem: A Boolean value that, if true, specifies that the recovery point is restored to a new Amazon EFS file system.
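The EFS metadata items listed above can be assembled into the string-to-string Metadata map expected by StartRestoreJob. A hedged sketch, with all identifiers as placeholders:

```python
import uuid
import boto3

backup = boto3.client("backup")

# Keys are the EFS-specific metadata items listed above; all values are placeholders
# and must be passed as strings (the Metadata map is string-to-string).
metadata = {
    "file-system-id": "fs-0123456789abcdef0",
    "Encrypted": "true",
    "KmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555",
    "PerformanceMode": "generalPurpose",
    "CreationToken": str(uuid.uuid4()),
    "newFileSystem": "true",
}

backup.start_restore_job(
    RecoveryPointArn="arn:aws:backup:us-east-1:123456789012:recovery-point:EXAMPLE",
    Metadata=metadata,
    IamRoleArn="arn:aws:iam::123456789012:role/S3Access",
    ResourceType="EFS",
    IdempotencyToken=str(uuid.uuid4()),
)
```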

" }, "IamRoleArn":{ "shape":"IAMRoleArn", @@ -2964,11 +3251,11 @@ }, "IdempotencyToken":{ "shape":"string", - "documentation":"

A customer chosen string that can be used to distinguish between calls to StartRestoreJob. Idempotency tokens time out after one hour. Therefore, if you call StartRestoreJob multiple times with the same idempotency token within one hour, AWS Backup recognizes that you are requesting only one restore job and initiates only one. If you change the idempotency token for each call, AWS Backup recognizes that you are requesting to start multiple restores.

" + "documentation":"

A customer chosen string that can be used to distinguish between calls to StartRestoreJob.

" }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

Starts a job to restore a recovery point for one of the following resources:

  • EBS for Amazon Elastic Block Store

  • SGW for AWS Storage Gateway

  • RDS for Amazon Relational Database Service

  • DDB for Amazon DynamoDB

  • EFS for Amazon Elastic File System

" + "documentation":"

Starts a job to restore a recovery point for one of the following resources:

  • EBS for Amazon Elastic Block Store

  • Storage Gateway for AWS Storage Gateway

  • RDS for Amazon Relational Database Service

  • DDB for Amazon DynamoDB

  • EFS for Amazon Elastic File System

" } } }, diff --git a/botocore/data/batch/2016-08-10/service-2.json b/botocore/data/batch/2016-08-10/service-2.json index a8f97a57..62bfde5f 100644 --- a/botocore/data/batch/2016-08-10/service-2.json +++ b/botocore/data/batch/2016-08-10/service-2.json @@ -94,7 +94,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Deregisters an AWS Batch job definition.

" + "documentation":"

Deregisters an AWS Batch job definition. Job definitions will be permanently deleted after 180 days.

" }, "DescribeComputeEnvironments":{ "name":"DescribeComputeEnvironments", @@ -510,7 +510,7 @@ }, "allocationStrategy":{ "shape":"CRAllocationStrategy", - "documentation":"

The allocation strategy to use for the compute resource in case not enough instances of the best fitting instance type can be allocated. This could be due to availability of the instance type in the region or Amazon EC2 service limits. If this is not specified, the default is BEST_FIT, which will use only the best fitting instance type, waiting for additional capacity if it's not available. This allocation strategy keeps costs lower but can limit scaling. BEST_FIT_PROGRESSIVE will select an additional instance type that is large enough to meet the requirements of the jobs in the queue, with a preference for an instance type with a lower cost. SPOT_CAPACITY_OPTIMIZED is only available for Spot Instance compute resources and will select an additional instance type that is large enough to meet the requirements of the jobs in the queue, with a preference for an instance type that is less likely to be interrupted.

" + "documentation":"

The allocation strategy to use for the compute resource in case not enough instances of the best fitting instance type can be allocated. This could be due to availability of the instance type in the region or Amazon EC2 service limits. If this is not specified, the default is BEST_FIT, which will use only the best fitting instance type, waiting for additional capacity if it's not available. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with BEST_FIT, then the Spot Fleet IAM Role must be specified. BEST_FIT_PROGRESSIVE will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per vCPU. SPOT_CAPACITY_OPTIMIZED is only available for Spot Instance compute resources and will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. For more information, see Allocation Strategies in the AWS Batch User Guide.

" }, "minvCpus":{ "shape":"Integer", @@ -562,7 +562,7 @@ }, "spotIamFleetRole":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. For more information, see Amazon EC2 Spot Fleet Role in the AWS Batch User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. This role is required if the allocation strategy is set to BEST_FIT or if the allocation strategy is not specified. For more information, see Amazon EC2 Spot Fleet Role in the AWS Batch User Guide.
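A hedged sketch of a managed Spot compute environment that supplies spotIamFleetRole, as required when the allocation strategy is BEST_FIT or unset; all ARNs, subnets, and security groups are placeholders.

```python
import boto3

batch = boto3.client("batch")

# Placeholders throughout; BEST_FIT (the default) is the case that requires spotIamFleetRole.
batch.create_compute_environment(
    computeEnvironmentName="spot-ce",
    type="MANAGED",
    serviceRole="arn:aws:iam::123456789012:role/AWSBatchServiceRole",
    computeResources={
        "type": "SPOT",
        "allocationStrategy": "SPOT_CAPACITY_OPTIMIZED",
        "minvCpus": 0,
        "maxvCpus": 64,
        "instanceTypes": ["optimal"],
        "subnets": ["subnet-0123456789abcdef0"],
        "securityGroupIds": ["sg-0123456789abcdef0"],
        "instanceRole": "arn:aws:iam::123456789012:instance-profile/ecsInstanceRole",
        # Required when allocationStrategy is BEST_FIT (or omitted) and type is SPOT:
        "spotIamFleetRole": "arn:aws:iam::123456789012:role/aws-ec2-spot-fleet-tagging-role",
    },
)
```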

" }, "launchTemplate":{ "shape":"LaunchTemplateSpecification", @@ -1239,7 +1239,7 @@ }, "dependsOn":{ "shape":"JobDependencyList", - "documentation":"

A list of job names or IDs on which this job depends.

" + "documentation":"

A list of job IDs on which this job depends.

" }, "jobDefinition":{ "shape":"String", @@ -1775,7 +1775,7 @@ }, "jobDefinition":{ "shape":"String", - "documentation":"

The job definition used by this job. This value can be either a name:revision or the Amazon Resource Name (ARN) for the job definition.

" + "documentation":"

The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. If name is specified without a revision, then the latest active revision is used.
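A small illustrative example of the dependsOn and name:revision behaviors described above, using the boto3 Batch client; queue and definition names are placeholders.

```python
import boto3

batch = boto3.client("batch")

# Submit a job that depends on an earlier job; names, queue, and IDs are placeholders.
first = batch.submit_job(
    jobName="stage-data",
    jobQueue="example-queue",
    jobDefinition="example-def",        # bare name: the latest active revision is used
)

batch.submit_job(
    jobName="process-data",
    jobQueue="example-queue",
    jobDefinition="example-def:3",      # name:revision pins a specific revision
    dependsOn=[{"jobId": first["jobId"]}],
)
```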

" }, "parameters":{ "shape":"ParametersMap", diff --git a/botocore/data/ce/2017-10-25/service-2.json b/botocore/data/ce/2017-10-25/service-2.json index 0ec264e8..11ae47f1 100644 --- a/botocore/data/ce/2017-10-25/service-2.json +++ b/botocore/data/ce/2017-10-25/service-2.json @@ -26,7 +26,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Cost Category is in preview release for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Creates a new Cost Category with the requested name and rules.

" + "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Creates a new Cost Category with the requested name and rules.

" }, "DeleteCostCategoryDefinition":{ "name":"DeleteCostCategoryDefinition", @@ -40,7 +40,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Cost Category is in preview release for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Deletes a Cost Category. Expenses from this month going forward will no longer be categorized with this Cost Category.

" + "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Deletes a Cost Category. Expenses from this month going forward will no longer be categorized with this Cost Category.

" }, "DescribeCostCategoryDefinition":{ "name":"DescribeCostCategoryDefinition", @@ -54,7 +54,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Cost Category is in preview release for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Returns the name, ARN, rules, definition, and effective dates of a Cost Category that's defined in the account.

You have the option to use EffectiveOn to return a Cost Category that is active on a specific date. If there is no EffectiveOn specified, you’ll see a Cost Category that is effective on the current date. If Cost Category is still effective, EffectiveEnd is omitted in the response.

" + "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Returns the name, ARN, rules, definition, and effective dates of a Cost Category that's defined in the account.

You have the option to use EffectiveOn to return a Cost Category that is active on a specific date. If there is no EffectiveOn specified, you’ll see a Cost Category that is effective on the current date. If the Cost Category is still effective, EffectiveEnd is omitted in the response.

" }, "GetCostAndUsage":{ "name":"GetCostAndUsage", @@ -281,7 +281,7 @@ "errors":[ {"shape":"LimitExceededException"} ], - "documentation":"

Cost Category is in preview release for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Returns the name, ARN and effective dates of all Cost Categories defined in the account. You have the option to use EffectiveOn to return a list of Cost Categories that were active on a specific date. If there is no EffectiveOn specified, you’ll see Cost Categories that are effective on the current date. If Cost Category is still effective, EffectiveEnd is omitted in the response.

" + "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Returns the name, ARN, and effective dates of all Cost Categories defined in the account. You have the option to use EffectiveOn to return a list of Cost Categories that were active on a specific date. If there is no EffectiveOn specified, you’ll see Cost Categories that are effective on the current date. If the Cost Category is still effective, EffectiveEnd is omitted in the response.

" }, "UpdateCostCategoryDefinition":{ "name":"UpdateCostCategoryDefinition", @@ -296,7 +296,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Cost Category is in preview release for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Updates an existing Cost Category. Changes made to the Cost Category rules will be used to categorize the current month’s expenses and future expenses. This won’t change categorization for the previous months.

" + "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Updates an existing Cost Category. Changes made to the Cost Category rules will be used to categorize the current month’s expenses and future expenses. This won’t change categorization for the previous months.

" } }, "shapes":{ @@ -367,7 +367,7 @@ "documentation":"

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" } }, - "documentation":"

Cost Category is in preview release for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

The structure of Cost Categories. This includes detailed metadata and the set of rules for the CostCategory object.

" + "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

The structure of Cost Categories. This includes detailed metadata and the set of rules for the CostCategory object.

" }, "CostCategoryName":{ "type":"string", @@ -393,7 +393,7 @@ "documentation":"

The Cost Category's effective end date.

" } }, - "documentation":"

Cost Category is in preview release for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

A reference to a Cost Category containing only enough information to identify the Cost Category.

You can use this information to retrieve the full Cost Category information using DescribeCostCategory.

" + "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

A reference to a Cost Category containing only enough information to identify the Cost Category.

You can use this information to retrieve the full Cost Category information using DescribeCostCategory.

" }, "CostCategoryReferencesList":{ "type":"list", @@ -412,7 +412,7 @@ "documentation":"

An Expression object used to categorize costs. This supports dimensions, Tags, and nested expressions. Currently, the only dimension supported is LINKED_ACCOUNT.

Root level OR is not supported. We recommend you create a separate rule instead.

" } }, - "documentation":"

Cost Category is in preview release for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" + "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" }, "CostCategoryRuleVersion":{ "type":"string", @@ -441,7 +441,7 @@ "documentation":"

The specific value of the Cost Category.

" } }, - "documentation":"

Cost Category is in preview release for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

The values that are available for Cost Categories.

" + "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

The values that are available for Cost Categories.

" }, "Coverage":{ "type":"structure", @@ -551,7 +551,7 @@ "RuleVersion":{"shape":"CostCategoryRuleVersion"}, "Rules":{ "shape":"CostCategoryRulesList", - "documentation":"

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" + "documentation":"

CreateCostCategoryDefinition supports dimensions, Tags, and nested expressions. Currently, the only dimension supported is LINKED_ACCOUNT.

Root level OR is not supported. We recommend you create a separate rule instead.

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.
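A hedged sketch of creating a Cost Category with a single LINKED_ACCOUNT rule; the account IDs are placeholders, and the RuleVersion literal is assumed to be the currently published rule version rather than taken from this diff.

```python
import boto3

ce = boto3.client("ce")

# Account IDs are placeholders; the RuleVersion string is an assumption.
ce.create_cost_category_definition(
    Name="Teams",
    RuleVersion="CostCategoryExpression.v1",
    Rules=[
        {
            "Value": "Platform",
            "Rule": {
                "Dimensions": {
                    "Key": "LINKED_ACCOUNT",  # currently the only supported dimension
                    "Values": ["111122223333", "444455556666"],
                }
            },
        }
    ],
)
```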

" } } }, @@ -939,7 +939,7 @@ }, "CostCategories":{ "shape":"CostCategoryValues", - "documentation":"

Cost Category is in preview release for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

The specific CostCategory used for Expression.

" + "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

The specific CostCategory used for Expression.

" } }, "documentation":"

Use Expression to filter by cost or by usage. There are two patterns:

  • Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for REGION==us-east-1 OR REGION==us-west-1. The Expression for that looks like this:

    { \"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", “us-west-1” ] } }

    The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.

  • Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer). The Expression for that looks like this:

    { \"And\": [ {\"Or\": [ {\"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", \"us-west-1\" ] }}, {\"Tags\": { \"Key\": \"TagName\", \"Values\": [\"Value1\"] } } ]}, {\"Not\": {\"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [\"DataTransfer\"] }}} ] }

    Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.

    { \"And\": [ ... ], \"DimensionValues\": { \"Dimension\": \"USAGE_TYPE\", \"Values\": [ \"DataTransfer\" ] } }

For the GetRightsizingRecommendation action, a combination of OR and NOT is not supported. OR is not supported between different dimensions, or dimensions and tags. NOT operators aren't supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE.
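The compound example above translates directly into the Python dict that boto3 serializes for the Filter parameter; a sketch against GetCostAndUsage, with dates and tag values as placeholders.

```python
import boto3

ce = boto3.client("ce")

# The compound expression from the second bullet above; dates and tag values are placeholders.
expression = {
    "And": [
        {
            "Or": [
                {"Dimensions": {"Key": "REGION", "Values": ["us-east-1", "us-west-1"]}},
                {"Tags": {"Key": "TagName", "Values": ["Value1"]}},
            ]
        },
        {"Not": {"Dimensions": {"Key": "USAGE_TYPE", "Values": ["DataTransfer"]}}},
    ]
}

ce.get_cost_and_usage(
    TimePeriod={"Start": "2020-01-01", "End": "2020-02-01"},
    Granularity="MONTHLY",
    Metrics=["UnblendedCost"],
    Filter=expression,
)
```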

" @@ -1294,7 +1294,7 @@ }, "Filter":{ "shape":"Expression", - "documentation":"

Filters utilization data by dimensions. You can filter by the following dimensions:

  • AZ

  • CACHE_ENGINE

  • DATABASE_ENGINE

  • DEPLOYMENT_OPTION

  • INSTANCE_TYPE

  • LINKED_ACCOUNT

  • OPERATING_SYSTEM

  • PLATFORM

  • REGION

  • SERVICE

  • SCOPE

  • TENANCY

GetReservationUtilization uses the same Expression object as the other operations, but only AND is supported among each dimension, and nesting is supported up to only one level deep. If there are multiple values for a dimension, they are OR'd together.

" + "documentation":"

Filters utilization data by dimensions. You can filter by the following dimensions:

  • AZ

  • CACHE_ENGINE

  • DEPLOYMENT_OPTION

  • INSTANCE_TYPE

  • LINKED_ACCOUNT

  • OPERATING_SYSTEM

  • PLATFORM

  • REGION

  • SERVICE

  • SCOPE

  • TENANCY

GetReservationUtilization uses the same Expression object as the other operations, but only AND is supported among each dimension, and nesting is supported up to only one level deep. If there are multiple values for a dimension, they are OR'd together.
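A brief, assumed example of the AND-only filtering described above for GetReservationUtilization; the dates and linked account ID are placeholders.

```python
import boto3

ce = boto3.client("ce")

# Only AND is supported among dimensions here, with at most one level of nesting.
ce.get_reservation_utilization(
    TimePeriod={"Start": "2020-01-01", "End": "2020-02-01"},
    Filter={
        "And": [
            {"Dimensions": {"Key": "REGION", "Values": ["us-east-1"]}},
            {"Dimensions": {"Key": "LINKED_ACCOUNT", "Values": ["111122223333"]}},
        ]
    },
)
```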

" }, "NextPageToken":{ "shape":"NextPageToken", @@ -2817,7 +2817,7 @@ "RuleVersion":{"shape":"CostCategoryRuleVersion"}, "Rules":{ "shape":"CostCategoryRulesList", - "documentation":"

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" + "documentation":"

UpdateCostCategoryDefinition supports dimensions, Tags, and nested expressions. Currently, the only dimension supported is LINKED_ACCOUNT.

Root level OR is not supported. We recommend you create a separate rule instead.

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" } } }, diff --git a/botocore/data/chime/2018-05-01/service-2.json b/botocore/data/chime/2018-05-01/service-2.json index 7a880c9c..97125229 100644 --- a/botocore/data/chime/2018-05-01/service-2.json +++ b/botocore/data/chime/2018-05-01/service-2.json @@ -73,6 +73,26 @@ ], "documentation":"

Associates phone numbers with the specified Amazon Chime Voice Connector group.

" }, + "AssociateSigninDelegateGroupsWithAccount":{ + "name":"AssociateSigninDelegateGroupsWithAccount", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}?operation=associate-signin-delegate-groups", + "responseCode":200 + }, + "input":{"shape":"AssociateSigninDelegateGroupsWithAccountRequest"}, + "output":{"shape":"AssociateSigninDelegateGroupsWithAccountResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Associates the specified sign-in delegate groups with the specified Amazon Chime account.

" + }, "BatchCreateAttendee":{ "name":"BatchCreateAttendee", "http":{ @@ -108,6 +128,7 @@ {"shape":"NotFoundException"}, {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], @@ -151,7 +172,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Suspends up to 50 users from a Team or EnterpriseLWA Amazon Chime account. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

Users suspended from a Team account are dissasociated from the account, but they can continue to use Amazon Chime as free users. To remove the suspension from suspended Team account users, invite them to the Team account again. You can use the InviteUsers action to do so.

Users suspended from an EnterpriseLWA account are immediately signed out of Amazon Chime and can no longer sign in. To remove the suspension from suspended EnterpriseLWA account users, use the BatchUnsuspendUser action.

To sign out users without suspending them, use the LogoutUser action.

" + "documentation":"

Suspends up to 50 users from a Team or EnterpriseLWA Amazon Chime account. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

Users suspended from a Team account are disassociated from the account, but they can continue to use Amazon Chime as free users. To remove the suspension from suspended Team account users, invite them to the Team account again. You can use the InviteUsers action to do so.

Users suspended from an EnterpriseLWA account are immediately signed out of Amazon Chime and can no longer sign in. To remove the suspension from suspended EnterpriseLWA account users, use the BatchUnsuspendUser action.

To sign out users without suspending them, use the LogoutUser action.

" }, "BatchUnsuspendUser":{ "name":"BatchUnsuspendUser", @@ -270,7 +291,8 @@ {"shape":"BadRequestException"}, {"shape":"UnauthorizedClientException"}, {"shape":"ResourceLimitExceededException"}, - {"shape":"NotFoundException"} + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"} ], "documentation":"

Creates a bot for an Amazon Chime Enterprise account.

" }, @@ -330,6 +352,7 @@ {"shape":"ForbiddenException"}, {"shape":"UnauthorizedClientException"}, {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], @@ -351,11 +374,33 @@ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], "documentation":"

Adds a member to a chat room. A member can be either a user or a bot. The member role designates whether the member is a chat room administrator or a general chat room member.

" }, + "CreateUser":{ + "name":"CreateUser", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/users?operation=create", + "responseCode":201 + }, + "input":{"shape":"CreateUserRequest"}, + "output":{"shape":"CreateUserResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a user under the specified Amazon Chime account.

" + }, "CreateVoiceConnector":{ "name":"CreateVoiceConnector", "http":{ @@ -507,6 +552,7 @@ {"shape":"ForbiddenException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], @@ -525,6 +571,7 @@ {"shape":"NotFoundException"}, {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], @@ -706,6 +753,26 @@ ], "documentation":"

Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector group.

" }, + "DisassociateSigninDelegateGroupsFromAccount":{ + "name":"DisassociateSigninDelegateGroupsFromAccount", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}?operation=disassociate-signin-delegate-groups", + "responseCode":200 + }, + "input":{"shape":"DisassociateSigninDelegateGroupsFromAccountRequest"}, + "output":{"shape":"DisassociateSigninDelegateGroupsFromAccountResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Disassociates the specified sign-in delegate groups from the specified Amazon Chime account.

" + }, "GetAccount":{ "name":"GetAccount", "http":{ @@ -779,7 +846,8 @@ {"shape":"ForbiddenException"}, {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"BadRequestException"} + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"} ], "documentation":"

Retrieves details for the specified bot, such as bot email address, bot type, status, and display name.

" }, @@ -912,10 +980,11 @@ {"shape":"ForbiddenException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves room details, such as name.

" + "documentation":"

Retrieves room details, such as the room name.

" }, "GetUser":{ "name":"GetUser", @@ -1171,7 +1240,8 @@ {"shape":"ForbiddenException"}, {"shape":"UnauthorizedClientException"}, {"shape":"BadRequestException"}, - {"shape":"NotFoundException"} + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"} ], "documentation":"

Lists the bots associated with the administrator's Amazon Chime Enterprise account ID.

" }, @@ -1245,10 +1315,11 @@ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the membership details for the specified room, such as member IDs, member email addresses, and member names.

" + "documentation":"

Lists the membership details for the specified room, such as the members' IDs, email addresses, and names.

" }, "ListRooms":{ "name":"ListRooms", @@ -1264,6 +1335,7 @@ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], @@ -1502,7 +1574,8 @@ {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"} + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"} ], "documentation":"

Regenerates the security token for a bot.

" }, @@ -1622,7 +1695,8 @@ {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"} + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"} ], "documentation":"

Updates the status of the specified bot, such as starting or stopping the bot from running in your Amazon Chime Enterprise account.

" }, @@ -1696,6 +1770,7 @@ {"shape":"ForbiddenException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], @@ -1715,10 +1790,11 @@ {"shape":"NotFoundException"}, {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates room membership details, such as member role. The member role designates whether the member is a chat room administrator or a general chat room member. Member role can only be updated for user IDs.

" + "documentation":"

Updates room membership details, such as the member role. The member role designates whether the member is a chat room administrator or a general chat room member. The member role can be updated only for user IDs.

" }, "UpdateUser":{ "name":"UpdateUser", @@ -1847,6 +1923,10 @@ "SupportedLicenses":{ "shape":"LicenseList", "documentation":"

Supported licenses for the Amazon Chime account.

" + }, + "SigninDelegateGroups":{ + "shape":"SigninDelegateGroupList", + "documentation":"

The sign-in delegate groups associated with the account.

" } }, "documentation":"

The Amazon Chime account details. An AWS account can have multiple Amazon Chime accounts.

" @@ -1884,6 +1964,20 @@ "EnterpriseOIDC" ] }, + "AlexaForBusinessMetadata":{ + "type":"structure", + "members":{ + "IsAlexaForBusinessEnabled":{ + "shape":"Boolean", + "documentation":"

Starts or stops Alexa for Business.

" + }, + "AlexaForBusinessRoomArn":{ + "shape":"SensitiveString", + "documentation":"

The ARN of the room resource.

" + } + }, + "documentation":"

The Alexa for Business metadata associated with an Amazon Chime user, used to integrate Alexa for Business with a device.

" + }, "Arn":{ "type":"string", "max":1024, @@ -1980,6 +2074,30 @@ } } }, + "AssociateSigninDelegateGroupsWithAccountRequest":{ + "type":"structure", + "required":[ + "AccountId", + "SigninDelegateGroups" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "SigninDelegateGroups":{ + "shape":"SigninDelegateGroupList", + "documentation":"

The sign-in delegate groups.
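A minimal sketch of the new operation via boto3, assuming the generated associate_signin_delegate_groups_with_account method; the account ID and group name are placeholders.

```python
import boto3

chime = boto3.client("chime")

# Account ID and Active Directory group name are placeholders.
chime.associate_signin_delegate_groups_with_account(
    AccountId="12a3456b-7c89-012d-3456-78901e23fg45",
    SigninDelegateGroups=[{"GroupName": "ChimeDelegates"}],
)
```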

" + } + } + }, + "AssociateSigninDelegateGroupsWithAccountResponse":{ + "type":"structure", + "members":{ + } + }, "Attendee":{ "type":"structure", "members":{ @@ -2434,7 +2552,7 @@ }, "MediaRegion":{ "shape":"String", - "documentation":"

The Region in which to create the meeting. Available values: us-east-1, us-west-2.

" + "documentation":"

The Region in which to create the meeting. Available values: ap-northeast-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.
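A short illustrative call to CreateMeeting that pins the media Region to one of the values listed above; the idempotency token is generated client-side and the printed field is from the returned Meeting structure.

```python
import uuid
import boto3

chime = boto3.client("chime")

# ClientRequestToken makes the request idempotent; MediaRegion is one of the listed values.
meeting = chime.create_meeting(
    ClientRequestToken=str(uuid.uuid4()),
    MediaRegion="eu-west-1",
)
print(meeting["Meeting"]["MeetingId"])
```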

" }, "NotificationsConfiguration":{ "shape":"MeetingNotificationConfiguration", @@ -2549,6 +2667,36 @@ } } }, + "CreateUserRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "Username":{ + "shape":"String", + "documentation":"

The user name.

" + }, + "Email":{ + "shape":"EmailAddress", + "documentation":"

The user's email address.

" + }, + "UserType":{ + "shape":"UserType", + "documentation":"

The user type.

" + } + } + }, + "CreateUserResponse":{ + "type":"structure", + "members":{ + "User":{"shape":"User"} + } + }, "CreateVoiceConnectorGroupRequest":{ "type":"structure", "required":["Name"], @@ -2908,6 +3056,30 @@ } } }, + "DisassociateSigninDelegateGroupsFromAccountRequest":{ + "type":"structure", + "required":[ + "AccountId", + "GroupNames" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "GroupNames":{ + "shape":"NonEmptyStringList", + "documentation":"

The sign-in delegate group names.

" + } + } + }, + "DisassociateSigninDelegateGroupsFromAccountResponse":{ + "type":"structure", + "members":{ + } + }, "E164PhoneNumber":{ "type":"string", "pattern":"^\\+?[1-9]\\d{1,14}$", @@ -3495,6 +3667,10 @@ "UserEmailList":{ "shape":"UserEmailList", "documentation":"

The user email addresses to which to send the email invitation.

" + }, + "UserType":{ + "shape":"UserType", + "documentation":"

The user type.

" } } }, @@ -3865,6 +4041,12 @@ "location":"querystring", "locationName":"user-email" }, + "UserType":{ + "shape":"UserType", + "documentation":"

The user type.

", + "location":"querystring", + "locationName":"user-type" + }, "MaxResults":{ "shape":"ProfileServiceMaxResults", "documentation":"

The maximum number of results to return in a single call. Defaults to 100.

", @@ -4052,7 +4234,7 @@ }, "MediaRegion":{ "shape":"String", - "documentation":"

The Region in which to create the meeting. Available values: us-east-1, us-west-2.

" + "documentation":"

The Region in which to create the meeting. Available values: ap-northeast-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" } }, "documentation":"

A meeting created using the Amazon Chime SDK.

" @@ -4901,6 +5083,20 @@ "exception":true, "fault":true }, + "SigninDelegateGroup":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"NonEmptyString", + "documentation":"

The group name.

" + } + }, + "documentation":"

An Active Directory (AD) group whose members are granted permission to act as delegates.

" + }, + "SigninDelegateGroupList":{ + "type":"list", + "member":{"shape":"SigninDelegateGroup"} + }, "StreamingConfiguration":{ "type":"structure", "required":["DataRetentionInHours"], @@ -5279,6 +5475,14 @@ "LicenseType":{ "shape":"License", "documentation":"

The user license type to update. This must be a supported license type for the Amazon Chime account that the user belongs to.

" + }, + "UserType":{ + "shape":"UserType", + "documentation":"

The user type.

" + }, + "AlexaForBusinessMetadata":{ + "shape":"AlexaForBusinessMetadata", + "documentation":"

The Alexa for Business metadata.

" } } }, @@ -5293,6 +5497,14 @@ "LicenseType":{ "shape":"License", "documentation":"

The user license type.

" + }, + "UserType":{ + "shape":"UserType", + "documentation":"

The user type.

" + }, + "AlexaForBusinessMetadata":{ + "shape":"AlexaForBusinessMetadata", + "documentation":"

The Alexa for Business metadata.

" } }, "documentation":"

The user ID and user fields to update, used with the BatchUpdateUser action.

" @@ -5435,6 +5647,10 @@ "shape":"License", "documentation":"

The license type for the user.

" }, + "UserType":{ + "shape":"UserType", + "documentation":"

The user type.

" + }, "UserRegistrationStatus":{ "shape":"RegistrationStatus", "documentation":"

The user registration status.

" @@ -5451,6 +5667,10 @@ "shape":"Iso8601Timestamp", "documentation":"

Date and time when the user is invited to the Amazon Chime account, in ISO 8601 format.

" }, + "AlexaForBusinessMetadata":{ + "shape":"AlexaForBusinessMetadata", + "documentation":"

The Alexa for Business metadata.

" + }, "PersonalPIN":{ "shape":"String", "documentation":"

The user's personal meeting PIN.

" @@ -5505,6 +5725,13 @@ }, "documentation":"

Settings associated with an Amazon Chime user, including inbound and outbound calling and text messaging.

" }, + "UserType":{ + "type":"string", + "enum":[ + "PrivateUser", + "SharedDevice" + ] + }, "VoiceConnector":{ "type":"structure", "members":{ diff --git a/botocore/data/cloudfront/2019-03-26/service-2.json b/botocore/data/cloudfront/2019-03-26/service-2.json index 1ab4369a..f93309e8 100644 --- a/botocore/data/cloudfront/2019-03-26/service-2.json +++ b/botocore/data/cloudfront/2019-03-26/service-2.json @@ -923,7 +923,7 @@ }, "ICPRecordalStatus":{ "shape":"ICPRecordalStatus", - "documentation":"

The Internet Content Provider (ICP) recordal status for a CNAME. The ICPRecordalStatus is set to APPROVED for all CNAMEs (aliases) in regions outside of China.

The status values returned are the following:

  • APPROVED indicates that the associated CNAME has a valid ICP recordal number. Multiple CNAMEs can be associated with a distribution, and CNAMEs can correspond to different ICP recordals. To be marked as APPROVED, that is, valid to use with China region, a CNAME must have one ICP recordal number associated with it.

  • SUSPENDED indicates that the associated CNAME does not have a valid ICP recordal number.

  • PENDING indicates that at least one CNAME associated with the distribution does not have a valid ICP recordal number.

" + "documentation":"

The Internet Content Provider (ICP) recordal status for a CNAME. The ICPRecordalStatus is set to APPROVED for all CNAMEs (aliases) in regions outside of China.

The status values returned are the following:

  • APPROVED indicates that the associated CNAME has a valid ICP recordal number. Multiple CNAMEs can be associated with a distribution, and CNAMEs can correspond to different ICP recordals. To be marked as APPROVED, that is, valid to use with China region, a CNAME must have one ICP recordal number associated with it.

  • SUSPENDED indicates that the associated CNAME does not have a valid ICP recordal number.

  • PENDING indicates that CloudFront can't determine the ICP recordal status of the CNAME associated with the distribution because there was an error in trying to determine the status. You can try again to see if the error is resolved, in which case CloudFront returns an APPROVED or SUSPENDED status.

" } }, "documentation":"

AWS services in China customers must file for an Internet Content Provider (ICP) recordal if they want to serve content publicly on an alternate domain name, also known as a CNAME, that they've added to CloudFront. AliasICPRecordal provides the ICP recordal status for CNAMEs associated with distributions. The status is returned in the CloudFront response; you can't configure it yourself.

For more information about ICP recordals, see Signup, Accounts, and Credentials in Getting Started with AWS services in China.

" @@ -1022,7 +1022,7 @@ }, "ForwardedValues":{ "shape":"ForwardedValues", - "documentation":"

A complex type that specifies how CloudFront handles query strings and cookies.

" + "documentation":"

A complex type that specifies how CloudFront handles query strings, cookies, and HTTP headers.

" }, "TrustedSigners":{ "shape":"TrustedSigners", @@ -1321,11 +1321,11 @@ "members":{ "Quantity":{ "shape":"integer", - "documentation":"

The number of different cookies that you want CloudFront to forward to the origin for this cache behavior.

" + "documentation":"

The number of different cookies that you want CloudFront to forward to the origin for this cache behavior. The value must equal the number of items that are in the Items field.

When you set Forward = whitelist (in the CookiePreferences object), this value must be 1 or higher.

" }, "Items":{ "shape":"CookieNameList", - "documentation":"

A complex type that contains one Name element for each cookie that you want CloudFront to forward to the origin for this cache behavior.

" + "documentation":"

A complex type that contains one Name element for each cookie that you want CloudFront to forward to the origin for this cache behavior. It must contain the same number of items that is specified in the Quantity field.

When you set Forward = whitelist (in the CookiePreferences object), this field must contain at least one item.
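A sketch (plain Python data, not an API call) of a cookie-forwarding fragment that satisfies the Quantity/Items rule described above; the cookie names are placeholders.

```python
# The cookie-forwarding fragment of a cache behavior's ForwardedValues,
# matching the Quantity/Items rule described above.
cookies = {
    "Forward": "whitelist",
    "WhitelistedNames": {
        "Quantity": 2,                       # must equal len(Items)
        "Items": ["session-id", "locale"],   # at least one item when Forward is whitelist
    },
}

assert cookies["WhitelistedNames"]["Quantity"] == len(cookies["WhitelistedNames"]["Items"])
```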

" } }, "documentation":"

A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. For more information about forwarding cookies to the origin, see Caching Content Based on Request Headers in the Amazon CloudFront Developer Guide.

" @@ -1336,11 +1336,11 @@ "members":{ "Forward":{ "shape":"ItemSelection", - "documentation":"

Specifies which cookies to forward to the origin for this cache behavior: all, none, or the list of cookies specified in the WhitelistedNames complex type.

Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an Amazon S3 origin, specify none for the Forward element.

" + "documentation":"

Specifies which cookies to forward to the origin for this cache behavior: all, none, or the list of cookies specified in the WhitelistedNames complex type.

Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an Amazon S3 origin, specify none for the Forward element.

" }, "WhitelistedNames":{ "shape":"CookieNames", - "documentation":"

Required if you specify whitelist for the value of Forward:. A complex type that specifies how many different cookies you want CloudFront to forward to the origin for this cache behavior and, if you want to forward selected cookies, the names of those cookies.

If you specify all or none for the value of Forward, omit WhitelistedNames. If you change the value of Forward from whitelist to all or none and you don't delete the WhitelistedNames element and its child elements, CloudFront deletes them automatically.

For the current limit on the number of cookie names that you can whitelist for each cache behavior, see CloudFront Limits in the AWS General Reference.

" + "documentation":"

Required if you specify whitelist for the value of Forward. A complex type that specifies how many different cookies you want CloudFront to forward to the origin for this cache behavior and, if you want to forward selected cookies, the names of those cookies.

If you specify all or none for the value of Forward, omit WhitelistedNames. If you change the value of Forward from whitelist to all or none and you don't delete the WhitelistedNames element and its child elements, CloudFront deletes them automatically.

For the current limit on the number of cookie names that you can whitelist for each cache behavior, see CloudFront Limits in the AWS General Reference.

" } }, "documentation":"

A complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. For more information about forwarding cookies to the origin, see Caching Content Based on Cookies in the Amazon CloudFront Developer Guide.

" @@ -1685,15 +1685,15 @@ }, "ResponsePagePath":{ "shape":"string", - "documentation":"

The path to the custom error page that you want CloudFront to return to a viewer when your origin returns the HTTP status code specified by ErrorCode, for example, /4xx-errors/403-forbidden.html. If you want to store your objects and your custom error pages in different locations, your distribution must include a cache behavior for which the following is true:

  • The value of PathPattern matches the path to your custom error messages. For example, suppose you saved custom error pages for 4xx errors in an Amazon S3 bucket in a directory named /4xx-errors. Your distribution must include a cache behavior for which the path pattern routes requests for your custom error pages to that location, for example, /4xx-errors/*.

  • The value of TargetOriginId specifies the value of the ID element for the origin that contains your custom error pages.

If you specify a value for ResponsePagePath, you must also specify a value for ResponseCode. If you don't want to specify a value, include an empty element, <ResponsePagePath>, in the XML document.

We recommend that you store custom error pages in an Amazon S3 bucket. If you store custom error pages on an HTTP server and the server starts to return 5xx errors, CloudFront can't get the files that you want to return to viewers because the origin server is unavailable.

" + "documentation":"

The path to the custom error page that you want CloudFront to return to a viewer when your origin returns the HTTP status code specified by ErrorCode, for example, /4xx-errors/403-forbidden.html. If you want to store your objects and your custom error pages in different locations, your distribution must include a cache behavior for which the following is true:

  • The value of PathPattern matches the path to your custom error messages. For example, suppose you saved custom error pages for 4xx errors in an Amazon S3 bucket in a directory named /4xx-errors. Your distribution must include a cache behavior for which the path pattern routes requests for your custom error pages to that location, for example, /4xx-errors/*.

  • The value of TargetOriginId specifies the value of the ID element for the origin that contains your custom error pages.

If you specify a value for ResponsePagePath, you must also specify a value for ResponseCode.

We recommend that you store custom error pages in an Amazon S3 bucket. If you store custom error pages on an HTTP server and the server starts to return 5xx errors, CloudFront can't get the files that you want to return to viewers because the origin server is unavailable.

" }, "ResponseCode":{ "shape":"string", - "documentation":"

The HTTP status code that you want CloudFront to return to the viewer along with the custom error page. There are a variety of reasons that you might want CloudFront to return a status code different from the status code that your origin returned to CloudFront, for example:

  • Some Internet devices (some firewalls and corporate proxies, for example) intercept HTTP 4xx and 5xx and prevent the response from being returned to the viewer. If you substitute 200, the response typically won't be intercepted.

  • If you don't care about distinguishing among different client errors or server errors, you can specify 400 or 500 as the ResponseCode for all 4xx or 5xx errors.

  • You might want to return a 200 status code (OK) and static website so your customers don't know that your website is down.

If you specify a value for ResponseCode, you must also specify a value for ResponsePagePath. If you don't want to specify a value, include an empty element, <ResponseCode>, in the XML document.

" + "documentation":"

The HTTP status code that you want CloudFront to return to the viewer along with the custom error page. There are a variety of reasons that you might want CloudFront to return a status code different from the status code that your origin returned to CloudFront, for example:

  • Some Internet devices (some firewalls and corporate proxies, for example) intercept HTTP 4xx and 5xx and prevent the response from being returned to the viewer. If you substitute 200, the response typically won't be intercepted.

  • If you don't care about distinguishing among different client errors or server errors, you can specify 400 or 500 as the ResponseCode for all 4xx or 5xx errors.

  • You might want to return a 200 status code (OK) and static website so your customers don't know that your website is down.

If you specify a value for ResponseCode, you must also specify a value for ResponsePagePath.

" }, "ErrorCachingMinTTL":{ "shape":"long", - "documentation":"

The minimum amount of time, in seconds, that you want CloudFront to cache the HTTP status code specified in ErrorCode. When this time period has elapsed, CloudFront queries your origin to see whether the problem that caused the error has been resolved and the requested object is now available.

If you don't want to specify a value, include an empty element, <ErrorCachingMinTTL>, in the XML document.

For more information, see Customizing Error Responses in the Amazon CloudFront Developer Guide.

" + "documentation":"

The minimum amount of time, in seconds, that you want CloudFront to cache the HTTP status code specified in ErrorCode. When this time period has elapsed, CloudFront queries your origin to see whether the problem that caused the error has been resolved and the requested object is now available.

For more information, see Customizing Error Responses in the Amazon CloudFront Developer Guide.

" } }, "documentation":"

A complex type that controls:

  • Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.

  • How long CloudFront caches HTTP status codes in the 4xx and 5xx range.

For more information about custom error pages, see Customizing Error Responses in the Amazon CloudFront Developer Guide.
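A sketch of a CustomErrorResponses block combining the fields described above; the error page path and TTL are placeholders, not values from this diff.

```python
# A minimal CustomErrorResponses block for a DistributionConfig, combining the
# ResponsePagePath, ResponseCode, and ErrorCachingMinTTL fields described above.
custom_error_responses = {
    "Quantity": 1,
    "Items": [
        {
            "ErrorCode": 403,
            "ResponsePagePath": "/4xx-errors/403-forbidden.html",  # served via the /4xx-errors/* cache behavior
            "ResponseCode": "200",      # returned to the viewer instead of the origin's 403
            "ErrorCachingMinTTL": 300,  # seconds before CloudFront re-queries the origin
        }
    ],
}
```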

" @@ -1786,7 +1786,7 @@ }, "ForwardedValues":{ "shape":"ForwardedValues", - "documentation":"

A complex type that specifies how CloudFront handles query strings and cookies.

" + "documentation":"

A complex type that specifies how CloudFront handles query strings, cookies, and HTTP headers.

" }, "TrustedSigners":{ "shape":"TrustedSigners", @@ -2060,7 +2060,7 @@ }, "ViewerCertificate":{ "shape":"ViewerCertificate", - "documentation":"

A complex type that specifies whether you want viewers to use HTTP or HTTPS to request your objects, whether you're using an alternate domain name with HTTPS, and if so, if you're using AWS Certificate Manager (ACM) or a third-party certificate authority.

" + "documentation":"

A complex type that determines the distribution’s SSL/TLS configuration for communicating with viewers.

" }, "Restrictions":{ "shape":"Restrictions", @@ -2068,7 +2068,7 @@ }, "WebACLId":{ "shape":"string", - "documentation":"

A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution.

AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF, see the AWS WAF Developer Guide.

" + "documentation":"

A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF, use the ACL ARN, for example arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a. To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example 473e64fd-f30b-4765-81a0-62ad96dd167a.

AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF, see the AWS WAF Developer Guide.

" }, "HttpVersion":{ "shape":"HttpVersion", @@ -2225,7 +2225,7 @@ }, "ViewerCertificate":{ "shape":"ViewerCertificate", - "documentation":"

A complex type that specifies whether you want viewers to use HTTP or HTTPS to request your objects, whether you're using an alternate domain name with HTTPS, and if so, if you're using AWS Certificate Manager (ACM) or a third-party certificate authority.

" + "documentation":"

A complex type that determines the distribution’s SSL/TLS configuration for communicating with viewers.

" }, "Restrictions":{ "shape":"Restrictions", @@ -2630,7 +2630,7 @@ "documentation":"

A complex type that contains information about the query string parameters that you want CloudFront to use for caching for this cache behavior.

" } }, - "documentation":"

A complex type that specifies how CloudFront handles query strings and cookies.

" + "documentation":"

A complex type that specifies how CloudFront handles query strings, cookies, and HTTP headers.

" }, "GeoRestriction":{ "type":"structure", @@ -3247,7 +3247,7 @@ "members":{ "Message":{"shape":"string"} }, - "documentation":"

Query string parameters specified in the response body are not valid.

", + "documentation":"

The query string parameters specified are not valid.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3274,7 +3274,7 @@ "members":{ "Message":{"shape":"string"} }, - "documentation":"

A response code specified in the response body is not valid.

", + "documentation":"

A response code is not valid.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3283,7 +3283,7 @@ "members":{ "Message":{"shape":"string"} }, - "documentation":"

TTL order specified in the response body is not valid.

", + "documentation":"

The TTL order specified is not valid.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3292,7 +3292,7 @@ "members":{ "Message":{"shape":"string"} }, - "documentation":"

Tagging specified in the response body is not valid.

", + "documentation":"

The tagging specified is not valid.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3301,7 +3301,7 @@ "members":{ "Message":{"shape":"string"} }, - "documentation":"

A viewer certificate specified in the response body is not valid.

", + "documentation":"

A viewer certificate specified is not valid.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3310,7 +3310,7 @@ "members":{ "Message":{"shape":"string"} }, - "documentation":"

A web ACL id specified in the response body is not valid.

", + "documentation":"

A web ACL ID specified is not valid. To specify a web ACL created using the latest version of AWS WAF, use the ACL ARN, for example arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a. To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example 473e64fd-f30b-4765-81a0-62ad96dd167a.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -5391,36 +5391,36 @@ "members":{ "CloudFrontDefaultCertificate":{ "shape":"boolean", - "documentation":"

If you're using the CloudFront domain name for your distribution, such as d111111abcdef8.cloudfront.net, specify the following value:

  • <CloudFrontDefaultCertificate>true<CloudFrontDefaultCertificate>

" + "documentation":"

If the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net, set this field to true.

If the distribution uses Aliases (alternate domain names or CNAMEs), set this field to false and specify values for the following fields:

  • ACMCertificateArn or IAMCertificateId (specify a value for one, not both)

  • MinimumProtocolVersion

  • SSLSupportMethod

" }, "IAMCertificateId":{ "shape":"string", - "documentation":"

If you want viewers to use HTTPS to request your objects and you're using an alternate domain name, you must choose the type of certificate that you want to use. Specify the following value if you purchased your certificate from a third-party certificate authority:

  • <IAMCertificateId>IAM certificate ID<IAMCertificateId> where IAM certificate ID is the ID that IAM returned when you added the certificate to the IAM certificate store.

If you specify IAMCertificateId, you must also specify a value for SSLSupportMethod.

" + "documentation":"

If the distribution uses Aliases (alternate domain names or CNAMEs) and the SSL/TLS certificate is stored in AWS Identity and Access Management (AWS IAM), provide the ID of the IAM certificate.

If you specify an IAM certificate ID, you must also specify values for MinimumProtocolVersion and SSLSupportMethod.

" }, "ACMCertificateArn":{ "shape":"string", - "documentation":"

If you want viewers to use HTTPS to request your objects and you're using an alternate domain name, you must choose the type of certificate that you want to use. Specify the following value if ACM provided your certificate:

  • <ACMCertificateArn>ARN for ACM SSL/TLS certificate<ACMCertificateArn> where ARN for ACM SSL/TLS certificate is the ARN for the ACM SSL/TLS certificate that you want to use for this distribution.

If you specify ACMCertificateArn, you must also specify a value for SSLSupportMethod.

" + "documentation":"

If the distribution uses Aliases (alternate domain names or CNAMEs) and the SSL/TLS certificate is stored in AWS Certificate Manager (ACM), provide the Amazon Resource Name (ARN) of the ACM certificate. CloudFront only supports ACM certificates in the US East (N. Virginia) Region (us-east-1).

If you specify an ACM certificate ARN, you must also specify values for MinimumProtocolVersion and SSLSupportMethod.

" }, "SSLSupportMethod":{ "shape":"SSLSupportMethod", - "documentation":"

If you specify a value for ACMCertificateArn or for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests: using a method that works for browsers and clients released after 2010 or one that works for all clients.

  • sni-only: CloudFront can respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but there are a few that don't. For a current list of the browsers that support SNI, see the Wikipedia entry Server Name Indication. To learn about options to explore if you have users with browsers that don't include SNI support, see Choosing How CloudFront Serves HTTPS Requests in the Amazon CloudFront Developer Guide.

  • vip: CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, there are additional monthly charges. For details, including specific pricing information, see Custom SSL options for Amazon CloudFront on the AWS marketing site.

Don't specify a value for SSLSupportMethod if you specified <CloudFrontDefaultCertificate>true<CloudFrontDefaultCertificate>.

For more information, see Choosing How CloudFront Serves HTTPS Requests in the Amazon CloudFront Developer Guide.

" + "documentation":"

If the distribution uses Aliases (alternate domain names or CNAMEs), specify which viewers the distribution accepts HTTPS connections from.

  • sni-only – The distribution accepts HTTPS connections from only viewers that support server name indication (SNI). This is recommended. Most browsers and clients released after 2010 support SNI.

  • vip – The distribution accepts HTTPS connections from all viewers including those that don’t support SNI. This is not recommended, and results in additional monthly charges from CloudFront.

If the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net, don’t set a value for this field.

" }, "MinimumProtocolVersion":{ "shape":"MinimumProtocolVersion", - "documentation":"

Specify the security policy that you want CloudFront to use for HTTPS connections. A security policy determines two settings:

  • The minimum SSL/TLS protocol that CloudFront uses to communicate with viewers

  • The cipher that CloudFront uses to encrypt the content that it returns to viewers

On the CloudFront console, this setting is called Security policy.

We recommend that you specify TLSv1.1_2016 unless your users are using browsers or devices that do not support TLSv1.1 or later.

When both of the following are true, you must specify TLSv1 or later for the security policy:

  • You're using a custom certificate: you specified a value for ACMCertificateArn or for IAMCertificateId

  • You're using SNI: you specified sni-only for SSLSupportMethod

If you specify true for CloudFrontDefaultCertificate, CloudFront automatically sets the security policy to TLSv1 regardless of the value that you specify for MinimumProtocolVersion.

For information about the relationship between the security policy that you choose and the protocols and ciphers that CloudFront uses to communicate with viewers, see Supported SSL/TLS Protocols and Ciphers for Communication Between Viewers and CloudFront in the Amazon CloudFront Developer Guide.

" + "documentation":"

If the distribution uses Aliases (alternate domain names or CNAMEs), specify the security policy that you want CloudFront to use for HTTPS connections with viewers. The security policy determines two settings:

  • The minimum SSL/TLS protocol that CloudFront can use to communicate with viewers.

  • The ciphers that CloudFront can use to encrypt the content that it returns to viewers.

For more information, see Security Policy and Supported Protocols and Ciphers Between Viewers and CloudFront in the Amazon CloudFront Developer Guide.

On the CloudFront console, this setting is called Security Policy.

We recommend that you specify TLSv1.2_2018 unless your viewers are using browsers or devices that don’t support TLSv1.2.

When you’re using SNI only (you set SSLSupportMethod to sni-only), you must specify TLSv1 or higher.

If the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net (you set CloudFrontDefaultCertificate to true), CloudFront automatically sets the security policy to TLSv1 regardless of the value that you set here.

" }, "Certificate":{ "shape":"string", - "documentation":"

This field is no longer used. Use one of the following fields instead:

", + "documentation":"

This field is deprecated. Use one of the following fields instead:

  • ACMCertificateArn

  • IAMCertificateId

  • CloudFrontDefaultCertificate

", "deprecated":true }, "CertificateSource":{ "shape":"CertificateSource", - "documentation":"

This field is no longer used. Use one of the following fields instead:

", + "documentation":"

This field is deprecated. Use one of the following fields instead:

  • ACMCertificateArn

  • IAMCertificateId

  • CloudFrontDefaultCertificate

", "deprecated":true } }, - "documentation":"

A complex type that specifies the following:

  • Whether you want viewers to use HTTP or HTTPS to request your objects.

  • If you want viewers to use HTTPS, whether you're using an alternate domain name such as example.com or the CloudFront domain name for your distribution, such as d111111abcdef8.cloudfront.net.

  • If you're using an alternate domain name, whether AWS Certificate Manager (ACM) provided the certificate, or you purchased a certificate from a third-party certificate authority and imported it into ACM or uploaded it to the IAM certificate store.

Specify only one of the following values:

For more information, see Using Alternate Domain Names and HTTPS in the Amazon CloudFront Developer Guide.

" + "documentation":"

A complex type that determines the distribution’s SSL/TLS configuration for communicating with viewers.

If the distribution doesn’t use Aliases (also known as alternate domain names or CNAMEs)—that is, if the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net—set CloudFrontDefaultCertificate to true and leave all other fields empty.

If the distribution uses Aliases (alternate domain names or CNAMEs), use the fields in this type to specify the following settings:

  • Which viewers the distribution accepts HTTPS connections from: only viewers that support server name indication (SNI) (recommended), or all viewers including those that don’t support SNI.

    • To accept HTTPS connections from only viewers that support SNI, set SSLSupportMethod to sni-only. This is recommended. Most browsers and clients released after 2010 support SNI.

    • To accept HTTPS connections from all viewers, including those that don’t support SNI, set SSLSupportMethod to vip. This is not recommended, and results in additional monthly charges from CloudFront.

  • The minimum SSL/TLS protocol version that the distribution can use to communicate with viewers. To specify a minimum version, choose a value for MinimumProtocolVersion. For more information, see Security Policy in the Amazon CloudFront Developer Guide.

  • The location of the SSL/TLS certificate, AWS Certificate Manager (ACM) (recommended) or AWS Identity and Access Management (AWS IAM). You specify the location by setting a value in one of the following fields (not both):

    • ACMCertificateArn

    • IAMCertificateId

All distributions support HTTPS connections from viewers. To require viewers to use HTTPS only, or to redirect them from HTTP to HTTPS, use ViewerProtocolPolicy in the CacheBehavior or DefaultCacheBehavior. To specify how CloudFront should use SSL/TLS to communicate with your custom origin, use CustomOriginConfig.

For more information, see Using HTTPS with CloudFront and Using Alternate Domain Names and HTTPS in the Amazon CloudFront Developer Guide.
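
A minimal boto3 sketch of this configuration for a distribution that uses Aliases, assuming an ACM certificate in us-east-1; the distribution ID and certificate ARN are placeholders:

    import boto3  # sketch; assumes credentials and a default region are configured

    cloudfront = boto3.client("cloudfront")

    # Fetch the current config so the update carries every other field unchanged.
    current = cloudfront.get_distribution_config(Id="EDFDVBD6EXAMPLE")
    config = current["DistributionConfig"]

    # The distribution uses alternate domain names (CNAMEs), so point at an ACM
    # certificate in us-east-1 and accept connections from SNI viewers only.
    config["ViewerCertificate"] = {
        "CloudFrontDefaultCertificate": False,
        "ACMCertificateArn": "arn:aws:acm:us-east-1:123456789012:certificate/example",
        "SSLSupportMethod": "sni-only",
        "MinimumProtocolVersion": "TLSv1.2_2018",
    }

    cloudfront.update_distribution(
        Id="EDFDVBD6EXAMPLE",
        IfMatch=current["ETag"],  # ETag returned by get_distribution_config
        DistributionConfig=config,
    )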

" }, "ViewerProtocolPolicy":{ "type":"string", diff --git a/botocore/data/cloudhsmv2/2017-04-28/service-2.json b/botocore/data/cloudhsmv2/2017-04-28/service-2.json index 31c7a261..51c2a78b 100644 --- a/botocore/data/cloudhsmv2/2017-04-28/service-2.json +++ b/botocore/data/cloudhsmv2/2017-04-28/service-2.json @@ -23,11 +23,12 @@ "input":{"shape":"CopyBackupToRegionRequest"}, "output":{"shape":"CopyBackupToRegionResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, - {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmTagException"} ], "documentation":"

Copy an AWS CloudHSM cluster backup to a different region.

" }, @@ -40,11 +41,12 @@ "input":{"shape":"CreateClusterRequest"}, "output":{"shape":"CreateClusterResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, - {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmTagException"} ], "documentation":"

Creates a new AWS CloudHSM cluster.

" }, @@ -74,13 +76,13 @@ "input":{"shape":"DeleteBackupRequest"}, "output":{"shape":"DeleteBackupResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, - {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"} ], - "documentation":"

Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the DeleteBackup request. For more information on restoring a backup, see RestoreBackup

" + "documentation":"

Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the DeleteBackup request is made. For more information on restoring a backup, see RestoreBackup.

" }, "DeleteCluster":{ "name":"DeleteCluster", @@ -91,11 +93,12 @@ "input":{"shape":"DeleteClusterRequest"}, "output":{"shape":"DeleteClusterResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, - {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmTagException"} ], "documentation":"

Deletes the specified AWS CloudHSM cluster. Before you can delete a cluster, you must delete all HSMs in the cluster. To see if the cluster contains any HSMs, use DescribeClusters. To delete an HSM, use DeleteHsm.

" }, @@ -125,11 +128,12 @@ "input":{"shape":"DescribeBackupsRequest"}, "output":{"shape":"DescribeBackupsResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, - {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmTagException"} ], "documentation":"

Gets information about backups of AWS CloudHSM clusters.

This is a paginated operation, which means that each response might contain only a subset of all the backups. When the response contains only a subset of backups, it includes a NextToken value. Use this value in a subsequent DescribeBackups request to get more backups. When you receive a response with no NextToken (or an empty or null value), that means there are no more backups to get.
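
A short boto3 sketch of the NextToken loop described above, assuming default credentials and region are configured:

    import boto3

    hsm = boto3.client("cloudhsmv2")

    backups = []
    kwargs = {"MaxResults": 50}
    while True:
        response = hsm.describe_backups(**kwargs)
        backups.extend(response.get("Backups", []))
        token = response.get("NextToken")
        if not token:  # no NextToken means there are no more backups to get
            break
        kwargs["NextToken"] = token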

" }, @@ -142,10 +146,11 @@ "input":{"shape":"DescribeClustersRequest"}, "output":{"shape":"DescribeClustersResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmTagException"} ], "documentation":"

Gets information about AWS CloudHSM clusters.

This is a paginated operation, which means that each response might contain only a subset of all the clusters. When the response contains only a subset of clusters, it includes a NextToken value. Use this value in a subsequent DescribeClusters request to get more clusters. When you receive a response with no NextToken (or an empty or null value), that means there are no more clusters to get.

" }, @@ -158,11 +163,11 @@ "input":{"shape":"InitializeClusterRequest"}, "output":{"shape":"InitializeClusterResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, - {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"} ], "documentation":"

Claims an AWS CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get the cluster's CSR, use DescribeClusters.

" }, @@ -175,11 +180,12 @@ "input":{"shape":"ListTagsRequest"}, "output":{"shape":"ListTagsResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, - {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmTagException"} ], "documentation":"

Gets a list of tags for the specified AWS CloudHSM cluster.

This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken (or an empty or null value), that means there are no more tags to get.

" }, @@ -192,13 +198,13 @@ "input":{"shape":"RestoreBackupRequest"}, "output":{"shape":"RestoreBackupResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, - {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"} ], - "documentation":"

Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state. For more information on deleting a backup, see DeleteBackup.

" + "documentation":"

Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state. For more information on deleting a backup, see DeleteBackup.

" }, "TagResource":{ "name":"TagResource", @@ -209,11 +215,12 @@ "input":{"shape":"TagResourceRequest"}, "output":{"shape":"TagResourceResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, - {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmTagException"} ], "documentation":"

Adds or overwrites one or more tags for the specified AWS CloudHSM cluster.

" }, @@ -226,11 +233,12 @@ "input":{"shape":"UntagResourceRequest"}, "output":{"shape":"UntagResourceResponse"}, "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, - {"shape":"CloudHsmServiceException"}, - {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, - {"shape":"CloudHsmAccessDeniedException"} + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmTagException"} ], "documentation":"

Removes the specified tag or tags from the specified AWS CloudHSM cluster.

" } @@ -256,16 +264,29 @@ "shape":"Timestamp", "documentation":"

The date and time when the backup was created.

" }, - "CopyTimestamp":{"shape":"Timestamp"}, - "SourceRegion":{"shape":"Region"}, - "SourceBackup":{"shape":"BackupId"}, - "SourceCluster":{"shape":"ClusterId"}, + "CopyTimestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time when the backup was copied from a source backup.

" + }, + "SourceRegion":{ + "shape":"Region", + "documentation":"

The AWS region that contains the source backup from which the new backup was copied.

" + }, + "SourceBackup":{ + "shape":"BackupId", + "documentation":"

The identifier (ID) of the source backup from which the new backup was copied.

" + }, + "SourceCluster":{ + "shape":"ClusterId", + "documentation":"

The identifier (ID) of the cluster containing the source backup from which the new backup was copied.

" + }, "DeleteTimestamp":{ "shape":"Timestamp", "documentation":"

The date and time when the backup will be permanently deleted.

" - } + }, + "TagList":{"shape":"TagList"} }, - "documentation":"

Contains information about a backup of an AWS CloudHSM cluster.

" + "documentation":"

Contains information about a backup of an AWS CloudHSM cluster. All backup objects contain the BackupId, BackupState, ClusterId, and CreateTimestamp parameters. Backups that were copied into a destination region additionally contain the CopyTimestamp, SourceBackup, SourceCluster, and SourceRegion parameters. A backup that is pending deletion will include the DeleteTimestamp parameter.

" }, "BackupId":{ "type":"string", @@ -361,6 +382,13 @@ "documentation":"

The request was rejected because an error occurred.

", "exception":true }, + "CloudHsmTagException":{ + "type":"structure", + "members":{ + "Message":{"shape":"errorMessage"} + }, + "exception":true + }, "Cluster":{ "type":"structure", "members":{ @@ -406,7 +434,7 @@ }, "SubnetMapping":{ "shape":"ExternalSubnetMapping", - "documentation":"

A map of the cluster's subnets and their corresponding Availability Zones.

" + "documentation":"

A map from availability zone to the cluster’s subnet in that availability zone.

" }, "VpcId":{ "shape":"VpcId", @@ -415,7 +443,8 @@ "Certificates":{ "shape":"Certificates", "documentation":"

Contains one or more certificates or a certificate signing request (CSR).

" - } + }, + "TagList":{"shape":"TagList"} }, "documentation":"

Contains information about an AWS CloudHSM cluster.

" }, @@ -455,7 +484,8 @@ "BackupId":{ "shape":"BackupId", "documentation":"

The ID of the backup that will be copied to the destination region.

" - } + }, + "TagList":{"shape":"TagList"} } }, "CopyBackupToRegionResponse":{ @@ -485,7 +515,8 @@ "SourceBackupId":{ "shape":"BackupId", "documentation":"

The identifier (ID) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID, use DescribeBackups.

" - } + }, + "TagList":{"shape":"TagList"} } }, "CreateClusterResponse":{ @@ -611,7 +642,10 @@ "shape":"Filters", "documentation":"

One or more filters to limit the items returned in the response.

Use the backupIds filter to return only the specified backups. Specify backups by their backup identifier (ID).

Use the sourceBackupIds filter to return only the backups created from a source backup. The sourceBackupID of a source backup is returned by the CopyBackupToRegion operation.

Use the clusterIds filter to return only the backups for the specified clusters. Specify clusters by their cluster identifier (ID).

Use the states filter to return only backups that match the specified state.

" }, - "SortAscending":{"shape":"Boolean"} + "SortAscending":{ + "shape":"Boolean", + "documentation":"

Designates whether or not to sort the returned backups in ascending chronological order of generation.

" + } } }, "DescribeBackupsResponse":{ @@ -660,11 +694,24 @@ "DestinationBackup":{ "type":"structure", "members":{ - "CreateTimestamp":{"shape":"Timestamp"}, - "SourceRegion":{"shape":"Region"}, - "SourceBackup":{"shape":"BackupId"}, - "SourceCluster":{"shape":"ClusterId"} - } + "CreateTimestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time when the source backup was created.

" + }, + "SourceRegion":{ + "shape":"Region", + "documentation":"

The AWS region that contains the source backup from which the new backup was copied.

" + }, + "SourceBackup":{ + "shape":"BackupId", + "documentation":"

The identifier (ID) of the source backup from which the new backup was copied.

" + }, + "SourceCluster":{ + "shape":"ClusterId", + "documentation":"

The identifier (ID) of the cluster containing the source backup from which the new backup was copied.

" + } + }, + "documentation":"

Contains information about the backup that will be copied and created by the CopyBackupToRegion operation.

" }, "EniId":{ "type":"string", @@ -767,7 +814,7 @@ }, "TrustAnchor":{ "shape":"Cert", - "documentation":"

The issuing certificate of the issuing certificate authority (CA) that issued (signed) the cluster certificate. This can be a root (self-signed) certificate or a certificate chain that begins with the certificate that issued the cluster certificate and ends with a root certificate. The certificate or certificate chain must be in PEM format and can contain a maximum of 5000 characters.

" + "documentation":"

The issuing certificate of the issuing certificate authority (CA) that issued (signed) the cluster certificate. You must use a self-signed certificate. The certificate used to sign the HSM CSR must be directly available, and thus must be the root certificate. The certificate must be in PEM format and can contain a maximum of 5000 characters.
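
A hedged boto3 sketch of an InitializeCluster call; the cluster ID and PEM file names are placeholders, and the trust anchor is the self-signed root certificate described above:

    import boto3  # sketch; assumes credentials and a default region are configured

    hsm = boto3.client("cloudhsmv2")

    # Both certificates must be PEM-encoded.
    with open("cluster-cert.pem") as f:
        signed_cert = f.read()
    with open("root-cert.pem") as f:
        trust_anchor = f.read()  # the self-signed root certificate that signed the CSR

    hsm.initialize_cluster(
        ClusterId="cluster-abcdefghijkl",  # hypothetical; find real IDs with DescribeClusters
        SignedCert=signed_cert,
        TrustAnchor=trust_anchor,
    )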

" } } }, @@ -793,7 +840,7 @@ "required":["ResourceId"], "members":{ "ResourceId":{ - "shape":"ClusterId", + "shape":"ResourceId", "documentation":"

The cluster identifier (ID) for the cluster whose tags you are getting. To find the cluster ID, use DescribeClusters.

" }, "NextToken":{ @@ -839,6 +886,10 @@ "type":"string", "pattern":"[a-z]{2}(-(gov))?-(east|west|north|south|central){1,2}-\\d" }, + "ResourceId":{ + "type":"string", + "pattern":"(?:cluster|backup)-[2-7a-zA-Z]{11,16}" + }, "RestoreBackupRequest":{ "type":"structure", "required":["BackupId"], @@ -860,7 +911,7 @@ }, "SecurityGroup":{ "type":"string", - "pattern":"sg-[0-9a-fA-F]" + "pattern":"sg-[0-9a-fA-F]{8,17}" }, "StateMessage":{ "type":"string", @@ -926,7 +977,7 @@ ], "members":{ "ResourceId":{ - "shape":"ClusterId", + "shape":"ResourceId", "documentation":"

The cluster identifier (ID) for the cluster that you are tagging. To find the cluster ID, use DescribeClusters.

" }, "TagList":{ @@ -955,7 +1006,7 @@ ], "members":{ "ResourceId":{ - "shape":"ClusterId", + "shape":"ResourceId", "documentation":"

The cluster identifier (ID) for the cluster whose tags you are removing. To find the cluster ID, use DescribeClusters.

" }, "TagKeyList":{ @@ -975,5 +1026,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"

For more information about AWS CloudHSM, see AWS CloudHSM and the AWS CloudHSM User Guide.

" + "documentation":"

For more information about AWS CloudHSM, see AWS CloudHSM and the AWS CloudHSM User Guide.

" } diff --git a/botocore/data/cloudwatch/2010-08-01/service-2.json b/botocore/data/cloudwatch/2010-08-01/service-2.json index df37bfcb..98624280 100644 --- a/botocore/data/cloudwatch/2010-08-01/service-2.json +++ b/botocore/data/cloudwatch/2010-08-01/service-2.json @@ -568,6 +568,10 @@ "Configuration":{ "shape":"AnomalyDetectorConfiguration", "documentation":"

The configuration specifies details about how the anomaly detection model is to be trained, including time ranges to exclude from use for training the model, and the time zone to use for the metric.

" + }, + "StateValue":{ + "shape":"AnomalyDetectorStateValue", + "documentation":"

The current status of the anomaly detector's training. The possible values are TRAINED | PENDING_TRAINING | TRAINED_INSUFFICIENT_DATA.

" } }, "documentation":"

An anomaly detection model associated with a particular CloudWatch metric and statistic. You can use the model to display a band of expected normal values when the metric is graphed.
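
A rough boto3 sketch of reading the new StateValue field, assuming a hypothetical EC2 CPUUtilization metric in the caller's account:

    import boto3

    cloudwatch = boto3.client("cloudwatch")

    response = cloudwatch.describe_anomaly_detectors(
        Namespace="AWS/EC2",
        MetricName="CPUUtilization",
    )
    for detector in response.get("AnomalyDetectors", []):
        # StateValue is one of PENDING_TRAINING, TRAINED_INSUFFICIENT_DATA, or TRAINED
        print(detector.get("Stat"), detector.get("StateValue"))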

" @@ -591,6 +595,14 @@ "member":{"shape":"Range"} }, "AnomalyDetectorMetricTimezone":{"type":"string"}, + "AnomalyDetectorStateValue":{ + "type":"string", + "enum":[ + "PENDING_TRAINING", + "TRAINED_INSUFFICIENT_DATA", + "TRAINED" + ] + }, "AnomalyDetectors":{ "type":"list", "member":{"shape":"AnomalyDetector"} diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index 2ee4acaf..4e46f046 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -162,6 +162,19 @@ ], "documentation":"

DeleteReportGroup: Deletes a report group. Before you delete a report group, you must delete its reports. Use ListReportsForReportGroup to get the reports in a report group. Use DeleteReport to delete the reports. If you call DeleteReportGroup for a report group that contains one or more reports, an exception is thrown.

" }, + "DeleteResourcePolicy":{ + "name":"DeleteResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteResourcePolicyInput"}, + "output":{"shape":"DeleteResourcePolicyOutput"}, + "errors":[ + {"shape":"InvalidInputException"} + ], + "documentation":"

Deletes a resource policy that is identified by its resource ARN.

" + }, "DeleteSourceCredentials":{ "name":"DeleteSourceCredentials", "http":{ @@ -205,6 +218,20 @@ ], "documentation":"

Returns a list of details about test cases for a report.

" }, + "GetResourcePolicy":{ + "name":"GetResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetResourcePolicyInput"}, + "output":{"shape":"GetResourcePolicyOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Gets a resource policy that is identified by its resource ARN.

" + }, "ImportSourceCredentials":{ "name":"ImportSourceCredentials", "http":{ @@ -324,6 +351,32 @@ ], "documentation":"

Returns a list of ARNs for the reports that belong to a ReportGroup.

" }, + "ListSharedProjects":{ + "name":"ListSharedProjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSharedProjectsInput"}, + "output":{"shape":"ListSharedProjectsOutput"}, + "errors":[ + {"shape":"InvalidInputException"} + ], + "documentation":"

Gets a list of projects that are shared with other AWS accounts or users.

" + }, + "ListSharedReportGroups":{ + "name":"ListSharedReportGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSharedReportGroupsInput"}, + "output":{"shape":"ListSharedReportGroupsOutput"}, + "errors":[ + {"shape":"InvalidInputException"} + ], + "documentation":"

Gets a list of report groups that are shared with other AWS accounts or users.

" + }, "ListSourceCredentials":{ "name":"ListSourceCredentials", "http":{ @@ -334,6 +387,20 @@ "output":{"shape":"ListSourceCredentialsOutput"}, "documentation":"

Returns a list of SourceCredentialsInfo objects.

" }, + "PutResourcePolicy":{ + "name":"PutResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutResourcePolicyInput"}, + "output":{"shape":"PutResourcePolicyOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Stores a resource policy for the ARN of a Project or ReportGroup object.

" + }, "StartBuild":{ "name":"StartBuild", "http":{ @@ -497,7 +564,7 @@ "members":{ "names":{ "shape":"ProjectNames", - "documentation":"

The names of the build projects.

" + "documentation":"

The names or ARNs of the build projects. To get information about a project shared with your AWS account, its ARN must be specified. You cannot specify a shared project using its name.

" } } }, @@ -679,6 +746,10 @@ "reportArns":{ "shape":"BuildReportArns", "documentation":"

An array of the ARNs associated with this build's reports.

" + }, + "fileSystemLocations":{ + "shape":"ProjectFileSystemLocations", + "documentation":"

An array of ProjectFileSystemLocation objects for a CodeBuild build project. A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, mountPoint, and type of a file system created using Amazon Elastic File System.

" } }, "documentation":"

Information about a build.

" @@ -700,7 +771,7 @@ }, "overrideArtifactName":{ "shape":"WrapperBoolean", - "documentation":"

If this flag is set, a name specified in the build spec file overrides the artifact name. The name specified in a build spec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.

" + "documentation":"

If this flag is set, a name specified in the buildspec file overrides the artifact name. The name specified in a buildspec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.

" }, "encryptionDisabled":{ "shape":"WrapperBoolean", @@ -926,6 +997,10 @@ "logsConfig":{ "shape":"LogsConfig", "documentation":"

Information about logs for the build project. These can be logs in Amazon CloudWatch Logs, logs uploaded to a specified S3 bucket, or both.

" + }, + "fileSystemLocations":{ + "shape":"ProjectFileSystemLocations", + "documentation":"

An array of ProjectFileSystemLocation objects for a CodeBuild build project. A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, mountPoint, and type of a file system created using Amazon Elastic File System.

" } } }, @@ -1045,6 +1120,21 @@ "members":{ } }, + "DeleteResourcePolicyInput":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the resource that is associated with the resource policy.

" + } + } + }, + "DeleteResourcePolicyOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteSourceCredentialsInput":{ "type":"structure", "required":["arn"], @@ -1194,11 +1284,11 @@ }, "value":{ "shape":"String", - "documentation":"

The value of the environment variable.

We strongly discourage the use of environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. Environment variables can be displayed in plain text using the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI).

" + "documentation":"

The value of the environment variable.

We strongly discourage the use of PLAINTEXT environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. PLAINTEXT environment variables can be displayed in plain text using the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI). For sensitive values, we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER.

" }, "type":{ "shape":"EnvironmentVariableType", - "documentation":"

The type of environment variable. Valid values include:

  • PARAMETER_STORE: An environment variable stored in Amazon EC2 Systems Manager Parameter Store.

  • PLAINTEXT: An environment variable in plain text format.

  • SECRETS_MANAGER: An environment variable stored in AWS Secrets Manager.

" + "documentation":"

The type of environment variable. Valid values include:

  • PARAMETER_STORE: An environment variable stored in Amazon EC2 Systems Manager Parameter Store.

  • PLAINTEXT: An environment variable in plain text format. This is the default value.

  • SECRETS_MANAGER: An environment variable stored in AWS Secrets Manager.

" } }, "documentation":"

Information about an environment variable for a build project or a build.
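
A small sketch of environmentVariables entries for a CodeBuild project, with the sensitive values pulled from Parameter Store and Secrets Manager as recommended above; the names and paths are placeholders:

    # Hypothetical environmentVariables list for a CodeBuild project environment.
    environment_variables = [
        {"name": "STAGE", "value": "prod", "type": "PLAINTEXT"},
        # Sensitive values should come from Parameter Store or Secrets Manager, not
        # PLAINTEXT, since plaintext values are visible in the console and the CLI.
        {"name": "DB_PASSWORD", "value": "/myapp/db/password", "type": "PARAMETER_STORE"},
        {"name": "API_TOKEN", "value": "myapp/api-token", "type": "SECRETS_MANAGER"},
    ]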

" @@ -1233,6 +1323,10 @@ "type":"list", "member":{"shape":"ExportedEnvironmentVariable"} }, + "FileSystemType":{ + "type":"string", + "enum":["EFS"] + }, "FilterGroup":{ "type":"list", "member":{"shape":"WebhookFilter"} @@ -1241,6 +1335,25 @@ "type":"list", "member":{"shape":"FilterGroup"} }, + "GetResourcePolicyInput":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the resource that is associated with the resource policy.

" + } + } + }, + "GetResourcePolicyOutput":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"NonEmptyString", + "documentation":"

The resource policy for the resource identified by the input ARN parameter.

" + } + } + }, "GitCloneDepth":{ "type":"integer", "min":0 @@ -1557,6 +1670,74 @@ } } }, + "ListSharedProjectsInput":{ + "type":"structure", + "members":{ + "sortBy":{ + "shape":"SharedResourceSortByType", + "documentation":"

The criterion to be used to list build projects shared with the current AWS account or user. Valid values include:

  • ARN: List based on the ARN.

  • MODIFIED_TIME: List based on when information about the shared project was last changed.

" + }, + "sortOrder":{ + "shape":"SortOrderType", + "documentation":"

The order in which to list shared build projects. Valid values include:

  • ASCENDING: List in ascending order.

  • DESCENDING: List in descending order.

" + }, + "maxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of paginated shared build projects returned per response. Use nextToken to iterate pages in the list of returned Project objects. The default value is 100.

" + }, + "nextToken":{ + "shape":"NonEmptyString", + "documentation":"

During a previous call, the maximum number of items that can be returned is the value specified in maxResults. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

" + } + } + }, + "ListSharedProjectsOutput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

During a previous call, the maximum number of items that can be returned is the value specified in maxResults. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

" + }, + "projects":{ + "shape":"ProjectArns", + "documentation":"

The list of ARNs for the build projects shared with the current AWS account or user.
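
A minimal boto3 sketch of paging through ListSharedProjects with nextToken, assuming default credentials and region:

    import boto3

    codebuild = boto3.client("codebuild")

    shared_project_arns = []
    kwargs = {"sortBy": "MODIFIED_TIME", "sortOrder": "DESCENDING"}
    while True:
        response = codebuild.list_shared_projects(**kwargs)
        shared_project_arns.extend(response.get("projects", []))
        token = response.get("nextToken")
        if not token:  # no nextToken means there are no more shared projects to get
            break
        kwargs["nextToken"] = token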

" + } + } + }, + "ListSharedReportGroupsInput":{ + "type":"structure", + "members":{ + "sortOrder":{ + "shape":"SortOrderType", + "documentation":"

The order in which to list shared report groups. Valid values include:

  • ASCENDING: List in ascending order.

  • DESCENDING: List in descending order.

" + }, + "sortBy":{ + "shape":"SharedResourceSortByType", + "documentation":"

The criterion to be used to list report groups shared with the current AWS account or user. Valid values include:

  • ARN: List based on the ARN.

  • MODIFIED_TIME: List based on when information about the shared report group was last changed.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

During a previous call, the maximum number of items that can be returned is the value specified in maxResults. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

" + }, + "maxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of paginated shared report groups returned per response. Use nextToken to iterate pages in the list of returned ReportGroup objects. The default value is 100.

" + } + } + }, + "ListSharedReportGroupsOutput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

During a previous call, the maximum number of items that can be returned is the value specified in maxResults. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

" + }, + "reportGroups":{ + "shape":"ReportGroupArns", + "documentation":"

The list of ARNs for the report groups shared with the current AWS account or user.

" + } + } + }, "ListSourceCredentialsInput":{ "type":"structure", "members":{ @@ -1777,10 +1958,20 @@ "logsConfig":{ "shape":"LogsConfig", "documentation":"

Information about logs for the build project. A project can create logs in Amazon CloudWatch Logs, an S3 bucket, or both.

" + }, + "fileSystemLocations":{ + "shape":"ProjectFileSystemLocations", + "documentation":"

An array of ProjectFileSystemLocation objects for a CodeBuild build project. A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, mountPoint, and type of a file system created using Amazon Elastic File System.

" } }, "documentation":"

Information about a build project.

" }, + "ProjectArns":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":100, + "min":1 + }, "ProjectArtifacts":{ "type":"structure", "required":["type"], @@ -1811,7 +2002,7 @@ }, "overrideArtifactName":{ "shape":"WrapperBoolean", - "documentation":"

If this flag is set, a name specified in the build spec file overrides the artifact name. The name specified in a build spec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.

" + "documentation":"

If this flag is set, a name specified in the buildspec file overrides the artifact name. The name specified in a buildspec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.

" }, "encryptionDisabled":{ "shape":"WrapperBoolean", @@ -1915,6 +2106,36 @@ }, "documentation":"

Information about the build environment of the build project.

" }, + "ProjectFileSystemLocation":{ + "type":"structure", + "members":{ + "type":{ + "shape":"FileSystemType", + "documentation":"

The type of the file system. The one supported type is EFS.

" + }, + "location":{ + "shape":"String", + "documentation":"

A string that specifies the location of the file system created by Amazon EFS. Its format is efs-dns-name:/directory-path. You can find the DNS name of the file system when you view it in the AWS EFS console. The directory path is a path to a directory in the file system that CodeBuild mounts. For example, if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, and its mount directory is my-efs-mount-directory, then the location is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory.

The directory path in the format efs-dns-name:/directory-path is optional. If you do not specify a directory path, the location is only the DNS name and CodeBuild mounts the entire file system.

" + }, + "mountPoint":{ + "shape":"String", + "documentation":"

The location in the container where you mount the file system.

" + }, + "identifier":{ + "shape":"String", + "documentation":"

The name used to access a file system created by Amazon EFS. CodeBuild creates an environment variable by appending the identifier in all capital letters to CODEBUILD_. For example, if you specify my-efs for identifier, a new environment variable is created named CODEBUILD_MY-EFS.

The identifier is used to mount your file system.

" + }, + "mountOptions":{ + "shape":"String", + "documentation":"

The mount options for a file system created by AWS EFS. The default mount options used by CodeBuild are nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2. For more information, see Recommended NFS Mount Options.

" + } + }, + "documentation":"

Information about a file system created by Amazon Elastic File System (EFS). For more information, see What Is Amazon Elastic File System?
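
A hedged sketch of a fileSystemLocations entry for a CodeBuild project, reusing the example DNS name and identifier from the description above; the mount point is a placeholder:

    # Hypothetical fileSystemLocations list for create_project / update_project.
    file_system_locations = [
        {
            "type": "EFS",
            "location": "fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory",
            "mountPoint": "/mnt/efs",
            "identifier": "my-efs",  # exposed to the build as CODEBUILD_MY-EFS
            "mountOptions": "nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2",
        }
    ]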

" + }, + "ProjectFileSystemLocations":{ + "type":"list", + "member":{"shape":"ProjectFileSystemLocation"} + }, "ProjectName":{ "type":"string", "max":255, @@ -1951,7 +2172,7 @@ }, "location":{ "shape":"String", - "documentation":"

Information about the location of the source code to be built. Valid values include:

  • For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.

  • For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the build spec (for example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name ).

  • For source code in an Amazon Simple Storage Service (Amazon S3) input bucket, one of the following.

    • The path to the ZIP file that contains the source code (for example, bucket-name/path/to/object-name.zip).

    • The path to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/).

  • For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow AWS CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.

  • For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the build spec. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.

" + "documentation":"

Information about the location of the source code to be built. Valid values include:

  • For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.

  • For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name ).

  • For source code in an Amazon Simple Storage Service (Amazon S3) input bucket, one of the following.

    • The path to the ZIP file that contains the source code (for example, bucket-name/path/to/object-name.zip).

    • The path to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/).

  • For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow AWS CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.

  • For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.

" }, "gitCloneDepth":{ "shape":"GitCloneDepth", @@ -1963,7 +2184,7 @@ }, "buildspec":{ "shape":"String", - "documentation":"

The build spec declaration to use for the builds in this build project.

If this value is not specified, a build spec must be included along with the source code to be built.

" + "documentation":"

The buildspec file declaration to use for the builds in this build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

" }, "auth":{ "shape":"SourceAuth", @@ -2012,6 +2233,32 @@ "type":"list", "member":{"shape":"Project"} }, + "PutResourcePolicyInput":{ + "type":"structure", + "required":[ + "policy", + "resourceArn" + ], + "members":{ + "policy":{ + "shape":"NonEmptyString", + "documentation":"

A JSON-formatted resource policy. For more information, see Sharing a Project and Sharing a Report Group in the AWS CodeBuild User Guide.

" + }, + "resourceArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the Project or ReportGroup resource you want to associate with a resource policy.

" + } + } + }, + "PutResourcePolicyOutput":{ + "type":"structure", + "members":{ + "resourceArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the Project or ReportGroup resource that is associated with a resource policy.
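
A rough boto3 sketch of PutResourcePolicy; the account IDs, project ARN, and policy statement are placeholders, and the exact statement shapes CodeBuild accepts are described in the sharing topics referenced above:

    import json
    import boto3

    codebuild = boto3.client("codebuild")

    # Hypothetical policy allowing another account to read the project's details.
    policy = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
                "Action": ["codebuild:BatchGetProjects"],
                "Resource": "arn:aws:codebuild:us-east-1:123456789012:project/my-project",
            }
        ],
    }

    codebuild.put_resource_policy(
        policy=json.dumps(policy),
        resourceArn="arn:aws:codebuild:us-east-1:123456789012:project/my-project",
    )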

" + } + } + }, "RegistryCredential":{ "type":"structure", "required":[ @@ -2021,7 +2268,7 @@ "members":{ "credential":{ "shape":"NonEmptyString", - "documentation":"

The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets Manager.

The credential can use the name of the credentials only if they exist in your current region.

" + "documentation":"

The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets Manager.

The credential can use the name of the credentials only if they exist in your current AWS Region.

" }, "credentialProvider":{ "shape":"CredentialProviderType", @@ -2281,6 +2528,13 @@ "GITHUB_ENTERPRISE" ] }, + "SharedResourceSortByType":{ + "type":"string", + "enum":[ + "ARN", + "MODIFIED_TIME" + ] + }, "SortOrderType":{ "type":"string", "enum":[ @@ -2395,7 +2649,7 @@ }, "buildspecOverride":{ "shape":"String", - "documentation":"

A build spec declaration that overrides, for this build only, the latest one already defined in the build project.

" + "documentation":"

A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

" }, "insecureSslOverride":{ "shape":"WrapperBoolean", @@ -2441,6 +2695,10 @@ "shape":"TimeOut", "documentation":"

The number of minutes a build is allowed to be queued before it times out.

" }, + "encryptionKeyOverride":{ + "shape":"NonEmptyString", + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides the one specified in the build project. The CMK encrypts the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/alias-name).
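
These override fields can be passed to StartBuild roughly as follows; projectName is the standard StartBuild parameter that does not appear in this excerpt, and the project name, ARN, alias, and token are placeholders:

    import boto3

    codebuild = boto3.client("codebuild")

    response = codebuild.start_build(
        projectName="my-project",  # assumed existing build project
        # Buildspec stored in S3, referenced by ARN as described above.
        buildspecOverride="arn:aws:s3:::my-codebuild-sample2/buildspec.yml",
        # CMK ARN or alias/<alias-name>; the service role must be able to use the key.
        encryptionKeyOverride="alias/my-build-artifacts-key",
        idempotencyToken="4f7cbf29-example-token",
    )
    print(response)  # the returned Build structure is not shown in this excerpt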

" + }, "idempotencyToken":{ "shape":"String", "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 12 hours. If you repeat the StartBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

" @@ -2680,6 +2938,10 @@ "logsConfig":{ "shape":"LogsConfig", "documentation":"

Information about logs for the build project. A project can create logs in Amazon CloudWatch Logs, logs in an S3 bucket, or both.

" + }, + "fileSystemLocations":{ + "shape":"ProjectFileSystemLocations", + "documentation":"

An array of ProjectFileSystemLocation objects for a CodeBuild build project. A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, mountPoint, and type of a file system created using Amazon Elastic File System.
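
A hypothetical sketch of supplying such a file system location with boto3; the update_project call, the EFS type value, and the DNS name are assumptions not shown in this excerpt, while the field names follow the description above:

    import boto3

    codebuild = boto3.client("codebuild")

    codebuild.update_project(  # assumed operation; the field names follow ProjectFileSystemLocation
        name="my-project",
        fileSystemLocations=[{
            "type": "EFS",  # assumed enum value
            "location": "fs-0123456789abcdef0.efs.us-east-1.amazonaws.com:/src",
            "mountPoint": "/mnt/efs",
            "identifier": "my_efs",
            "mountOptions": "nfsvers=4.1,rsize=1048576",
        }],
    )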

" } } }, @@ -2836,5 +3098,5 @@ "WrapperInt":{"type":"integer"}, "WrapperLong":{"type":"long"} }, - "documentation":"AWS CodeBuild

AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.

AWS CodeBuild supports these operations:

  • BatchDeleteBuilds: Deletes one or more builds.

  • BatchGetBuilds: Gets information about one or more builds.

  • BatchGetProjects: Gets information about one or more build projects. A build project defines how AWS CodeBuild runs a build. This includes information such as where to get the source code to build, the build environment to use, the build commands to run, and where to store the build output. A build environment is a representation of operating system, programming language runtime, and tools that AWS CodeBuild uses to run a build. You can add tags to build projects to help manage your resources and costs.

  • BatchGetReportGroups: Returns an array of report groups.

  • BatchGetReports: Returns an array of reports.

  • CreateProject: Creates a build project.

  • CreateReportGroup: Creates a report group. A report group contains a collection of reports.

  • CreateWebhook: For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.

  • DeleteProject: Deletes a build project.

  • DeleteReport: Deletes a report.

  • DeleteReportGroup: Deletes a report group.

  • DeleteSourceCredentials: Deletes a set of GitHub, GitHub Enterprise, or Bitbucket source credentials.

  • DeleteWebhook: For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code every time a code change is pushed to the repository.

  • DescribeTestCases: Returns a list of details about test cases for a report.

  • ImportSourceCredentials: Imports the source repository credentials for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.

  • InvalidateProjectCache: Resets the cache for a project.

  • ListBuilds: Gets a list of build IDs, with each build ID representing a single build.

  • ListBuildsForProject: Gets a list of build IDs for the specified build project, with each build ID representing a single build.

  • ListCuratedEnvironmentImages: Gets information about Docker images that are managed by AWS CodeBuild.

  • ListProjects: Gets a list of build project names, with each build project name representing a single build project.

  • ListReportGroups: Gets a list of ARNs for the report groups in the current AWS account.

  • ListReports: Gets a list of ARNs for the reports in the current AWS account.

  • ListReportsForReportGroup: Returns a list of ARNs for the reports that belong to a ReportGroup.

  • ListSourceCredentials: Returns a list of SourceCredentialsInfo objects. Each SourceCredentialsInfo object includes the authentication type, token ARN, and type of source provider for one set of credentials.

  • StartBuild: Starts running a build.

  • StopBuild: Attempts to stop running a build.

  • UpdateProject: Changes the settings of an existing build project.

  • UpdateReportGroup: Changes a report group.

  • UpdateWebhook: Changes the settings of an existing webhook.

" + "documentation":"AWS CodeBuild

AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.

AWS CodeBuild supports these operations:

  • BatchDeleteBuilds: Deletes one or more builds.

  • BatchGetBuilds: Gets information about one or more builds.

  • BatchGetProjects: Gets information about one or more build projects. A build project defines how AWS CodeBuild runs a build. This includes information such as where to get the source code to build, the build environment to use, the build commands to run, and where to store the build output. A build environment is a representation of operating system, programming language runtime, and tools that AWS CodeBuild uses to run a build. You can add tags to build projects to help manage your resources and costs.

  • BatchGetReportGroups: Returns an array of report groups.

  • BatchGetReports: Returns an array of reports.

  • CreateProject: Creates a build project.

  • CreateReportGroup: Creates a report group. A report group contains a collection of reports.

  • CreateWebhook: For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.

  • DeleteProject: Deletes a build project.

  • DeleteReport: Deletes a report.

  • DeleteReportGroup: Deletes a report group.

  • DeleteResourcePolicy: Deletes a resource policy that is identified by its resource ARN.

  • DeleteSourceCredentials: Deletes a set of GitHub, GitHub Enterprise, or Bitbucket source credentials.

  • DeleteWebhook: For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code every time a code change is pushed to the repository.

  • DescribeTestCases: Returns a list of details about test cases for a report.

  • GetResourcePolicy: Gets a resource policy that is identified by its resource ARN.

  • ImportSourceCredentials: Imports the source repository credentials for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.

  • InvalidateProjectCache: Resets the cache for a project.

  • ListBuilds: Gets a list of build IDs, with each build ID representing a single build.

  • ListBuildsForProject: Gets a list of build IDs for the specified build project, with each build ID representing a single build.

  • ListCuratedEnvironmentImages: Gets information about Docker images that are managed by AWS CodeBuild.

  • ListProjects: Gets a list of build project names, with each build project name representing a single build project.

  • ListReportGroups: Gets a list of ARNs for the report groups in the current AWS account.

  • ListReports: Gets a list of ARNs for the reports in the current AWS account.

  • ListReportsForReportGroup: Returns a list of ARNs for the reports that belong to a ReportGroup.

  • ListSharedProjects: Gets a list of ARNs associated with projects shared with the current AWS account or user.

  • ListSharedReportGroups: Gets a list of ARNs associated with report groups shared with the current AWS account or user.

  • ListSourceCredentials: Returns a list of SourceCredentialsInfo objects. Each SourceCredentialsInfo object includes the authentication type, token ARN, and type of source provider for one set of credentials.

  • PutResourcePolicy: Stores a resource policy for the ARN of a Project or ReportGroup object.

  • StartBuild: Starts running a build.

  • StopBuild: Attempts to stop running a build.

  • UpdateProject: Changes the settings of an existing build project.

  • UpdateReportGroup: Changes a report group.

  • UpdateWebhook: Changes the settings of an existing webhook.

" } diff --git a/botocore/data/codepipeline/2015-07-09/service-2.json b/botocore/data/codepipeline/2015-07-09/service-2.json index a060a373..3ba303ba 100644 --- a/botocore/data/codepipeline/2015-07-09/service-2.json +++ b/botocore/data/codepipeline/2015-07-09/service-2.json @@ -177,7 +177,7 @@ {"shape":"ValidationException"}, {"shape":"JobNotFoundException"} ], - "documentation":"

Returns information about a job. Used for custom actions only.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

" + "documentation":"

Returns information about a job. Used for custom actions only.

When this API is called, AWS CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

" }, "GetPipeline":{ "name":"GetPipeline", @@ -237,7 +237,7 @@ {"shape":"InvalidClientTokenException"}, {"shape":"InvalidJobException"} ], - "documentation":"

Requests the details of a job for a third party action. Used for partner actions only.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

" + "documentation":"

Requests the details of a job for a third party action. Used for partner actions only.

When this API is called, AWS CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

" }, "ListActionExecutions":{ "name":"ListActionExecutions", @@ -340,7 +340,7 @@ {"shape":"ValidationException"}, {"shape":"ActionTypeNotFoundException"} ], - "documentation":"

Returns information about any jobs for AWS CodePipeline to act on. PollForJobs is valid only for action types with \"Custom\" in the owner field. If the action type contains \"AWS\" or \"ThirdParty\" in the owner field, the PollForJobs action returns an error.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

" + "documentation":"

Returns information about any jobs for AWS CodePipeline to act on. PollForJobs is valid only for action types with \"Custom\" in the owner field. If the action type contains \"AWS\" or \"ThirdParty\" in the owner field, the PollForJobs action returns an error.

When this API is called, AWS CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

" }, "PollForThirdPartyJobs":{ "name":"PollForThirdPartyJobs", @@ -354,7 +354,7 @@ {"shape":"ActionTypeNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Determines whether there are any third party jobs for a job worker to act on. Used for partner actions only.

When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts.

" + "documentation":"

Determines whether there are any third party jobs for a job worker to act on. Used for partner actions only.

When this API is called, AWS CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts.

" }, "PutActionRevision":{ "name":"PutActionRevision", @@ -514,6 +514,22 @@ ], "documentation":"

Starts the specified pipeline. Specifically, it begins processing the latest commit to the source location specified as part of the pipeline.

" }, + "StopPipelineExecution":{ + "name":"StopPipelineExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopPipelineExecutionInput"}, + "output":{"shape":"StopPipelineExecutionOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"PipelineExecutionNotStoppableException"}, + {"shape":"DuplicatedStopRequestException"} + ], + "documentation":"

Stops the specified pipeline execution. You choose to either stop the pipeline execution by completing in-progress actions without starting subsequent actions, or by abandoning in-progress actions. While completing or abandoning in-progress actions, the pipeline execution is in a Stopping state. After all in-progress actions are completed or abandoned, the pipeline execution is in a Stopped state.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -595,7 +611,7 @@ "documentation":"

The token for the session.

" } }, - "documentation":"

Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the Amazon S3 bucket used to store artifact for the pipeline in AWS CodePipeline.

", + "documentation":"

Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Security Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifacts for the pipeline in AWS CodePipeline.

", "sensitive":true }, "AccessKeyId":{"type":"string"}, @@ -993,6 +1009,7 @@ "type":"string", "enum":[ "InProgress", + "Abandoned", "Succeeded", "Failed" ] @@ -1289,7 +1306,7 @@ }, "s3Location":{ "shape":"S3ArtifactLocation", - "documentation":"

The Amazon S3 bucket that contains the artifact.

" + "documentation":"

The S3 bucket that contains the artifact.

" } }, "documentation":"

Represents information about the location of an artifact.

" @@ -1351,14 +1368,14 @@ }, "location":{ "shape":"ArtifactStoreLocation", - "documentation":"

The Amazon S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder in the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any Amazon S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.

" + "documentation":"

The S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder in the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.

" }, "encryptionKey":{ "shape":"EncryptionKey", "documentation":"

The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.

" } }, - "documentation":"

The Amazon S3 bucket where artifacts for the pipeline are stored.

You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

" + "documentation":"

The S3 bucket where artifacts for the pipeline are stored.

You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

" }, "ArtifactStoreLocation":{ "type":"string", @@ -1649,6 +1666,14 @@ "min":1, "pattern":"[a-zA-Z0-9!@ \\(\\)\\.\\*\\?\\-]+" }, + "DuplicatedStopRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

The pipeline execution is already in a Stopping state. If you already chose to stop and wait, you cannot make that request again. You can choose to stop and abandon now, but be aware that this option can lead to failed or out-of-sequence tasks. If you already chose to stop and abandon, you cannot make that request again.

", + "exception":true + }, "EnableStageTransitionInput":{ "type":"structure", "required":[ @@ -2096,7 +2121,7 @@ }, "artifactCredentials":{ "shape":"AWSSessionCredentials", - "documentation":"

Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the Amazon S3 bucket used to store artifacts for the pipeline in AWS CodePipeline.

" + "documentation":"

Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Security Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifacts for the pipeline in AWS CodePipeline.

" }, "continuationToken":{ "shape":"ContinuationToken", @@ -2517,7 +2542,7 @@ }, "artifactStore":{ "shape":"ArtifactStore", - "documentation":"

Represents information about the Amazon S3 bucket where artifacts are stored for the pipeline.

You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

" + "documentation":"

Represents information about the S3 bucket where artifacts are stored for the pipeline.

You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

" }, "artifactStores":{ "shape":"ArtifactStoreMap", @@ -2539,11 +2564,11 @@ "members":{ "pipelineName":{ "shape":"PipelineName", - "documentation":"

The name of the pipeline that was executed.

" + "documentation":"

The name of the pipeline with the specified pipeline execution.

" }, "pipelineVersion":{ "shape":"PipelineVersion", - "documentation":"

The version number of the pipeline that was executed.

" + "documentation":"

The version number of the pipeline with the specified pipeline execution.

" }, "pipelineExecutionId":{ "shape":"PipelineExecutionId", @@ -2551,7 +2576,7 @@ }, "status":{ "shape":"PipelineExecutionStatus", - "documentation":"

The status of the pipeline execution.

  • InProgress: The pipeline execution is currently running.

  • Succeeded: The pipeline execution was completed successfully.

  • Superseded: While this pipeline execution was waiting for the next stage to be completed, a newer pipeline execution advanced and continued through the pipeline instead.

  • Failed: The pipeline execution was not completed successfully.

" + "documentation":"

The status of the pipeline execution.

  • InProgress: The pipeline execution is currently running.

  • Stopped: The pipeline execution was manually stopped. For more information, see Stopped Executions.

  • Stopping: The pipeline execution received a request to be manually stopped. Depending on the selected stop mode, the execution is either completing or abandoning in-progress actions. For more information, see Stopped Executions.

  • Succeeded: The pipeline execution was completed successfully.

  • Superseded: While this pipeline execution was waiting for the next stage to be completed, a newer pipeline execution advanced and continued through the pipeline instead. For more information, see Superseded Executions.

  • Failed: The pipeline execution was not completed successfully.

" }, "artifactRevisions":{ "shape":"ArtifactRevisionList", @@ -2571,10 +2596,20 @@ "documentation":"

The pipeline execution was specified in an invalid format or cannot be found, or an execution ID does not belong to the specified pipeline.

", "exception":true }, + "PipelineExecutionNotStoppableException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

Unable to stop the pipeline execution. The execution might already be in a Stopped state, or it might no longer be in progress.

", + "exception":true + }, "PipelineExecutionStatus":{ "type":"string", "enum":[ "InProgress", + "Stopped", + "Stopping", "Succeeded", "Superseded", "Failed" @@ -2589,7 +2624,7 @@ }, "status":{ "shape":"PipelineExecutionStatus", - "documentation":"

The status of the pipeline execution.

  • InProgress: The pipeline execution is currently running.

  • Succeeded: The pipeline execution was completed successfully.

  • Superseded: While this pipeline execution was waiting for the next stage to be completed, a newer pipeline execution advanced and continued through the pipeline instead.

  • Failed: The pipeline execution was not completed successfully.

" + "documentation":"

The status of the pipeline execution.

  • InProgress: The pipeline execution is currently running.

  • Stopped: The pipeline execution was manually stopped. For more information, see Stopped Executions.

  • Stopping: The pipeline execution received a request to be manually stopped. Depending on the selected stop mode, the execution is either completing or abandoning in-progress actions. For more information, see Stopped Executions.

  • Succeeded: The pipeline execution was completed successfully.

  • Superseded: While this pipeline execution was waiting for the next stage to be completed, a newer pipeline execution advanced and continued through the pipeline instead. For more information, see Superseded Executions.

  • Failed: The pipeline execution was not completed successfully.

" }, "startTime":{ "shape":"Timestamp", @@ -2606,6 +2641,10 @@ "trigger":{ "shape":"ExecutionTrigger", "documentation":"

The interaction or event that started a pipeline execution, such as automated change detection or a StartPipelineExecution API call.

" + }, + "stopTrigger":{ + "shape":"StopExecutionTrigger", + "documentation":"

The interaction that stopped a pipeline execution.

" } }, "documentation":"

Summary information about a pipeline execution.

" @@ -3057,14 +3096,14 @@ "members":{ "bucketName":{ "shape":"S3BucketName", - "documentation":"

The name of the Amazon S3 bucket.

" + "documentation":"

The name of the S3 bucket.

" }, "objectKey":{ "shape":"S3ObjectKey", - "documentation":"

The key of the object in the Amazon S3 bucket, which uniquely identifies the object in the bucket.

" + "documentation":"

The key of the object in the S3 bucket, which uniquely identifies the object in the bucket.

" } }, - "documentation":"

The location of the Amazon S3 bucket that contains a revision.

" + "documentation":"

The location of the S3 bucket that contains a revision.

" }, "S3Bucket":{ "type":"string", @@ -3184,6 +3223,8 @@ "enum":[ "InProgress", "Failed", + "Stopped", + "Stopping", "Succeeded" ] }, @@ -3270,6 +3311,54 @@ }, "documentation":"

Represents the output of a StartPipelineExecution action.

" }, + "StopExecutionTrigger":{ + "type":"structure", + "members":{ + "reason":{ + "shape":"StopPipelineExecutionReason", + "documentation":"

The user-specified reason the pipeline was stopped.

" + } + }, + "documentation":"

The interaction that stopped a pipeline execution.

" + }, + "StopPipelineExecutionInput":{ + "type":"structure", + "required":[ + "pipelineName", + "pipelineExecutionId" + ], + "members":{ + "pipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline to stop.

" + }, + "pipelineExecutionId":{ + "shape":"PipelineExecutionId", + "documentation":"

The ID of the pipeline execution to be stopped in the current stage. Use the GetPipelineState action to retrieve the current pipelineExecutionId.

" + }, + "abandon":{ + "shape":"Boolean", + "documentation":"

Use this option to stop the pipeline execution by abandoning, rather than finishing, in-progress actions.

This option can lead to failed or out-of-sequence tasks.

" + }, + "reason":{ + "shape":"StopPipelineExecutionReason", + "documentation":"

Use this option to enter comments, such as the reason the pipeline was stopped.

" + } + } + }, + "StopPipelineExecutionOutput":{ + "type":"structure", + "members":{ + "pipelineExecutionId":{ + "shape":"PipelineExecutionId", + "documentation":"

The unique system-generated ID of the pipeline execution that was stopped.
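
A minimal boto3 sketch of stopping an execution with these inputs; the pipeline name and execution ID are placeholders, and GetPipelineState (described above) is one way to look up the current pipelineExecutionId:

    import boto3

    codepipeline = boto3.client("codepipeline")

    response = codepipeline.stop_pipeline_execution(
        pipelineName="MyPipeline",                            # placeholder pipeline name
        pipelineExecutionId="0123abcd-example-execution-id",  # placeholder execution ID
        abandon=False,  # False: finish in-progress actions; True: abandon them
        reason="Stopping to roll back a bad configuration change",
    )
    print(response["pipelineExecutionId"])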

" + } + } + }, + "StopPipelineExecutionReason":{ + "type":"string", + "max":200 + }, "String":{"type":"string"}, "Tag":{ "type":"structure", @@ -3368,7 +3457,7 @@ }, "artifactCredentials":{ "shape":"AWSSessionCredentials", - "documentation":"

Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the Amazon S3 bucket used to store artifact for the pipeline in AWS CodePipeline.

" + "documentation":"

Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Security Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifacts for the pipeline in AWS CodePipeline.

" }, "continuationToken":{ "shape":"ContinuationToken", @@ -3639,5 +3728,5 @@ "min":1 } }, - "documentation":"AWS CodePipeline

Overview

This is the AWS CodePipeline API Reference. This guide provides descriptions of the actions and data types for AWS CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the AWS CodePipeline User Guide.

You can use the AWS CodePipeline API to work with pipelines, stages, actions, and transitions.

Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions.

You can work with pipelines by calling:

  • CreatePipeline, which creates a uniquely named pipeline.

  • DeletePipeline, which deletes the specified pipeline.

  • GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN).

  • GetPipelineExecution, which returns information about a specific execution of a pipeline.

  • GetPipelineState, which returns information about the current state of the stages and actions of a pipeline.

  • ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details.

  • ListPipelines, which gets a summary of all of the pipelines associated with your account.

  • ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline.

  • StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline.

  • UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline.

Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see AWS CodePipeline Pipeline Structure Reference.

Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are:

  • Source

  • Build

  • Test

  • Deploy

  • Approval

  • Invoke

Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete.

You can work with transitions by calling:

Using the API to integrate with AWS CodePipeline

For third-party integrators or developers who want to create their own integrations with AWS CodePipeline, the expected sequence varies from the standard API user. To integrate with AWS CodePipeline, developers need to work with the following items:

Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source.

You can work with jobs by calling:

Third party jobs, which are instances of an action created by a partner action and integrated into AWS CodePipeline. Partner actions are created by members of the AWS Partner Network.

You can work with third party jobs by calling:

" + "documentation":"AWS CodePipeline

Overview

This is the AWS CodePipeline API Reference. This guide provides descriptions of the actions and data types for AWS CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the AWS CodePipeline User Guide.

You can use the AWS CodePipeline API to work with pipelines, stages, actions, and transitions.

Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions.

You can work with pipelines by calling:

  • CreatePipeline, which creates a uniquely named pipeline.

  • DeletePipeline, which deletes the specified pipeline.

  • GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN).

  • GetPipelineExecution, which returns information about a specific execution of a pipeline.

  • GetPipelineState, which returns information about the current state of the stages and actions of a pipeline.

  • ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details.

  • ListPipelines, which gets a summary of all of the pipelines associated with your account.

  • ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline.

  • StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline.

  • StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline.

  • UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline.

Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see AWS CodePipeline Pipeline Structure Reference.

Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are:

  • Source

  • Build

  • Test

  • Deploy

  • Approval

  • Invoke

Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete.

You can work with transitions by calling:

Using the API to integrate with AWS CodePipeline

For third-party integrators or developers who want to create their own integrations with AWS CodePipeline, the expected sequence varies from the standard API user. To integrate with AWS CodePipeline, developers need to work with the following items:

Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source.

You can work with jobs by calling:

Third party jobs, which are instances of an action created by a partner action and integrated into AWS CodePipeline. Partner actions are created by members of the AWS Partner Network.

You can work with third party jobs by calling:

" } diff --git a/botocore/data/codestar-connections/2019-12-01/paginators-1.json b/botocore/data/codestar-connections/2019-12-01/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/codestar-connections/2019-12-01/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/codestar-connections/2019-12-01/service-2.json b/botocore/data/codestar-connections/2019-12-01/service-2.json new file mode 100644 index 00000000..9116e8ec --- /dev/null +++ b/botocore/data/codestar-connections/2019-12-01/service-2.json @@ -0,0 +1,250 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-12-01", + "endpointPrefix":"codestar-connections", + "jsonVersion":"1.0", + "protocol":"json", + "serviceFullName":"AWS CodeStar connections", + "serviceId":"CodeStar connections", + "signatureVersion":"v4", + "signingName":"codestar-connections", + "targetPrefix":"com.amazonaws.codestar.connections.CodeStar_connections_20191201", + "uid":"codestar-connections-2019-12-01" + }, + "operations":{ + "CreateConnection":{ + "name":"CreateConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateConnectionInput"}, + "output":{"shape":"CreateConnectionOutput"}, + "errors":[ + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a connection that can then be given to other AWS services like CodePipeline so that it can access third-party code repositories. The connection is in pending status until the third-party connection handshake is completed from the console.

" + }, + "DeleteConnection":{ + "name":"DeleteConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConnectionInput"}, + "output":{"shape":"DeleteConnectionOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the specified connection.

" + }, + "GetConnection":{ + "name":"GetConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConnectionInput"}, + "output":{"shape":"GetConnectionOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the connection ARN and details such as status, owner, and provider type.

" + }, + "ListConnections":{ + "name":"ListConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListConnectionsInput"}, + "output":{"shape":"ListConnectionsOutput"}, + "documentation":"

Lists the connections associated with your account.

" + } + }, + "shapes":{ + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"[0-9]{12}" + }, + "Connection":{ + "type":"structure", + "members":{ + "ConnectionName":{ + "shape":"ConnectionName", + "documentation":"

The name of the connection. Connection names must be unique in an AWS user account.

" + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection reference when the connection is shared between AWS services.

The ARN is never reused if the connection is deleted.

" + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

The name of the external provider where your third-party code repository is configured. Currently, the valid provider type is Bitbucket.

" + }, + "OwnerAccountId":{ + "shape":"AccountId", + "documentation":"

The identifier of the owner account of the external provider where your third-party code repository is configured. For Bitbucket, this is the account ID of the owner of the Bitbucket repository.

" + }, + "ConnectionStatus":{ + "shape":"ConnectionStatus", + "documentation":"

The current status of the connection.

" + } + }, + "documentation":"

The configuration that allows a service such as CodePipeline to connect to a third-party code repository.

" + }, + "ConnectionArn":{ + "type":"string", + "max":256, + "min":0, + "pattern":"arn:aws(-[\\w]+)*:.+:.+:[0-9]{12}:.+" + }, + "ConnectionList":{ + "type":"list", + "member":{"shape":"Connection"} + }, + "ConnectionName":{ + "type":"string", + "max":32, + "min":1 + }, + "ConnectionStatus":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "ERROR" + ] + }, + "CreateConnectionInput":{ + "type":"structure", + "required":[ + "ProviderType", + "ConnectionName" + ], + "members":{ + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

The name of the external provider where your third-party code repository is configured. Currently, the valid provider type is Bitbucket.

" + }, + "ConnectionName":{ + "shape":"ConnectionName", + "documentation":"

The name of the connection to be created. The name must be unique in the calling AWS account.

" + } + } + }, + "CreateConnectionOutput":{ + "type":"structure", + "required":["ConnectionArn"], + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The Amazon Resource Name (ARN) of the connection to be created. The ARN is used as the connection reference when the connection is shared between AWS services.

The ARN is never reused if the connection is deleted.

" + } + } + }, + "DeleteConnectionInput":{ + "type":"structure", + "required":["ConnectionArn"], + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The Amazon Resource Name (ARN) of the connection to be deleted.

The ARN is never reused if the connection is deleted.

" + } + } + }, + "DeleteConnectionOutput":{ + "type":"structure", + "members":{ + } + }, + "ErrorMessage":{ + "type":"string", + "max":600 + }, + "GetConnectionInput":{ + "type":"structure", + "required":["ConnectionArn"], + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The Amazon Resource Name (ARN) of a connection.

" + } + } + }, + "GetConnectionOutput":{ + "type":"structure", + "members":{ + "Connection":{ + "shape":"Connection", + "documentation":"

The connection details, such as status, owner, and provider type.

" + } + } + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Exceeded the maximum limit for connections.

", + "exception":true + }, + "ListConnectionsInput":{ + "type":"structure", + "members":{ + "ProviderTypeFilter":{ + "shape":"ProviderType", + "documentation":"

Filters the list of connections to those associated with a specified provider, such as Bitbucket.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token that was returned from the previous ListConnections call, which can be used to return the next set of connections in the list.

" + } + } + }, + "ListConnectionsOutput":{ + "type":"structure", + "members":{ + "Connections":{ + "shape":"ConnectionList", + "documentation":"

A list of connections and the details for each connection, such as status, owner, and provider type.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used in the next ListConnections call. To view all items in the list, continue to call this operation with each subsequent token until no more nextToken values are returned.
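
Because the paginators file added above is empty, callers handle NextToken themselves; a sketch:

    import boto3

    client = boto3.client("codestar-connections")

    connections, token = [], None
    while True:
        kwargs = {"ProviderTypeFilter": "Bitbucket", "MaxResults": 50}
        if token:
            kwargs["NextToken"] = token
        page = client.list_connections(**kwargs)
        connections.extend(page.get("Connections", []))
        token = page.get("NextToken")
        if not token:
            break

    for connection in connections:
        print(connection["ConnectionName"], connection["ConnectionStatus"])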

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[a-zA-Z0-9=\\-\\\\/]+" + }, + "ProviderType":{ + "type":"string", + "enum":["Bitbucket"] + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Resource not found. Verify the connection resource ARN and try again.

", + "exception":true + } + }, + "documentation":"

This AWS CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Connections API. You can use the Connections API to work with connections and installations.

Connections are configurations that you use to connect AWS resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection.

When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket Cloud app. When you create a connection, you can choose an existing installation or create one.

You can work with connections by calling:

  • CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline.

  • DeleteConnection, which deletes the specified connection.

  • GetConnection, which returns information about the connection, including the connection status.

  • ListConnections, which lists the connections associated with your account.

For information about how to use AWS CodeStar Connections, see the AWS CodePipeline User Guide.

" +} diff --git a/botocore/data/comprehend/2017-11-27/service-2.json b/botocore/data/comprehend/2017-11-27/service-2.json index 8707113a..a5607e55 100644 --- a/botocore/data/comprehend/2017-11-27/service-2.json +++ b/botocore/data/comprehend/2017-11-27/service-2.json @@ -1140,6 +1140,22 @@ "F1Score":{ "shape":"Double", "documentation":"

A measure of how accurate the classifier results are for the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.

" + }, + "MicroPrecision":{ + "shape":"Double", + "documentation":"

A measure of the usefulness of the classifier results in the test data. High precision means that the classifier returned substantially more relevant results than irrelevant ones. Unlike the Precision metric, which comes from averaging the precision of all available labels, micro precision is based on the precision scores of all labels aggregated together.

" + }, + "MicroRecall":{ + "shape":"Double", + "documentation":"

A measure of how complete the classifier results are for the test data. High recall means that the classifier returned most of the relevant results. Specifically, this indicates how many of the correct categories in the text the model can predict. It is a percentage of the correct categories in the text that can be found. Instead of averaging the recall scores of all labels (as with Recall), micro recall is based on the recall scores of all labels aggregated together.

" + }, + "MicroF1Score":{ + "shape":"Double", + "documentation":"

A measure of how accurate the classifier results are for the test data. It is a combination of the Micro Precision and Micro Recall values. The Micro F1Score is the harmonic mean of the two scores. The highest score is 1, and the worst score is 0.

" + }, + "HammingLoss":{ + "shape":"Double", + "documentation":"

Indicates the fraction of labels that are incorrectly predicted. Also seen as the fraction of wrong labels compared to the total number of labels. Scores closer to zero are better.

" } }, "documentation":"

Describes the result metrics for the test data associated with a document classifier.
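
The micro-averaged metrics above follow the usual aggregate definitions; an illustrative computation from per-label counts (not Amazon Comprehend's implementation, and assuming nonzero denominators):

    def micro_metrics(label_counts):
        """label_counts: list of (true_pos, false_pos, false_neg) tuples, one per label."""
        tp = sum(c[0] for c in label_counts)
        fp = sum(c[1] for c in label_counts)
        fn = sum(c[2] for c in label_counts)
        precision = tp / (tp + fp)                           # MicroPrecision
        recall = tp / (tp + fn)                              # MicroRecall
        f1 = 2 * precision * recall / (precision + recall)   # harmonic mean -> MicroF1Score
        return precision, recall, f1

    def hamming_loss(y_true, y_pred):
        """Fraction of label assignments that are wrong; y_* are 0/1 label matrices."""
        wrong = sum(t != p
                    for row_t, row_p in zip(y_true, y_pred)
                    for t, p in zip(row_t, row_p))
        total = sum(len(row) for row in y_true)
        return wrong / total

    print(micro_metrics([(8, 2, 1), (5, 0, 3)]))                         # example counts
    print(hamming_loss([[1, 0, 1], [0, 1, 0]], [[1, 1, 1], [0, 1, 0]]))  # 1 wrong of 6 -> ~0.167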

" @@ -1189,6 +1205,10 @@ "Classes":{ "shape":"ListOfClasses", "documentation":"

The classes used by the document being analyzed. These are used for multi-class trained models. Individual classes are mutually exclusive and each document is expected to have only a single class assigned to it. For example, an animal can be a dog or a cat, but not both at the same time.

" + }, + "Labels":{ + "shape":"ListOfLabels", + "documentation":"

The labels used by the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.
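
A hedged sketch of reading these labels from a classification response; the classify_document call, its Text and EndpointArn parameters, and the endpoint ARN are assumptions not shown in this excerpt, while the Name and Score fields come from the DocumentLabel shape added later in this diff:

    import boto3

    comprehend = boto3.client("comprehend")

    response = comprehend.classify_document(  # assumed operation name
        Text="A spaceship crew cracks jokes while saving the galaxy.",
        EndpointArn="arn:aws:comprehend:us-east-1:111111111111:document-classifier-endpoint/example",
    )

    # Multi-label models populate Labels; multi-class models populate Classes.
    for label in response.get("Labels", []):
        print(label["Name"], round(label["Score"], 3))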

" } } }, @@ -1276,6 +1296,10 @@ "VpcConfig":{ "shape":"VpcConfig", "documentation":"

Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your custom classifier. For more information, see Amazon VPC.

" + }, + "Mode":{ + "shape":"DocumentClassifierMode", + "documentation":"

Indicates the mode in which the classifier will be trained. The classifier can be trained in multi-class mode, which identifies one and only one class for each document, or multi-label mode, which identifies one or more labels for each document. In multi-label mode, multiple labels for an individual document are separated by a delimiter. The default delimiter between labels is a pipe (|).

" } } }, @@ -1853,10 +1877,21 @@ "S3Uri":{ "shape":"S3Uri", "documentation":"

The Amazon S3 URI for the input data. The S3 bucket must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of input files.

For example, if you use the URI S3://bucketName/prefix, if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.

" + }, + "LabelDelimiter":{ + "shape":"LabelDelimiter", + "documentation":"

Indicates the delimiter used to separate each label for training a multi-label classifier. The default delimiter between labels is a pipe (|). You can use a different character as a delimiter (if it's an allowed character) by specifying it under Delimiter for labels. If the training documents use a delimiter other than the default or the delimiter you specify, the labels on that line will be combined to make a single unique label, such as LABELLABELLABEL.
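
A sketch of requesting multi-label training with an explicit delimiter; DocumentClassifierName and DataAccessRoleArn are standard CreateDocumentClassifier parameters that do not appear in this excerpt, and the names, role, and bucket are placeholders:

    import boto3

    comprehend = boto3.client("comprehend")

    comprehend.create_document_classifier(
        DocumentClassifierName="movie-genres",                               # placeholder
        DataAccessRoleArn="arn:aws:iam::111111111111:role/ComprehendDataAccess",
        LanguageCode="en",
        Mode="MULTI_LABEL",
        InputDataConfig={
            "S3Uri": "s3://my-bucket/training/genres.csv",
            "LabelDelimiter": "|",  # the default; any single allowed character works
        },
    )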

" } }, "documentation":"

The input properties for training a document classifier.

For more information on how the input file is formatted, see how-document-classification-training-data.

" }, + "DocumentClassifierMode":{ + "type":"string", + "enum":[ + "MULTI_CLASS", + "MULTI_LABEL" + ] + }, "DocumentClassifierOutputDataConfig":{ "type":"structure", "members":{ @@ -1929,6 +1964,10 @@ "VpcConfig":{ "shape":"VpcConfig", "documentation":"

Configuration parameters for a private Virtual Private Cloud (VPC) containing the resources you are using for your custom classifier. For more information, see Amazon VPC.

" + }, + "Mode":{ + "shape":"DocumentClassifierMode", + "documentation":"

Indicates the mode in which the specific classifier was trained. This also indicates the format of input documents and the format of the confusion matrix. Each classifier can only be trained in one mode and this cannot be changed once the classifier is trained.

" } }, "documentation":"

Provides information about a document classifier.

" @@ -1937,6 +1976,20 @@ "type":"list", "member":{"shape":"DocumentClassifierProperties"} }, + "DocumentLabel":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the label.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The confidence score that Amazon Comprehend has that the label was correctly attributed.

" + } + }, + "documentation":"

Specifies one of the label or labels that categorize the document being analyzed.

" + }, "DominantLanguage":{ "type":"structure", "members":{ @@ -2674,6 +2727,12 @@ "documentation":"

The KMS customer managed key (CMK) entered cannot be validated. Verify the key and re-enter it.

", "exception":true }, + "LabelDelimiter":{ + "type":"string", + "max":1, + "min":1, + "pattern":"^[ ~!@#$%^*\\-_+=|\\\\:;\\t>?/]$" + }, "LanguageCode":{ "type":"string", "enum":[ @@ -2937,6 +2996,10 @@ "type":"list", "member":{"shape":"KeyPhrase"} }, + "ListOfLabels":{ + "type":"list", + "member":{"shape":"DocumentLabel"} + }, "ListOfSyntaxTokens":{ "type":"list", "member":{"shape":"SyntaxToken"} diff --git a/botocore/data/comprehendmedical/2018-10-30/service-2.json b/botocore/data/comprehendmedical/2018-10-30/service-2.json index 372eb851..32d34a0c 100644 --- a/botocore/data/comprehendmedical/2018-10-30/service-2.json +++ b/botocore/data/comprehendmedical/2018-10-30/service-2.json @@ -82,7 +82,7 @@ {"shape":"InvalidEncodingException"}, {"shape":"TextSizeLimitExceededException"} ], - "documentation":"

Inspects the clinical text for a variety of medical entities and returns specific information about them such as entity category, location, and confidence score on that information.

The DetectEntitiesV2 operation replaces the DetectEntities operation. This new action uses a different model for determining the entities in your medical text and changes the way that some entities are returned in the output. You should use the DetectEntitiesV2 operation in all new applications.

The DetectEntitiesV2 operation returns the Acuity and Direction entities as attributes instead of types. It does not return the Quality or Quantity entities.

" + "documentation":"

Inspects the clinical text for a variety of medical entities and returns specific information about them such as entity category, location, and confidence score on that information.

The DetectEntitiesV2 operation replaces the DetectEntities operation. This new action uses a different model for determining the entities in your medical text and changes the way that some entities are returned in the output. You should use the DetectEntitiesV2 operation in all new applications.

The DetectEntitiesV2 operation returns the Acuity and Direction entities as attributes instead of types.

" }, "DetectPHI":{ "name":"DetectPHI", @@ -102,6 +102,42 @@ ], "documentation":"

Inspects the clinical text for protected health information (PHI) entities and entity category, location, and confidence score on that information.

" }, + "InferICD10CM":{ + "name":"InferICD10CM", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"InferICD10CMRequest"}, + "output":{"shape":"InferICD10CMResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidEncodingException"}, + {"shape":"TextSizeLimitExceededException"} + ], + "documentation":"

InferICD10CM detects medical conditions as entities listed in a patient record and links those entities to normalized concept identifiers in the ICD-10-CM knowledge base from the Centers for Disease Control.

" + }, + "InferRxNorm":{ + "name":"InferRxNorm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"InferRxNormRequest"}, + "output":{"shape":"InferRxNormResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidEncodingException"}, + {"shape":"TextSizeLimitExceededException"} + ], + "documentation":"

InferRxNorm detects medications as entities listed in a patient record and links to the normalized concept identifiers in the RxNorm database from the National Library of Medicine.

" + }, "ListEntitiesDetectionV2Jobs":{ "name":"ListEntitiesDetectionV2Jobs", "http":{ @@ -224,7 +260,7 @@ }, "EndOffset":{ "shape":"Integer", - "documentation":"

The 0-based character offset in the input text that shows where the attribute ends. The offset returns the UTF-8 code point in the string.

" + "documentation":"

The 0-based character offset in the input text that shows where the attribute ends. The offset returns the UTF-8 code point in the string.

" }, "Text":{ "shape":"String", @@ -463,7 +499,7 @@ "members":{ "Text":{ "shape":"BoundedLengthString", - "documentation":"

A UTF-8 text string containing the clinical content being examined for PHI entities. Each string must contain fewer than 20,000 bytes of characters.

" + "documentation":"

A UTF-8 text string containing the clinical content being examined for PHI entities. Each string must contain fewer than 20,000 bytes of characters.

" } } }, @@ -517,11 +553,11 @@ }, "Type":{ "shape":"EntitySubType", - "documentation":"

Describes the specific type of entity with category of entities.

" + "documentation":"

Describes the specific type of entity with category of entities.

" }, "Traits":{ "shape":"TraitList", - "documentation":"

Contextual information for the entity

" + "documentation":"

Contextual information for the entity.

" }, "Attributes":{ "shape":"AttributeList", @@ -578,12 +614,227 @@ ] }, "Float":{"type":"float"}, + "ICD10CMAttribute":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"ICD10CMAttributeType", + "documentation":"

The type of attribute. InferICD10CM detects entities of the type DX_NAME.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend Medical has that the segment of text is correctly recognized as an attribute.

" + }, + "RelationshipScore":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend Medical has that this attribute is correctly related to this entity.

" + }, + "Id":{ + "shape":"Integer", + "documentation":"

The numeric identifier for this attribute. This is a monotonically increasing id unique within this response rather than a global unique identifier.

" + }, + "BeginOffset":{ + "shape":"Integer", + "documentation":"

The 0-based character offset in the input text that shows where the attribute begins. The offset returns the UTF-8 code point in the string.

" + }, + "EndOffset":{ + "shape":"Integer", + "documentation":"

The 0-based character offset in the input text that shows where the attribute ends. The offset returns the UTF-8 code point in the string.

" + }, + "Text":{ + "shape":"String", + "documentation":"

The segment of input text which contains the detected attribute.

" + }, + "Traits":{ + "shape":"ICD10CMTraitList", + "documentation":"

The contextual information for the attribute. The traits recognized by InferICD10CM are DIAGNOSIS, SIGN, SYMPTOM, and NEGATION.

" + } + }, + "documentation":"

The detected attributes that relate to an entity. This includes an extracted segment of the text that is an attribute of an entity, or otherwise related to an entity. InferICD10CM detects the following attributes: Direction, System, Organ or Site, and Acuity.

" + }, + "ICD10CMAttributeList":{ + "type":"list", + "member":{"shape":"ICD10CMAttribute"} + }, + "ICD10CMAttributeType":{ + "type":"string", + "enum":[ + "ACUITY", + "DIRECTION", + "SYSTEM_ORGAN_SITE", + "QUALITY", + "QUANTITY" + ] + }, + "ICD10CMConcept":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"String", + "documentation":"

The long description of the ICD-10-CM code in the ontology.

" + }, + "Code":{ + "shape":"String", + "documentation":"

The ICD-10-CM code that identifies the concept found in the knowledge base from the Centers for Disease Control.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend Medical has that the entity is accurately linked to an ICD-10-CM concept.

" + } + }, + "documentation":"

The ICD-10-CM concepts that the entity could refer to, along with a score indicating the likelihood of the match.

" + }, + "ICD10CMConceptList":{ + "type":"list", + "member":{"shape":"ICD10CMConcept"} + }, + "ICD10CMEntity":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"Integer", + "documentation":"

The numeric identifier for the entity. This is a monotonically increasing id unique within this response rather than a global unique identifier.

" + }, + "Text":{ + "shape":"OntologyLinkingBoundedLengthString", + "documentation":"

The segment of input text that is matched to the detected entity.

" + }, + "Category":{ + "shape":"ICD10CMEntityCategory", + "documentation":"

The category of the entity. InferICD10CM detects entities in the MEDICAL_CONDITION category.

" + }, + "Type":{ + "shape":"ICD10CMEntityType", + "documentation":"

Describes the specific type of entity within the category of entities. InferICD10CM detects entities of the type DX_NAME.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend Medical has in the accuracy of the detection.

" + }, + "BeginOffset":{ + "shape":"Integer", + "documentation":"

The 0-based character offset in the input text that shows where the entity begins. The offset returns the UTF-8 code point in the string.

" + }, + "EndOffset":{ + "shape":"Integer", + "documentation":"

The 0-based character offset in the input text that shows where the entity ends. The offset returns the UTF-8 code point in the string.

" + }, + "Attributes":{ + "shape":"ICD10CMAttributeList", + "documentation":"

The detected attributes that relate to the entity. An extracted segment of the text that is an attribute of an entity, or otherwise related to an entity, such as the nature of a medical condition.

" + }, + "Traits":{ + "shape":"ICD10CMTraitList", + "documentation":"

Provides contextual information for the entity. The traits recognized by InferICD10CM are DIAGNOSIS, SIGN, SYMPTOM, and NEGATION.

" + }, + "ICD10CMConcepts":{ + "shape":"ICD10CMConceptList", + "documentation":"

The ICD-10-CM concepts that the entity could refer to, along with a score indicating the likelihood of the match.

" + } + }, + "documentation":"

The collection of medical entities extracted from the input text and their associated information. For each entity, the response provides the entity text, the entity category, where the entity text begins and ends, and the level of confidence that Amazon Comprehend Medical has in the detection and analysis. Attributes and traits of the entity are also returned.

" + }, + "ICD10CMEntityCategory":{ + "type":"string", + "enum":["MEDICAL_CONDITION"] + }, + "ICD10CMEntityList":{ + "type":"list", + "member":{"shape":"ICD10CMEntity"} + }, + "ICD10CMEntityType":{ + "type":"string", + "enum":["DX_NAME"] + }, + "ICD10CMTrait":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ICD10CMTraitName", + "documentation":"

Provides a name or contextual description about the trait.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend Medical has that the segment of text is correctly recognized as a trait.

" + } + }, + "documentation":"

Contextual information for the entity. The traits recognized by InferICD10CM are DIAGNOSIS, SIGN, SYMPTOM, and NEGATION.

" + }, + "ICD10CMTraitList":{ + "type":"list", + "member":{"shape":"ICD10CMTrait"} + }, + "ICD10CMTraitName":{ + "type":"string", + "enum":[ + "NEGATION", + "DIAGNOSIS", + "SIGN", + "SYMPTOM" + ] + }, "IamRoleArn":{ "type":"string", "max":2048, "min":20, "pattern":"arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+" }, + "InferICD10CMRequest":{ + "type":"structure", + "required":["Text"], + "members":{ + "Text":{ + "shape":"OntologyLinkingBoundedLengthString", + "documentation":"

The input text used for analysis. The input for InferICD10CM is a string from 1 to 10000 characters.

" + } + } + }, + "InferICD10CMResponse":{ + "type":"structure", + "required":["Entities"], + "members":{ + "Entities":{ + "shape":"ICD10CMEntityList", + "documentation":"

The medical conditions detected in the text linked to ICD-10-CM concepts. If the action is successful, the service sends back an HTTP 200 response, as well as the entities detected.

" + }, + "PaginationToken":{ + "shape":"String", + "documentation":"

If the result of the previous request to InferICD10CM was truncated, include the PaginationToken to fetch the next page of medical condition entities.

" + }, + "ModelVersion":{ + "shape":"String", + "documentation":"

The version of the model used to analyze the documents, in the format n.n.n. You can use this information to track the model used for a particular batch of documents.

" + } + } + }, + "InferRxNormRequest":{ + "type":"structure", + "required":["Text"], + "members":{ + "Text":{ + "shape":"OntologyLinkingBoundedLengthString", + "documentation":"

The input text used for analysis. The input for InferRxNorm is a string from 1 to 10000 characters.

" + } + } + }, + "InferRxNormResponse":{ + "type":"structure", + "required":["Entities"], + "members":{ + "Entities":{ + "shape":"RxNormEntityList", + "documentation":"

The medication entities detected in the text linked to RxNorm concepts. If the action is successful, the service sends back an HTTP 200 response, as well as the entities detected.

" + }, + "PaginationToken":{ + "shape":"String", + "documentation":"

If the result of the previous request to InferRxNorm was truncated, include the PaginationToken to fetch the next page of medication entities.

" + }, + "ModelVersion":{ + "shape":"String", + "documentation":"

The version of the model used to analyze the documents, in the format n.n.n. You can use this information to track the model used for a particular batch of documents.

" + } + } + }, "InputDataConfig":{ "type":"structure", "required":["S3Bucket"], @@ -730,6 +981,11 @@ "min":1 }, "ModelVersion":{"type":"string"}, + "OntologyLinkingBoundedLengthString":{ + "type":"string", + "max":10000, + "min":1 + }, "OutputDataConfig":{ "type":"structure", "required":["S3Bucket"], @@ -753,6 +1009,165 @@ "documentation":"

The resource identified by the specified Amazon Resource Name (ARN) was not found. Check the ARN and try your request again.

", "exception":true }, + "RxNormAttribute":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"RxNormAttributeType", + "documentation":"

The type of attribute. The types of attributes recognized by InferRxNorm are BRAND_NAME and GENERIC_NAME.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend Medical has that the segment of text is correctly recognized as an attribute.

" + }, + "RelationshipScore":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend Medical has that the attribute is accurately linked to an entity.

" + }, + "Id":{ + "shape":"Integer", + "documentation":"

The numeric identifier for this attribute. This is a monotonically increasing id unique within this response rather than a global unique identifier.

" + }, + "BeginOffset":{ + "shape":"Integer", + "documentation":"

The 0-based character offset in the input text that shows where the attribute begins. The offset returns the UTF-8 code point in the string.

" + }, + "EndOffset":{ + "shape":"Integer", + "documentation":"

The 0-based character offset in the input text that shows where the attribute ends. The offset returns the UTF-8 code point in the string.

" + }, + "Text":{ + "shape":"String", + "documentation":"

The segment of input text which corresponds to the detected attribute.

" + }, + "Traits":{ + "shape":"RxNormTraitList", + "documentation":"

Contextual information for the attribute. InferRxNorm recognizes the trait NEGATION for attributes, i.e. that the patient is not taking a specific dose or form of a medication.

" + } + }, + "documentation":"

The extracted attributes that relate to this entity. The attributes recognized by InferRxNorm are DOSAGE, DURATION, FORM, FREQUENCY, RATE, ROUTE_OR_MODE.

" + }, + "RxNormAttributeList":{ + "type":"list", + "member":{"shape":"RxNormAttribute"} + }, + "RxNormAttributeType":{ + "type":"string", + "enum":[ + "DOSAGE", + "DURATION", + "FORM", + "FREQUENCY", + "RATE", + "ROUTE_OR_MODE", + "STRENGTH" + ] + }, + "RxNormConcept":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"String", + "documentation":"

The description of the RxNorm concept.

" + }, + "Code":{ + "shape":"String", + "documentation":"

RxNorm concept ID, also known as the RxCUI.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend Medical has that the entity is accurately linked to the reported RxNorm concept.

" + } + }, + "documentation":"

The RxNorm concept that the entity could refer to, along with a score indicating the likelihood of the match.

" + }, + "RxNormConceptList":{ + "type":"list", + "member":{"shape":"RxNormConcept"} + }, + "RxNormEntity":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"Integer", + "documentation":"

The numeric identifier for the entity. This is a monotonically increasing id unique within this response rather than a global unique identifier.

" + }, + "Text":{ + "shape":"OntologyLinkingBoundedLengthString", + "documentation":"

The segment of input text from which the entity was detected.

" + }, + "Category":{ + "shape":"RxNormEntityCategory", + "documentation":"

The category of the entity. The recognized categories are GENERIC or BRAND_NAME.

" + }, + "Type":{ + "shape":"RxNormEntityType", + "documentation":"

Describes the specific type of entity. For InferRxNorm, the recognized entity type is MEDICATION.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend Medical has in the accuracy of the detected entity.

" + }, + "BeginOffset":{ + "shape":"Integer", + "documentation":"

The 0-based character offset in the input text that shows where the entity begins. The offset returns the UTF-8 code point in the string.

" + }, + "EndOffset":{ + "shape":"Integer", + "documentation":"

The 0-based character offset in the input text that shows where the entity ends. The offset returns the UTF-8 code point in the string.

" + }, + "Attributes":{ + "shape":"RxNormAttributeList", + "documentation":"

The extracted attributes that relate to the entity. The attributes recognized by InferRxNorm are DOSAGE, DURATION, FORM, FREQUENCY, RATE, ROUTE_OR_MODE, and STRENGTH.

" + }, + "Traits":{ + "shape":"RxNormTraitList", + "documentation":"

Contextual information for the entity.

" + }, + "RxNormConcepts":{ + "shape":"RxNormConceptList", + "documentation":"

The RxNorm concepts that the entity could refer to, along with a score indicating the likelihood of the match.

" + } + }, + "documentation":"

The collection of medical entities extracted from the input text and their associated information. For each entity, the response provides the entity text, the entity category, where the entity text begins and ends, and the level of confidence that Amazon Comprehend Medical has in the detection and analysis. Attributes and traits of the entity are also returned.

" + }, + "RxNormEntityCategory":{ + "type":"string", + "enum":["MEDICATION"] + }, + "RxNormEntityList":{ + "type":"list", + "member":{"shape":"RxNormEntity"} + }, + "RxNormEntityType":{ + "type":"string", + "enum":[ + "BRAND_NAME", + "GENERIC_NAME" + ] + }, + "RxNormTrait":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"RxNormTraitName", + "documentation":"

Provides a name or contextual description about the trait.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend Medical has in the accuracy of the detected trait.

" + } + }, + "documentation":"

The contextual information for the entity. InferRxNorm recognizes the trait NEGATION, which is any indication that the patient is not taking a medication.

" + }, + "RxNormTraitList":{ + "type":"list", + "member":{"shape":"RxNormTrait"} + }, + "RxNormTraitName":{ + "type":"string", + "enum":["NEGATION"] + }, "S3Bucket":{ "type":"string", "max":63, diff --git a/botocore/data/datasync/2018-11-09/service-2.json b/botocore/data/datasync/2018-11-09/service-2.json index 0bc3a2a0..605b7dad 100644 --- a/botocore/data/datasync/2018-11-09/service-2.json +++ b/botocore/data/datasync/2018-11-09/service-2.json @@ -56,6 +56,20 @@ ], "documentation":"

Creates an endpoint for an Amazon EFS file system.

" }, + "CreateLocationFsxWindows":{ + "name":"CreateLocationFsxWindows", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLocationFsxWindowsRequest"}, + "output":{"shape":"CreateLocationFsxWindowsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Creates an endpoint for an Amazon FSx for Windows file system.

" + }, "CreateLocationNfs":{ "name":"CreateLocationNfs", "http":{ @@ -182,6 +196,20 @@ ], "documentation":"

Returns metadata, such as the path information about an Amazon EFS location.

" }, + "DescribeLocationFsxWindows":{ + "name":"DescribeLocationFsxWindows", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLocationFsxWindowsRequest"}, + "output":{"shape":"DescribeLocationFsxWindowsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Returns metadata, such as the path information about an Amazon FSx for Windows location.

" + }, "DescribeLocationNfs":{ "name":"DescribeLocationNfs", "http":{ @@ -515,7 +543,7 @@ ], "members":{ "Subdirectory":{ - "shape":"Subdirectory", + "shape":"EfsSubdirectory", "documentation":"

A subdirectory in the location’s path. This subdirectory in the EFS file system is used to read data from the EFS source location or write data to the EFS destination. By default, AWS DataSync uses the root directory.

Subdirectory must be specified with forward slashes. For example /path/to/folder.

" }, "EfsFilesystemArn":{ @@ -543,6 +571,54 @@ }, "documentation":"

CreateLocationEfs

" }, + "CreateLocationFsxWindowsRequest":{ + "type":"structure", + "required":[ + "FsxFilesystemArn", + "SecurityGroupArns", + "User", + "Password" + ], + "members":{ + "Subdirectory":{ + "shape":"FsxWindowsSubdirectory", + "documentation":"

A subdirectory in the location’s path. This subdirectory in the Amazon FSx for Windows file system is used to read data from the Amazon FSx for Windows source location or write data to the FSx for Windows destination.

" + }, + "FsxFilesystemArn":{ + "shape":"FsxFilesystemArn", + "documentation":"

The Amazon Resource Name (ARN) for the FSx for Windows file system.

" + }, + "SecurityGroupArns":{ + "shape":"Ec2SecurityGroupArnList", + "documentation":"

The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Windows file system.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.

" + }, + "User":{ + "shape":"SmbUser", + "documentation":"

The user who has the permissions to access files and folders in the FSx for Windows file system.

" + }, + "Domain":{ + "shape":"SmbDomain", + "documentation":"

The name of the Windows domain that the FSx for Windows server belongs to.

" + }, + "Password":{ + "shape":"SmbPassword", + "documentation":"

The password of the user who has the permissions to access files and folders in the FSx for Windows file system.
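A minimal sketch of creating an FSx for Windows location with this request shape through boto3; the ARNs, domain, credentials, and subdirectory are placeholder assumptions:

import boto3

# Illustrative values only; substitute real ARNs and credentials.
datasync = boto3.client("datasync", region_name="us-east-1")

location = datasync.create_location_fsx_windows(
    FsxFilesystemArn="arn:aws:fsx:us-east-1:111122223333:file-system/fs-0123456789abcdef0",
    SecurityGroupArns=[
        "arn:aws:ec2:us-east-1:111122223333:security-group/sg-0123456789abcdef0",
    ],
    User="datasync-user",
    Domain="corp.example.com",
    Password="example-password",
    Subdirectory="/share/reports",
)
print(location["LocationArn"])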

" + } + } + }, + "CreateLocationFsxWindowsResponse":{ + "type":"structure", + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows file system location that is created.

" + } + } + }, "CreateLocationNfsRequest":{ "type":"structure", "required":[ @@ -552,7 +628,7 @@ ], "members":{ "Subdirectory":{ - "shape":"NonEmptySubdirectory", + "shape":"NfsSubdirectory", "documentation":"

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server. run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.

" }, "ServerHostname":{ @@ -592,7 +668,7 @@ ], "members":{ "Subdirectory":{ - "shape":"Subdirectory", + "shape":"S3Subdirectory", "documentation":"

A subdirectory in the Amazon S3 bucket. This subdirectory in Amazon S3 is used to read data from the S3 source location or write data to the S3 destination.

" }, "S3BucketArn":{ @@ -632,7 +708,7 @@ ], "members":{ "Subdirectory":{ - "shape":"NonEmptySubdirectory", + "shape":"SmbSubdirectory", "documentation":"

The subdirectory in the SMB file system that is used to read data from the SMB source location or write data to the SMB destination. The SMB path should be a path that's exported by the SMB server, or a subdirectory of that path. The path should be such that it can be mounted by other SMB clients in your network.

Subdirectory must be specified with forward slashes. For example /path/to/folder.

To transfer all the data in the folder you specified, DataSync needs to have permissions to mount the SMB share, as well as to access all the data in that share. To ensure this, either ensure that the user/password specified belongs to the user who can mount the share, and who has the appropriate permissions for all of the files and directories that you want DataSync to access, or use credentials of a member of the Backup Operators group to mount the share. Doing either enables the agent to access the data. For the agent to access directories, you must additionally enable all execute access.

" }, "ServerHostname":{ @@ -851,6 +927,45 @@ }, "documentation":"

DescribeLocationEfsResponse

" }, + "DescribeLocationFsxWindowsRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows location to describe.

" + } + } + }, + "DescribeLocationFsxWindowsResponse":{ + "type":"structure", + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows location that was described.

" + }, + "LocationUri":{ + "shape":"LocationUri", + "documentation":"

The URL of the FSx for Windows location that was described.

" + }, + "SecurityGroupArns":{ + "shape":"Ec2SecurityGroupArnList", + "documentation":"

The Amazon Resource Names (ARNs) of the security groups that are configured for the FSx for Windows file system.

" + }, + "CreationTime":{ + "shape":"Time", + "documentation":"

The time that the FSx for Windows location was created.

" + }, + "User":{ + "shape":"SmbUser", + "documentation":"

The user who has the permissions to access files and folders in the FSx for Windows file system.

" + }, + "Domain":{ + "shape":"SmbDomain", + "documentation":"

The name of the Windows domain that the FSx for Windows server belongs to.

" + } + } + }, "DescribeLocationNfsRequest":{ "type":"structure", "required":["LocationArn"], @@ -1150,6 +1265,11 @@ "max":128, "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):elasticfilesystem:[a-z\\-0-9]*:[0-9]{12}:file-system/fs-.*$" }, + "EfsSubdirectory":{ + "type":"string", + "max":4096, + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]*$" + }, "Endpoint":{ "type":"string", "max":15, @@ -1195,6 +1315,16 @@ "max":409600, "pattern":"^[^\\x00]+$" }, + "FsxFilesystemArn":{ + "type":"string", + "max":128, + "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):fsx:[a-z\\-0-9]*:[0-9]{12}:file-system/fs-.*$" + }, + "FsxWindowsSubdirectory":{ + "type":"string", + "max":4096, + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$" + }, "Gid":{ "type":"string", "enum":[ @@ -1402,14 +1532,22 @@ }, "LocationUri":{ "type":"string", - "max":4355, - "pattern":"^(efs|nfs|s3|smb)://[a-zA-Z0-9.\\-]+$" + "max":4356, + "pattern":"^(efs|nfs|s3|smb|fsxw)://[a-zA-Z0-9.\\-]+$" }, "LogGroupArn":{ "type":"string", "max":562, "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):logs:[a-z\\-0-9]*:[0-9]{12}:log-group:([^:\\*]*)$" }, + "LogLevel":{ + "type":"string", + "enum":[ + "OFF", + "BASIC", + "TRANSFER" + ] + }, "MaxResults":{ "type":"integer", "max":100, @@ -1442,6 +1580,11 @@ }, "documentation":"

Represents the mount options that are available for DataSync to access an NFS location.

" }, + "NfsSubdirectory":{ + "type":"string", + "max":4096, + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]+$" + }, "NfsVersion":{ "type":"string", "enum":[ @@ -1451,11 +1594,6 @@ "NFS4_1" ] }, - "NonEmptySubdirectory":{ - "type":"string", - "max":4096, - "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]+$" - }, "OnPremConfig":{ "type":"structure", "required":["AgentArns"], @@ -1513,6 +1651,10 @@ "TaskQueueing":{ "shape":"TaskQueueing", "documentation":"

A value that determines whether tasks should be queued before executing the tasks. If set to ENABLED, the tasks will be queued. The default is ENABLED.

If you use the same agent to run multiple tasks you can enable the tasks to run in series. For more information see queue-task-execution.

" + }, + "LogLevel":{ + "shape":"LogLevel", + "documentation":"

A value that determines the type of logs DataSync will deliver to your AWS CloudWatch Logs file. If set to OFF, no logs will be delivered. BASIC will deliver a few logs per transfer operation and TRANSFER will deliver a verbose log that contains logs for every file that is transferred.

" } }, "documentation":"

Represents the options that are available to control the behavior of a StartTaskExecution operation. Behavior includes preserving metadata such as user ID (UID), group ID (GID), and file permissions, and also overwriting files in the destination, data integrity verification, and so on.

A task has a set of default options associated with it. If you don't specify an option in StartTaskExecution, the default value is used. You can override the default options on each task execution by specifying an overriding Options value to StartTaskExecution.
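A minimal sketch of overriding options for a single execution, including the new LogLevel value; the task ARN is a placeholder assumption:

import boto3

datasync = boto3.client("datasync", region_name="us-east-1")

execution = datasync.start_task_execution(
    TaskArn="arn:aws:datasync:us-east-1:111122223333:task/task-0123456789abcdef0",
    OverrideOptions={
        "VerifyMode": "ONLY_FILES_TRANSFERRED",
        "LogLevel": "TRANSFER",  # per-file logging, as described above
    },
)
print(execution["TaskExecutionArn"])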

" @@ -1614,6 +1756,11 @@ "DEEP_ARCHIVE" ] }, + "S3Subdirectory":{ + "type":"string", + "max":4096, + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]*$" + }, "ScheduleExpressionCron":{ "type":"string", "max":256, @@ -1645,6 +1792,11 @@ "pattern":"^.{0,104}$", "sensitive":true }, + "SmbSubdirectory":{ + "type":"string", + "max":4096, + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$" + }, "SmbUser":{ "type":"string", "max":104, @@ -1688,11 +1840,6 @@ }, "documentation":"

StartTaskExecutionResponse

" }, - "Subdirectory":{ - "type":"string", - "max":4096, - "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]*$" - }, "TagKey":{ "type":"string", "max":256, @@ -1753,7 +1900,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9\\s+=._:/-]+$" + "pattern":"^[a-zA-Z0-9\\s+=._:@/-]+$" }, "TaggableResourceArn":{ "type":"string", diff --git a/botocore/data/detective/2018-10-26/paginators-1.json b/botocore/data/detective/2018-10-26/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/detective/2018-10-26/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/detective/2018-10-26/service-2.json b/botocore/data/detective/2018-10-26/service-2.json new file mode 100644 index 00000000..078c7194 --- /dev/null +++ b/botocore/data/detective/2018-10-26/service-2.json @@ -0,0 +1,600 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-10-26", + "endpointPrefix":"api.detective", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Detective", + "serviceId":"Detective", + "signatureVersion":"v4", + "signingName":"detective", + "uid":"detective-2018-10-26" + }, + "operations":{ + "AcceptInvitation":{ + "name":"AcceptInvitation", + "http":{ + "method":"PUT", + "requestUri":"/invitation" + }, + "input":{"shape":"AcceptInvitationRequest"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Accepts an invitation for the member account to contribute data to a behavior graph. This operation can only be called by an invited member account.

The request provides the ARN of the behavior graph.

The member account status in the graph must be INVITED.

" + }, + "CreateGraph":{ + "name":"CreateGraph", + "http":{ + "method":"POST", + "requestUri":"/graph" + }, + "output":{"shape":"CreateGraphResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Creates a new behavior graph for the calling account, and sets that account as the master account. This operation is called by the account that is enabling Detective.

The operation also enables Detective for the calling account in the currently selected Region. It returns the ARN of the new behavior graph.

CreateGraph triggers a process to create the corresponding data tables for the new behavior graph.

An account can only be the master account for one behavior graph within a Region. If the same account calls CreateGraph with the same master account, it always returns the same behavior graph ARN. It does not create a new behavior graph.

" + }, + "CreateMembers":{ + "name":"CreateMembers", + "http":{ + "method":"POST", + "requestUri":"/graph/members" + }, + "input":{"shape":"CreateMembersRequest"}, + "output":{"shape":"CreateMembersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Sends a request to invite the specified AWS accounts to be member accounts in the behavior graph. This operation can only be called by the master account for a behavior graph.

CreateMembers verifies the accounts and then sends invitations to the verified accounts.

The request provides the behavior graph ARN and the list of accounts to invite.

The response separates the requested accounts into two lists:

  • The accounts that CreateMembers was able to start the verification for. This list includes member accounts that are being verified, that have passed verification and are being sent an invitation, and that have failed verification.

  • The accounts that CreateMembers was unable to process. This list includes accounts that were already invited to be member accounts in the behavior graph.

" + }, + "DeleteGraph":{ + "name":"DeleteGraph", + "http":{ + "method":"POST", + "requestUri":"/graph/removal" + }, + "input":{"shape":"DeleteGraphRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Disables the specified behavior graph and queues it to be deleted. This operation removes the graph from each member account's list of behavior graphs.

DeleteGraph can only be called by the master account for a behavior graph.

" + }, + "DeleteMembers":{ + "name":"DeleteMembers", + "http":{ + "method":"POST", + "requestUri":"/graph/members/removal" + }, + "input":{"shape":"DeleteMembersRequest"}, + "output":{"shape":"DeleteMembersResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Deletes one or more member accounts from the master account behavior graph. This operation can only be called by a Detective master account. That account cannot use DeleteMembers to delete its own account from the behavior graph. To disable a behavior graph, the master account uses the DeleteGraph API method.

" + }, + "DisassociateMembership":{ + "name":"DisassociateMembership", + "http":{ + "method":"POST", + "requestUri":"/membership/removal" + }, + "input":{"shape":"DisassociateMembershipRequest"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Removes the member account from the specified behavior graph. This operation can only be called by a member account that has the ENABLED status.

" + }, + "GetMembers":{ + "name":"GetMembers", + "http":{ + "method":"POST", + "requestUri":"/graph/members/get" + }, + "input":{"shape":"GetMembersRequest"}, + "output":{"shape":"GetMembersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Returns the membership details for specified member accounts for a behavior graph.

" + }, + "ListGraphs":{ + "name":"ListGraphs", + "http":{ + "method":"POST", + "requestUri":"/graphs/list" + }, + "input":{"shape":"ListGraphsRequest"}, + "output":{"shape":"ListGraphsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Returns the list of behavior graphs that the calling account is a master of. This operation can only be called by a master account.

Because an account can currently only be the master of one behavior graph within a Region, the results always contain a single graph.

" + }, + "ListInvitations":{ + "name":"ListInvitations", + "http":{ + "method":"POST", + "requestUri":"/invitations/list" + }, + "input":{"shape":"ListInvitationsRequest"}, + "output":{"shape":"ListInvitationsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Retrieves the list of open and accepted behavior graph invitations for the member account. This operation can only be called by a member account.

Open invitations are invitations that the member account has not responded to.

The results do not include behavior graphs for which the member account declined the invitation. The results also do not include behavior graphs that the member account resigned from or was removed from.
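A minimal sketch of the member-account side of this flow, assuming the call is made with the credentials of an invited member account:

import boto3

detective = boto3.client("detective", region_name="us-east-1")

invitations = detective.list_invitations()
for invitation in invitations["Invitations"]:
    if invitation["Status"] == "INVITED":
        # Accepting requires the member status in the graph to be INVITED.
        detective.accept_invitation(GraphArn=invitation["GraphArn"])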

" + }, + "ListMembers":{ + "name":"ListMembers", + "http":{ + "method":"POST", + "requestUri":"/graph/members/list" + }, + "input":{"shape":"ListMembersRequest"}, + "output":{"shape":"ListMembersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Retrieves the list of member accounts for a behavior graph. Does not return member accounts that were removed from the behavior graph.

" + }, + "RejectInvitation":{ + "name":"RejectInvitation", + "http":{ + "method":"POST", + "requestUri":"/invitation/removal" + }, + "input":{"shape":"RejectInvitationRequest"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Amazon Detective is currently in preview.

Rejects an invitation to contribute the account data to a behavior graph. This operation must be called by a member account that has the INVITED status.

" + } + }, + "shapes":{ + "AcceptInvitationRequest":{ + "type":"structure", + "required":["GraphArn"], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the behavior graph that the member account is accepting the invitation for.

The member account status in the behavior graph must be INVITED.

" + } + } + }, + "Account":{ + "type":"structure", + "required":[ + "AccountId", + "EmailAddress" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account identifier of the AWS account.

" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The AWS account root user email address for the AWS account.

" + } + }, + "documentation":"

Amazon Detective is currently in preview.

An AWS account that is the master of or a member of a behavior graph.

" + }, + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^[0-9]+$" + }, + "AccountIdList":{ + "type":"list", + "member":{"shape":"AccountId"}, + "max":50, + "min":1 + }, + "AccountList":{ + "type":"list", + "member":{"shape":"Account"}, + "max":50, + "min":1 + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request attempted an invalid action.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateGraphResponse":{ + "type":"structure", + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the new behavior graph.

" + } + } + }, + "CreateMembersRequest":{ + "type":"structure", + "required":[ + "GraphArn", + "Accounts" + ], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the behavior graph to invite the member accounts to contribute their data to.

" + }, + "Message":{ + "shape":"EmailMessage", + "documentation":"

Customized message text to include in the invitation email message to the invited member accounts.

" + }, + "Accounts":{ + "shape":"AccountList", + "documentation":"

The list of AWS accounts to invite to become member accounts in the behavior graph. For each invited account, the account list contains the account identifier and the AWS account root user email address.

" + } + } + }, + "CreateMembersResponse":{ + "type":"structure", + "members":{ + "Members":{ + "shape":"MemberDetailList", + "documentation":"

The set of member account invitation requests that Detective was able to process. This includes accounts that are being verified, that failed verification, and that passed verification and are being sent an invitation.

" + }, + "UnprocessedAccounts":{ + "shape":"UnprocessedAccountList", + "documentation":"

The list of accounts for which Detective was unable to process the invitation request. For each account, the list provides the reason why the request could not be processed. The list includes accounts that are already member accounts in the behavior graph.

" + } + } + }, + "DeleteGraphRequest":{ + "type":"structure", + "required":["GraphArn"], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the behavior graph to disable.

" + } + } + }, + "DeleteMembersRequest":{ + "type":"structure", + "required":[ + "GraphArn", + "AccountIds" + ], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the behavior graph to delete members from.

" + }, + "AccountIds":{ + "shape":"AccountIdList", + "documentation":"

The list of AWS account identifiers for the member accounts to delete from the behavior graph.

" + } + } + }, + "DeleteMembersResponse":{ + "type":"structure", + "members":{ + "AccountIds":{ + "shape":"AccountIdList", + "documentation":"

The list of AWS account identifiers for the member accounts that Detective successfully deleted from the behavior graph.

" + }, + "UnprocessedAccounts":{ + "shape":"UnprocessedAccountList", + "documentation":"

The list of member accounts that Detective was not able to delete from the behavior graph. For each member account, provides the reason that the deletion could not be processed.

" + } + } + }, + "DisassociateMembershipRequest":{ + "type":"structure", + "required":["GraphArn"], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the behavior graph to remove the member account from.

The member account's member status in the behavior graph must be ENABLED.

" + } + } + }, + "EmailAddress":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^.+@.+$" + }, + "EmailMessage":{ + "type":"string", + "max":1000, + "min":1 + }, + "ErrorMessage":{"type":"string"}, + "GetMembersRequest":{ + "type":"structure", + "required":[ + "GraphArn", + "AccountIds" + ], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the behavior graph for which to request the member details.

" + }, + "AccountIds":{ + "shape":"AccountIdList", + "documentation":"

The list of AWS account identifiers for the member accounts for which to return member details.

You cannot use GetMembers to retrieve information about member accounts that were removed from the behavior graph.

" + } + } + }, + "GetMembersResponse":{ + "type":"structure", + "members":{ + "MemberDetails":{ + "shape":"MemberDetailList", + "documentation":"

The member account details that Detective is returning in response to the request.

" + }, + "UnprocessedAccounts":{ + "shape":"UnprocessedAccountList", + "documentation":"

The requested member accounts for which Detective was unable to return member details.

For each account, provides the reason why the request could not be processed.

" + } + } + }, + "Graph":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the behavior graph.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the behavior graph was created. The value is in milliseconds since the epoch.

" + } + }, + "documentation":"

Amazon Detective is currently in preview.

A behavior graph in Detective.

" + }, + "GraphArn":{ + "type":"string", + "pattern":"^arn:aws[-\\w]{0,10}?:detective:[-\\w]{2,20}?:\\d{12}?:graph:[abcdef\\d]{32}?$" + }, + "GraphList":{ + "type":"list", + "member":{"shape":"Graph"} + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request was valid but failed because of a problem with the service.

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "ListGraphsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

For requests to get the next page of results, the pagination token that was returned with the previous set of results. The initial request does not include a pagination token.

" + }, + "MaxResults":{ + "shape":"MemberResultsLimit", + "documentation":"

The maximum number of graphs to return at a time. The total must be less than the overall limit on the number of results to return, which is currently 200.

" + } + } + }, + "ListGraphsResponse":{ + "type":"structure", + "members":{ + "GraphList":{ + "shape":"GraphList", + "documentation":"

A list of behavior graphs that the account is a master for.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more behavior graphs remaining in the results, then this is the pagination token to use to request the next page of behavior graphs.

" + } + } + }, + "ListInvitationsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

For requests to retrieve the next page of results, the pagination token that was returned with the previous page of results. The initial request does not include a pagination token.

" + }, + "MaxResults":{ + "shape":"MemberResultsLimit", + "documentation":"

The maximum number of behavior graph invitations to return in the response. The total must be less than the overall limit on the number of results to return, which is currently 200.

" + } + } + }, + "ListInvitationsResponse":{ + "type":"structure", + "members":{ + "Invitations":{ + "shape":"MemberDetailList", + "documentation":"

The list of behavior graphs for which the member account has open or accepted invitations.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more behavior graphs remaining in the results, then this is the pagination token to use to request the next page of behavior graphs.

" + } + } + }, + "ListMembersRequest":{ + "type":"structure", + "required":["GraphArn"], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the behavior graph for which to retrieve the list of member accounts.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

For requests to retrieve the next page of member account results, the pagination token that was returned with the previous page of results. The initial request does not include a pagination token.

" + }, + "MaxResults":{ + "shape":"MemberResultsLimit", + "documentation":"

The maximum number of member accounts to include in the response. The total must be less than the overall limit on the number of results to return, which is currently 200.

" + } + } + }, + "ListMembersResponse":{ + "type":"structure", + "members":{ + "MemberDetails":{ + "shape":"MemberDetailList", + "documentation":"

The list of member accounts in the behavior graph.

The results include member accounts that did not pass verification and member accounts that have not yet accepted the invitation to the behavior graph. The results do not include member accounts that were removed from the behavior graph.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more member accounts remaining in the results, then this is the pagination token to use to request the next page of member accounts.

" + } + } + }, + "MemberDetail":{ + "type":"structure", + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account identifier for the member account.

" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The AWS account root user email address for the member account.

" + }, + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the behavior graph that the member account was invited to.

" + }, + "MasterId":{ + "shape":"AccountId", + "documentation":"

The AWS account identifier of the master account for the behavior graph.

" + }, + "Status":{ + "shape":"MemberStatus", + "documentation":"

The current membership status of the member account. The status can have one of the following values:

  • INVITED - Indicates that the member was sent an invitation but has not yet responded.

  • VERIFICATION_IN_PROGRESS - Indicates that Detective is verifying that the account identifier and email address provided for the member account match. If they do match, then Detective sends the invitation. If the email address and account identifier don't match, then the member cannot be added to the behavior graph.

  • VERIFICATION_FAILED - Indicates that the account and email address provided for the member account do not match, and Detective did not send an invitation to the account.

  • ENABLED - Indicates that the member account accepted the invitation to contribute to the behavior graph.

Member accounts that declined an invitation or that were removed from the behavior graph are not included.

" + }, + "InvitedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that Detective sent the invitation to the member account. The value is in milliseconds since the epoch.

" + }, + "UpdatedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the member account was last updated. The value is in milliseconds since the epoch.

" + } + }, + "documentation":"

Amazon Detective is currently in preview.

Details about a member account that was invited to contribute to a behavior graph.

" + }, + "MemberDetailList":{ + "type":"list", + "member":{"shape":"MemberDetail"} + }, + "MemberResultsLimit":{ + "type":"integer", + "box":true, + "max":200, + "min":1 + }, + "MemberStatus":{ + "type":"string", + "enum":[ + "INVITED", + "VERIFICATION_IN_PROGRESS", + "VERIFICATION_FAILED", + "ENABLED" + ] + }, + "PaginationToken":{ + "type":"string", + "max":1024, + "min":1 + }, + "RejectInvitationRequest":{ + "type":"structure", + "required":["GraphArn"], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

The ARN of the behavior graph to reject the invitation to.

The member account's current member status in the behavior graph must be INVITED.

" + } + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request refers to a nonexistent resource.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

This request would cause the number of member accounts in the behavior graph to exceed the maximum allowed. A behavior graph cannot have more than 1000 member accounts.

", + "error":{"httpStatusCode":402}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "UnprocessedAccount":{ + "type":"structure", + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account identifier of the member account that was not processed.

" + }, + "Reason":{ + "shape":"UnprocessedReason", + "documentation":"

The reason that the member account request could not be processed.

" + } + }, + "documentation":"

Amazon Detective is currently in preview.

A member account that was included in a request but for which the request could not be processed.

" + }, + "UnprocessedAccountList":{ + "type":"list", + "member":{"shape":"UnprocessedAccount"} + }, + "UnprocessedReason":{"type":"string"}, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request parameters are invalid.

", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

Amazon Detective is currently in preview. The Detective API can only be used by accounts that are admitted into the preview.

Detective uses machine learning and purpose-built visualizations to help you analyze and investigate security issues across your Amazon Web Services (AWS) workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from AWS CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.

The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by a master account.

Every behavior graph is specific to a Region. You can only use the API to manage graphs that belong to the Region that is associated with the currently selected endpoint.

A Detective master account can use the Detective API to do the following:

  • Enable and disable Detective. Enabling Detective creates a new behavior graph.

  • View the list of member accounts in a behavior graph.

  • Add member accounts to a behavior graph.

  • Remove member accounts from a behavior graph.

A member account can use the Detective API to do the following:

  • View the list of behavior graphs that they are invited to.

  • Accept an invitation to contribute to a behavior graph.

  • Decline an invitation to contribute to a behavior graph.

  • Remove their account from a behavior graph.

All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.
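A minimal sketch of the master-account workflow described above, using the CreateGraph, CreateMembers, and ListMembers operations defined in this file; the account ID, email address, and region are placeholder assumptions:

import boto3

detective = boto3.client("detective", region_name="us-east-1")

# Enabling Detective creates (or returns) the behavior graph for this account.
graph_arn = detective.create_graph()["GraphArn"]

detective.create_members(
    GraphArn=graph_arn,
    Message="Please contribute your account data to our behavior graph.",
    Accounts=[
        {"AccountId": "111122223333", "EmailAddress": "security@example.com"},
    ],
)

members = detective.list_members(GraphArn=graph_arn)
for member in members["MemberDetails"]:
    print(member["AccountId"], member["Status"])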

" +} diff --git a/botocore/data/devicefarm/2015-06-23/service-2.json b/botocore/data/devicefarm/2015-06-23/service-2.json index 975b226d..d47c3013 100644 --- a/botocore/data/devicefarm/2015-06-23/service-2.json +++ b/botocore/data/devicefarm/2015-06-23/service-2.json @@ -75,7 +75,7 @@ {"shape":"ServiceAccountException"}, {"shape":"TagOperationException"} ], - "documentation":"

Creates a new project.

" + "documentation":"

Creates a project.

" }, "CreateRemoteAccessSession":{ "name":"CreateRemoteAccessSession", @@ -93,6 +93,34 @@ ], "documentation":"

Specifies and starts a remote access session.

" }, + "CreateTestGridProject":{ + "name":"CreateTestGridProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTestGridProjectRequest"}, + "output":{"shape":"CreateTestGridProjectResult"}, + "errors":[ + {"shape":"InternalServiceException"} + ], + "documentation":"

Creates a Selenium testing project. Projects are used to track TestGridSession instances.

" + }, + "CreateTestGridUrl":{ + "name":"CreateTestGridUrl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTestGridUrlRequest"}, + "output":{"shape":"CreateTestGridUrlResult"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ArgumentException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Creates a signed, short-term URL that can be passed to a Selenium RemoteWebDriver constructor.
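A minimal sketch of pairing CreateTestGridProject with CreateTestGridUrl through boto3; the project name and region are placeholder assumptions, and the result field names (testGridProject, url, expires) come from result shapes that are not shown in this hunk:

import boto3

devicefarm = boto3.client("devicefarm", region_name="us-west-2")

project = devicefarm.create_test_grid_project(name="example-selenium-project")

grid = devicefarm.create_test_grid_url(
    projectArn=project["testGridProject"]["arn"],
    expiresInSeconds=300,
)

# grid["url"] is the signed, short-term URL to pass as the command_executor
# of a Selenium RemoteWebDriver; it expires at grid["expires"].
print(grid["url"])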

" + }, "CreateUpload":{ "name":"CreateUpload", "http":{ @@ -186,7 +214,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Deletes an AWS Device Farm project, given the project ARN.

Note Deleting this resource does not stop an in-progress run.

" + "documentation":"

Deletes an AWS Device Farm project, given the project ARN.

Deleting this resource does not stop an in-progress run.

" }, "DeleteRemoteAccessSession":{ "name":"DeleteRemoteAccessSession", @@ -218,7 +246,23 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Deletes the run, given the run ARN.

Note Deleting this resource does not stop an in-progress run.

" + "documentation":"

Deletes the run, given the run ARN.

Deleting this resource does not stop an in-progress run.

" + }, + "DeleteTestGridProject":{ + "name":"DeleteTestGridProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTestGridProjectRequest"}, + "output":{"shape":"DeleteTestGridProjectResult"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ArgumentException"}, + {"shape":"CannotDeleteException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Deletes a Selenium testing project and all content generated under it.

You cannot undo this operation.

You cannot delete a project if it has active sessions.

" }, "DeleteUpload":{ "name":"DeleteUpload", @@ -266,7 +310,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Returns the number of unmetered iOS and/or unmetered Android devices that have been purchased by the account.

" + "documentation":"

Returns the number of unmetered iOS or unmetered Android devices that have been purchased by the account.

" }, "GetDevice":{ "name":"GetDevice", @@ -298,7 +342,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Returns information about a device instance belonging to a private device fleet.

" + "documentation":"

Returns information about a device instance that belongs to a private device fleet.

" }, "GetDevicePool":{ "name":"GetDevicePool", @@ -395,7 +439,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Gets the current status and future status of all offerings purchased by an AWS account. The response indicates how many offerings are currently available and the offerings that will be available in the next period. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.

" + "documentation":"

Gets the current status and future status of all offerings purchased by an AWS account. The response indicates how many offerings are currently available and the offerings that will be available in the next period. The API returns a NotEligible error if the user is not permitted to invoke the operation. If you must be able to invoke this operation, contact aws-devicefarm-support@amazon.com.

" }, "GetProject":{ "name":"GetProject", @@ -477,6 +521,36 @@ ], "documentation":"

Gets information about a test.

" }, + "GetTestGridProject":{ + "name":"GetTestGridProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTestGridProjectRequest"}, + "output":{"shape":"GetTestGridProjectResult"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ArgumentException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieves information about a Selenium testing project.

" + }, + "GetTestGridSession":{ + "name":"GetTestGridSession", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTestGridSessionRequest"}, + "output":{"shape":"GetTestGridSessionResult"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ArgumentException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

A session is an instance of a browser created through a RemoteWebDriver with the URL from CreateTestGridUrlResult$url. You can use the following to look up sessions:

" + }, "GetUpload":{ "name":"GetUpload", "http":{ @@ -651,7 +725,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Returns a list of offering promotions. Each offering promotion record contains the ID and description of the promotion. The API returns a NotEligible error if the caller is not permitted to invoke the operation. Contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.

" + "documentation":"

Returns a list of offering promotions. Each offering promotion record contains the ID and description of the promotion. The API returns a NotEligible error if the caller is not permitted to invoke the operation. Contact aws-devicefarm-support@amazon.com if you must be able to invoke this operation.

" }, "ListOfferingTransactions":{ "name":"ListOfferingTransactions", @@ -668,7 +742,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Returns a list of all historical purchases, renewals, and system renewal transactions for an AWS account. The list is paginated and ordered by a descending timestamp (most recent transactions are first). The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.

" + "documentation":"

Returns a list of all historical purchases, renewals, and system renewal transactions for an AWS account. The list is paginated and ordered by a descending timestamp (most recent transactions are first). The API returns a NotEligible error if the user is not permitted to invoke the operation. If you must be able to invoke this operation, contact aws-devicefarm-support@amazon.com.

" }, "ListOfferings":{ "name":"ListOfferings", @@ -685,7 +759,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Returns a list of products or offerings that the user can manage through the API. Each offering record indicates the recurring price per unit and the frequency for that offering. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.

" + "documentation":"

Returns a list of products or offerings that the user can manage through the API. Each offering record indicates the recurring price per unit and the frequency for that offering. The API returns a NotEligible error if the user is not permitted to invoke the operation. If you must be able to invoke this operation, contact aws-devicefarm-support@amazon.com.

" }, "ListProjects":{ "name":"ListProjects", @@ -776,11 +850,71 @@ "input":{"shape":"ListTagsForResourceRequest"}, "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ + {"shape":"ArgumentException"}, {"shape":"NotFoundException"}, {"shape":"TagOperationException"} ], "documentation":"

List the tags for an AWS Device Farm resource.

" }, + "ListTestGridProjects":{ + "name":"ListTestGridProjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTestGridProjectsRequest"}, + "output":{"shape":"ListTestGridProjectsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Gets a list of all Selenium testing projects in your account.

" + }, + "ListTestGridSessionActions":{ + "name":"ListTestGridSessionActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTestGridSessionActionsRequest"}, + "output":{"shape":"ListTestGridSessionActionsResult"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ArgumentException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns a list of the actions taken in a TestGridSession.

" + }, + "ListTestGridSessionArtifacts":{ + "name":"ListTestGridSessionArtifacts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTestGridSessionArtifactsRequest"}, + "output":{"shape":"ListTestGridSessionArtifactsResult"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ArgumentException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieves a list of artifacts created during the session.

" + }, + "ListTestGridSessions":{ + "name":"ListTestGridSessions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTestGridSessionsRequest"}, + "output":{"shape":"ListTestGridSessionsResult"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ArgumentException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieves a list of sessions for a TestGridProject.

" + }, "ListTests":{ "name":"ListTests", "http":{ @@ -811,7 +945,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Gets information about unique problems.

" + "documentation":"

Gets information about unique problems, such as exceptions or crashes.

Unique problems are defined as a single instance of an error across a run, job, or suite. For example, if a call in your application consistently raises an exception (OutOfBoundsException in MyActivity.java:386), ListUniqueProblems returns a single entry instead of many individual entries for that exception.
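A short boto3 sketch of reading that grouped response (the run ARN is a placeholder; the result is keyed by execution result, such as FAILED or ERRORED):

import boto3

client = boto3.client('devicefarm', region_name='us-west-2')

response = client.list_unique_problems(
    arn='arn:aws:devicefarm:us-west-2:123456789012:run:EXAMPLE')

for result, unique_problems in response['uniqueProblems'].items():
    for problem in unique_problems:
        # One entry per distinct message, however many times it occurred.
        print(result, problem['message'])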

" }, "ListUploads":{ "name":"ListUploads", @@ -858,7 +992,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Immediately purchases offerings for an AWS account. Offerings renew with the latest total purchased quantity for an offering, unless the renewal was overridden. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.

" + "documentation":"

Immediately purchases offerings for an AWS account. Offerings renew with the latest total purchased quantity for an offering, unless the renewal was overridden. The API returns a NotEligible error if the user is not permitted to invoke the operation. If you must be able to invoke this operation, contact aws-devicefarm-support@amazon.com.

" }, "RenewOffering":{ "name":"RenewOffering", @@ -875,7 +1009,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Explicitly sets the quantity of devices to renew for an offering, starting from the effectiveDate of the next period. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.

" + "documentation":"

Explicitly sets the quantity of devices to renew for an offering, starting from the effectiveDate of the next period. The API returns a NotEligible error if the user is not permitted to invoke the operation. If you must be able to invoke this operation, contact aws-devicefarm-support@amazon.com.

" }, "ScheduleRun":{ "name":"ScheduleRun", @@ -908,7 +1042,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Initiates a stop request for the current job. AWS Device Farm will immediately stop the job on the device where tests have not started executing, and you will not be billed for this device. On the device where tests have started executing, Setup Suite and Teardown Suite tests will run to completion before stopping execution on the device. You will be billed for Setup, Teardown, and any tests that were in progress or already completed.

" + "documentation":"

Initiates a stop request for the current job. AWS Device Farm immediately stops the job on the device where tests have not started. You are not billed for this device. On the device where tests have started, setup suite and teardown suite tests run to completion on the device. You are billed for setup, teardown, and any tests that were in progress or already completed.

" }, "StopRemoteAccessSession":{ "name":"StopRemoteAccessSession", @@ -940,7 +1074,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Initiates a stop request for the current test run. AWS Device Farm will immediately stop the run on devices where tests have not started executing, and you will not be billed for these devices. On devices where tests have started executing, Setup Suite and Teardown Suite tests will run to completion before stopping execution on those devices. You will be billed for Setup, Teardown, and any tests that were in progress or already completed.

" + "documentation":"

Initiates a stop request for the current test run. AWS Device Farm immediately stops the run on devices where tests have not started. You are not billed for these devices. On devices where tests have started executing, setup suite and teardown suite tests run to completion on those devices. You are billed for setup, teardown, and any tests that were in progress or already completed.
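For example, a one-call boto3 sketch (the run ARN is a placeholder):

import boto3

client = boto3.client('devicefarm', region_name='us-west-2')

run = client.stop_run(
    arn='arn:aws:devicefarm:us-west-2:123456789012:run:EXAMPLE')['run']
print(run['status'])  # typically STOPPING while in-flight work winds down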

" }, "TagResource":{ "name":"TagResource", @@ -951,12 +1085,13 @@ "input":{"shape":"TagResourceRequest"}, "output":{"shape":"TagResourceResponse"}, "errors":[ + {"shape":"ArgumentException"}, {"shape":"NotFoundException"}, {"shape":"TagOperationException"}, {"shape":"TooManyTagsException"}, {"shape":"TagPolicyException"} ], - "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well.

" + "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are also deleted.
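A brief boto3 sketch of tagging a project and reading the tags back (the ARN is a placeholder; note that these tagging operations take the capitalized ResourceARN and Tags parameters from their request shapes):

import boto3

client = boto3.client('devicefarm', region_name='us-west-2')
project_arn = 'arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE'

client.tag_resource(
    ResourceARN=project_arn,
    Tags=[{'Key': 'team', 'Value': 'mobile-qa'}],
)
print(client.list_tags_for_resource(ResourceARN=project_arn)['Tags'])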

" }, "UntagResource":{ "name":"UntagResource", @@ -967,6 +1102,7 @@ "input":{"shape":"UntagResourceRequest"}, "output":{"shape":"UntagResourceResponse"}, "errors":[ + {"shape":"ArgumentException"}, {"shape":"NotFoundException"}, {"shape":"TagOperationException"} ], @@ -986,7 +1122,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Updates information about an existing private device instance.

" + "documentation":"

Updates information about a private device instance.

" }, "UpdateDevicePool":{ "name":"UpdateDevicePool", @@ -1034,7 +1170,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Updates the network profile with specific settings.

" + "documentation":"

Updates the network profile.

" }, "UpdateProject":{ "name":"UpdateProject", @@ -1052,6 +1188,21 @@ ], "documentation":"

Modifies the specified project name, given the project ARN and a new name.

" }, + "UpdateTestGridProject":{ + "name":"UpdateTestGridProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTestGridProjectRequest"}, + "output":{"shape":"UpdateTestGridProjectResult"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ArgumentException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Changes the details of a Selenium testing project.

" + }, "UpdateUpload":{ "name":"UpdateUpload", "http":{ @@ -1066,7 +1217,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Update an uploaded test specification (test spec).

" + "documentation":"

Updates an uploaded test spec.

" }, "UpdateVPCEConfiguration":{ "name":"UpdateVPCEConfiguration", @@ -1082,7 +1233,7 @@ {"shape":"ServiceAccountException"}, {"shape":"InvalidOperationException"} ], - "documentation":"

Updates information about an existing Amazon Virtual Private Cloud (VPC) endpoint configuration.

" + "documentation":"

Updates information about an Amazon Virtual Private Cloud (VPC) endpoint configuration.

" } }, "shapes":{ @@ -1108,7 +1259,7 @@ }, "maxJobTimeoutMinutes":{ "shape":"JobTimeoutMinutes", - "documentation":"

The maximum number of minutes a test run will execute before it times out.

" + "documentation":"

The maximum number of minutes a test run executes before it times out.

" }, "trialMinutes":{ "shape":"TrialMinutes", @@ -1120,20 +1271,21 @@ }, "defaultJobTimeoutMinutes":{ "shape":"JobTimeoutMinutes", - "documentation":"

The default number of minutes (at the account level) a test run will execute before it times out. The default value is 150 minutes.

" + "documentation":"

The default number of minutes (at the account level) a test run executes before it times out. The default value is 150 minutes.

" }, "skipAppResign":{ "shape":"SkipAppResign", - "documentation":"

When set to true, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.

For more information about how Device Farm re-signs your app(s), see Do you modify my app? in the AWS Device Farm FAQs.

" + "documentation":"

When set to true, for private devices, Device Farm does not sign your app again. For public devices, Device Farm always signs your apps again.

For more information about how Device Farm re-signs your apps, see Do you modify my app? in the AWS Device Farm FAQs.

" } }, - "documentation":"

A container for account-level settings within AWS Device Farm.

" + "documentation":"

A container for account-level settings in AWS Device Farm.
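These settings are read back with GetAccountSettings; a minimal boto3 sketch:

import boto3

client = boto3.client('devicefarm', region_name='us-west-2')

settings = client.get_account_settings()['accountSettings']
print(settings.get('unmeteredDevices'))
print(settings.get('defaultJobTimeoutMinutes'))
print(settings.get('skipAppResign'))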

" }, "AccountsCleanup":{"type":"boolean"}, "AmazonResourceName":{ "type":"string", "max":1011, - "min":32 + "min":32, + "pattern":"^arn:.+" }, "AmazonResourceNames":{ "type":"list", @@ -1168,7 +1320,7 @@ }, "type":{ "shape":"ArtifactType", - "documentation":"

The artifact's type.

Allowed values include the following:

  • UNKNOWN: An unknown type.

  • SCREENSHOT: The screenshot type.

  • DEVICE_LOG: The device log type.

  • MESSAGE_LOG: The message log type.

  • VIDEO_LOG: The video log type.

  • RESULT_LOG: The result log type.

  • SERVICE_LOG: The service log type.

  • WEBKIT_LOG: The web kit log type.

  • INSTRUMENTATION_OUTPUT: The instrumentation type.

  • EXERCISER_MONKEY_OUTPUT: For Android, the artifact (log) generated by an Android fuzz test.

  • CALABASH_JSON_OUTPUT: The Calabash JSON output type.

  • CALABASH_PRETTY_OUTPUT: The Calabash pretty output type.

  • CALABASH_STANDARD_OUTPUT: The Calabash standard output type.

  • CALABASH_JAVA_XML_OUTPUT: The Calabash Java XML output type.

  • AUTOMATION_OUTPUT: The automation output type.

  • APPIUM_SERVER_OUTPUT: The Appium server output type.

  • APPIUM_JAVA_OUTPUT: The Appium Java output type.

  • APPIUM_JAVA_XML_OUTPUT: The Appium Java XML output type.

  • APPIUM_PYTHON_OUTPUT: The Appium Python output type.

  • APPIUM_PYTHON_XML_OUTPUT: The Appium Python XML output type.

  • EXPLORER_EVENT_LOG: The Explorer event log output type.

  • EXPLORER_SUMMARY_LOG: The Explorer summary log output type.

  • APPLICATION_CRASH_REPORT: The application crash report output type.

  • XCTEST_LOG: The Xcode test output type.

  • VIDEO: The Video output type.

  • CUSTOMER_ARTIFACT:The Customer Artifact output type.

  • CUSTOMER_ARTIFACT_LOG: The Customer Artifact Log output type.

  • TESTSPEC_OUTPUT: The Test Spec Output type.

" + "documentation":"

The artifact's type.

Allowed values include the following:

  • UNKNOWN

  • SCREENSHOT

  • DEVICE_LOG

  • MESSAGE_LOG

  • VIDEO_LOG

  • RESULT_LOG

  • SERVICE_LOG

  • WEBKIT_LOG

  • INSTRUMENTATION_OUTPUT

  • EXERCISER_MONKEY_OUTPUT: The artifact (log) generated by an Android fuzz test.

  • CALABASH_JSON_OUTPUT

  • CALABASH_PRETTY_OUTPUT

  • CALABASH_STANDARD_OUTPUT

  • CALABASH_JAVA_XML_OUTPUT

  • AUTOMATION_OUTPUT

  • APPIUM_SERVER_OUTPUT

  • APPIUM_JAVA_OUTPUT

  • APPIUM_JAVA_XML_OUTPUT

  • APPIUM_PYTHON_OUTPUT

  • APPIUM_PYTHON_XML_OUTPUT

  • EXPLORER_EVENT_LOG

  • EXPLORER_SUMMARY_LOG

  • APPLICATION_CRASH_REPORT

  • XCTEST_LOG

  • VIDEO

  • CUSTOMER_ARTIFACT

  • CUSTOMER_ARTIFACT_LOG

  • TESTSPEC_OUTPUT

" }, "extension":{ "shape":"String", @@ -1176,7 +1328,7 @@ }, "url":{ "shape":"URL", - "documentation":"

The pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the artifact's file.

" + "documentation":"

The presigned Amazon S3 URL that can be used with a GET request to download the artifact's file.

" } }, "documentation":"

Represents the output of a test. Examples of artifacts include logs and screenshots.
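For example, a sketch that lists a run's file artifacts and downloads each one from its presigned URL (the run ARN is a placeholder):

import boto3
import urllib.request

client = boto3.client('devicefarm', region_name='us-west-2')

artifacts = client.list_artifacts(
    arn='arn:aws:devicefarm:us-west-2:123456789012:run:EXAMPLE',
    type='FILE',
)['artifacts']

for artifact in artifacts:
    # Each artifact exposes a presigned GET URL plus a name and extension.
    filename = '{}.{}'.format(artifact['name'].replace('/', '_'), artifact['extension'])
    urllib.request.urlretrieve(artifact['url'], filename)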

" @@ -1243,14 +1395,22 @@ }, "architecture":{ "shape":"String", - "documentation":"

The CPU's architecture, for example x86 or ARM.

" + "documentation":"

The CPU's architecture (for example, x86 or ARM).

" }, "clock":{ "shape":"Double", "documentation":"

The clock speed of the device's CPU, expressed in hertz (Hz). For example, a 1.2 GHz CPU is expressed as 1200000000.

" } }, - "documentation":"

Represents the amount of CPU that an app is using on a physical device.

Note that this does not represent system-wide CPU usage.

" + "documentation":"

Represents the amount of CPU that an app is using on a physical device. Does not represent system-wide CPU usage.

" + }, + "CannotDeleteException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

The requested object could not be deleted.

", + "exception":true }, "ClientId":{ "type":"string", @@ -1322,7 +1482,7 @@ }, "maxDevices":{ "shape":"Integer", - "documentation":"

The number of devices that Device Farm can add to your device pool. Device Farm adds devices that are available and that meet the criteria that you assign for the rules parameter. Depending on how many devices meet these constraints, your device pool might contain fewer devices than the value for this parameter.

By specifying the maximum number of devices, you can control the costs that you incur by running tests.

" + "documentation":"

The number of devices that Device Farm can add to your device pool. Device Farm adds devices that are available and meet the criteria that you assign for the rules parameter. Depending on how many devices meet these constraints, your device pool might contain fewer devices than the value for this parameter.

By specifying the maximum number of devices, you can control the costs that you incur by running tests.

" } }, "documentation":"

Represents a request to the create device pool operation.

" @@ -1351,15 +1511,15 @@ }, "packageCleanup":{ "shape":"Boolean", - "documentation":"

When set to true, Device Farm will remove app packages after a test run. The default value is false for private devices.

" + "documentation":"

When set to true, Device Farm removes app packages after a test run. The default value is false for private devices.

" }, "excludeAppPackagesFromCleanup":{ "shape":"PackageIds", - "documentation":"

An array of strings specifying the list of app packages that should not be cleaned up from the device after a test run is over.

The list of packages is only considered if you set packageCleanup to true.

" + "documentation":"

An array of strings that specifies the list of app packages that should not be cleaned up from the device after a test run.

The list of packages is considered only if you set packageCleanup to true.

" }, "rebootAfterUse":{ "shape":"Boolean", - "documentation":"

When set to true, Device Farm will reboot the instance after a test run. The default value is true.

" + "documentation":"

When set to true, Device Farm reboots the instance after a test run. The default value is true.

" } } }, @@ -1368,7 +1528,7 @@ "members":{ "instanceProfile":{ "shape":"InstanceProfile", - "documentation":"

An object containing information about your instance profile.

" + "documentation":"

An object that contains information about your instance profile.

" } } }, @@ -1385,7 +1545,7 @@ }, "name":{ "shape":"Name", - "documentation":"

The name you wish to specify for the new network profile.

" + "documentation":"

The name for the new network profile.

" }, "description":{ "shape":"Message", @@ -1393,7 +1553,7 @@ }, "type":{ "shape":"NetworkProfileType", - "documentation":"

The type of network profile you wish to create. Valid values are listed below.

" + "documentation":"

The type of network profile to create. Valid values are listed here.

" }, "uplinkBandwidthBits":{ "shape":"Long", @@ -1448,7 +1608,7 @@ }, "defaultJobTimeoutMinutes":{ "shape":"JobTimeoutMinutes", - "documentation":"

Sets the execution timeout value (in minutes) for a project. All test runs in this project will use the specified execution timeout value unless overridden when scheduling a run.

" + "documentation":"

Sets the execution timeout value (in minutes) for a project. All test runs in this project use the specified execution timeout value unless overridden when scheduling a run.

" } }, "documentation":"

Represents a request to the create project operation.

" @@ -1472,7 +1632,7 @@ }, "vpceConfigurationArns":{ "shape":"AmazonResourceNames", - "documentation":"

An array of Amazon Resource Names (ARNs) included in the VPC endpoint configuration.

" + "documentation":"

An array of ARNs included in the VPC endpoint configuration.

" } }, "documentation":"

Configuration settings for a remote access session, including billing method.

" @@ -1490,7 +1650,7 @@ }, "deviceArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the device for which you want to create a remote access session.

" + "documentation":"

The ARN of the device for which you want to create a remote access session.

" }, "instanceArn":{ "shape":"AmazonResourceName", @@ -1498,11 +1658,11 @@ }, "sshPublicKey":{ "shape":"SshPublicKey", - "documentation":"

Ignored. The public key of the ssh key pair you want to use for connecting to remote devices in your remote debugging session. This is only required if remoteDebugEnabled is set to true.

Remote debugging is no longer supported.

" + "documentation":"

Ignored. The public key of the ssh key pair you want to use for connecting to remote devices in your remote debugging session. This key is required only if remoteDebugEnabled is set to true.

Remote debugging is no longer supported.

" }, "remoteDebugEnabled":{ "shape":"Boolean", - "documentation":"

Set to true if you want to access devices remotely for debugging in your remote access session.

Remote debugging is no longer supported.

" + "documentation":"

Set to true if you want to access devices remotely for debugging in your remote access session.

Remote debugging is no longer supported.

" }, "remoteRecordEnabled":{ "shape":"Boolean", @@ -1514,11 +1674,11 @@ }, "name":{ "shape":"Name", - "documentation":"

The name of the remote access session that you wish to create.

" + "documentation":"

The name of the remote access session to create.

" }, "clientId":{ "shape":"ClientId", - "documentation":"

Unique identifier for the client. If you want access to multiple devices on the same client, you should pass the same clientId value in each call to CreateRemoteAccessSession. This is required only if remoteDebugEnabled is set to true.

Remote debugging is no longer supported.

" + "documentation":"

Unique identifier for the client. If you want access to multiple devices on the same client, you should pass the same clientId value in each call to CreateRemoteAccessSession. This identifier is required only if remoteDebugEnabled is set to true.

Remote debugging is no longer supported.

" }, "configuration":{ "shape":"CreateRemoteAccessSessionConfiguration", @@ -1526,11 +1686,11 @@ }, "interactionMode":{ "shape":"InteractionMode", - "documentation":"

The interaction mode of the remote access session. Valid values are:

  • INTERACTIVE: You can interact with the iOS device by viewing, touching, and rotating the screen. You cannot run XCUITest framework-based tests in this mode.

  • NO_VIDEO: You are connected to the device but cannot interact with it or view the screen. This mode has the fastest test execution speed. You can run XCUITest framework-based tests in this mode.

  • VIDEO_ONLY: You can view the screen but cannot touch or rotate it. You can run XCUITest framework-based tests and watch the screen in this mode.

" + "documentation":"

The interaction mode of the remote access session. Valid values are:

  • INTERACTIVE: You can interact with the iOS device by viewing, touching, and rotating the screen. You cannot run XCUITest framework-based tests in this mode.

  • NO_VIDEO: You are connected to the device, but cannot interact with it or view the screen. This mode has the fastest test execution speed. You can run XCUITest framework-based tests in this mode.

  • VIDEO_ONLY: You can view the screen, but cannot touch or rotate it. You can run XCUITest framework-based tests and watch the screen in this mode.

" }, "skipAppResign":{ "shape":"Boolean", - "documentation":"

When set to true, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.

For more information about how Device Farm re-signs your app(s), see Do you modify my app? in the AWS Device Farm FAQs.

" + "documentation":"

When set to true, for private devices, Device Farm does not sign your app again. For public devices, Device Farm always signs your apps again.

For more information on how Device Farm modifies your uploads during tests, see Do you modify my app?

" } }, "documentation":"

Creates and submits a request to start a remote access session.
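A sketch of such a request with boto3 (placeholder ARNs; INTERACTIVE mode shown):

import boto3

client = boto3.client('devicefarm', region_name='us-west-2')

session = client.create_remote_access_session(
    projectArn='arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE',
    deviceArn='arn:aws:devicefarm:us-west-2::device:EXAMPLE',
    name='manual-repro',
    interactionMode='INTERACTIVE',
)['remoteAccessSession']
print(session['arn'], session['status'])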

" @@ -1545,6 +1705,59 @@ }, "documentation":"

Represents the server response from a request to create a remote access session.

" }, + "CreateTestGridProjectRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

Human-readable name of the Selenium testing project.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

Human-readable description of the project.

" + } + } + }, + "CreateTestGridProjectResult":{ + "type":"structure", + "members":{ + "testGridProject":{ + "shape":"TestGridProject", + "documentation":"

ARN of the Selenium testing project that was created.

" + } + } + }, + "CreateTestGridUrlRequest":{ + "type":"structure", + "required":[ + "projectArn", + "expiresInSeconds" + ], + "members":{ + "projectArn":{ + "shape":"DeviceFarmArn", + "documentation":"

ARN (from CreateTestGridProject or ListTestGridProjects) to associate with the short-term URL.

" + }, + "expiresInSeconds":{ + "shape":"TestGridUrlExpiresInSecondsInput", + "documentation":"

Lifetime, in seconds, of the URL.

" + } + } + }, + "CreateTestGridUrlResult":{ + "type":"structure", + "members":{ + "url":{ + "shape":"String", + "documentation":"

A signed URL, expiring in CreateTestGridUrlRequest$expiresInSeconds seconds, to be passed to a RemoteWebDriver.

" + }, + "expires":{ + "shape":"DateTime", + "documentation":"

The timestamp when the URL from CreateTestGridUrlResult$url expires.
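A hedged end-to-end sketch of these shapes with boto3 (the project name and expiry are illustrative; the returned url is what you pass to a RemoteWebDriver):

import boto3

client = boto3.client('devicefarm', region_name='us-west-2')

# Create a Selenium testing project, then mint a short-lived signed URL for it.
project = client.create_test_grid_project(name='example-selenium-project')['testGridProject']
grid = client.create_test_grid_url(projectArn=project['arn'], expiresInSeconds=300)

print(grid['url'])      # hand this to webdriver.Remote(command_executor=...)
print(grid['expires'])  # when the URL stops accepting new sessions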

" + } + } + }, "CreateUploadRequest":{ "type":"structure", "required":[ @@ -1559,15 +1772,15 @@ }, "name":{ "shape":"Name", - "documentation":"

The upload's file name. The name should not contain the '/' character. If uploading an iOS app, the file name needs to end with the .ipa extension. If uploading an Android app, the file name needs to end with the .apk extension. For all others, the file name must end with the .zip file extension.

" + "documentation":"

The upload's file name. The name should not contain any forward slashes (/). If you are uploading an iOS app, the file name must end with the .ipa extension. If you are uploading an Android app, the file name must end with the .apk extension. For all others, the file name must end with the .zip file extension.

" }, "type":{ "shape":"UploadType", - "documentation":"

The upload's upload type.

Must be one of the following values:

  • ANDROID_APP: An Android upload.

  • IOS_APP: An iOS upload.

  • WEB_APP: A web application upload.

  • EXTERNAL_DATA: An external data upload.

  • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

  • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

  • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

  • APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.

  • APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for a web app.

  • APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for a web app.

  • APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for a web app.

  • CALABASH_TEST_PACKAGE: A Calabash test package upload.

  • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

  • UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.

  • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

  • XCTEST_TEST_PACKAGE: An Xcode test package upload.

  • XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload.

  • APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.

  • APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.

  • APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.

  • APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.

  • APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.

  • APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.

  • APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.

  • INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.

  • XCTEST_UI_TEST_SPEC: An Xcode UI test spec upload.

Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.

" + "documentation":"

The upload's upload type.

Must be one of the following values:

  • ANDROID_APP

  • IOS_APP

  • WEB_APP

  • EXTERNAL_DATA

  • APPIUM_JAVA_JUNIT_TEST_PACKAGE

  • APPIUM_JAVA_TESTNG_TEST_PACKAGE

  • APPIUM_PYTHON_TEST_PACKAGE

  • APPIUM_NODE_TEST_PACKAGE

  • APPIUM_RUBY_TEST_PACKAGE

  • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE

  • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE

  • APPIUM_WEB_PYTHON_TEST_PACKAGE

  • APPIUM_WEB_NODE_TEST_PACKAGE

  • APPIUM_WEB_RUBY_TEST_PACKAGE

  • CALABASH_TEST_PACKAGE

  • INSTRUMENTATION_TEST_PACKAGE

  • UIAUTOMATION_TEST_PACKAGE

  • UIAUTOMATOR_TEST_PACKAGE

  • XCTEST_TEST_PACKAGE

  • XCTEST_UI_TEST_PACKAGE

  • APPIUM_JAVA_JUNIT_TEST_SPEC

  • APPIUM_JAVA_TESTNG_TEST_SPEC

  • APPIUM_PYTHON_TEST_SPEC

  • APPIUM_NODE_TEST_SPEC

  • APPIUM_RUBY_TEST_SPEC

  • APPIUM_WEB_JAVA_JUNIT_TEST_SPEC

  • APPIUM_WEB_JAVA_TESTNG_TEST_SPEC

  • APPIUM_WEB_PYTHON_TEST_SPEC

  • APPIUM_WEB_NODE_TEST_SPEC

  • APPIUM_WEB_RUBY_TEST_SPEC

  • INSTRUMENTATION_TEST_SPEC

  • XCTEST_UI_TEST_SPEC

If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.

" }, "contentType":{ "shape":"ContentType", - "documentation":"

The upload's content type (for example, \"application/octet-stream\").

" + "documentation":"

The upload's content type (for example, application/octet-stream).

" } }, "documentation":"

Represents a request to the create upload operation.
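A minimal sketch of the usual flow (assumes a local app.apk and the requests library; the project ARN is a placeholder): create the upload, then PUT the file to the presigned URL in the response.

import boto3
import requests

client = boto3.client('devicefarm', region_name='us-west-2')

upload = client.create_upload(
    projectArn='arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE',
    name='app.apk',
    type='ANDROID_APP',
)['upload']

# The response carries a presigned S3 URL; the file itself goes up with an HTTP PUT.
with open('app.apk', 'rb') as f:
    requests.put(upload['url'], data=f)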

" @@ -1596,7 +1809,7 @@ }, "vpceServiceName":{ "shape":"VPCEServiceName", - "documentation":"

The name of the VPC endpoint service running inside your AWS account that you want Device Farm to test.

" + "documentation":"

The name of the VPC endpoint service running in your AWS account that you want Device Farm to test.

" }, "serviceDnsName":{ "shape":"ServiceDnsName", @@ -1604,7 +1817,7 @@ }, "vpceConfigurationDescription":{ "shape":"VPCEConfigurationDescription", - "documentation":"

An optional description, providing more details about your VPC endpoint configuration.

" + "documentation":"

An optional description that provides details about your VPC endpoint configuration.

" } } }, @@ -1613,7 +1826,7 @@ "members":{ "vpceConfiguration":{ "shape":"VPCEConfiguration", - "documentation":"

An object containing information about your VPC endpoint configuration.

" + "documentation":"

An object that contains information about your VPC endpoint configuration.

" } } }, @@ -1626,18 +1839,18 @@ "members":{ "iosPaths":{ "shape":"IosPaths", - "documentation":"

Comma-separated list of paths on the iOS device where the artifacts generated by the customer's tests will be pulled from.

" + "documentation":"

Comma-separated list of paths on the iOS device where the artifacts generated by the customer's tests are pulled from.

" }, "androidPaths":{ "shape":"AndroidPaths", - "documentation":"

Comma-separated list of paths on the Android device where the artifacts generated by the customer's tests will be pulled from.

" + "documentation":"

Comma-separated list of paths on the Android device where the artifacts generated by the customer's tests are pulled from.

" }, "deviceHostPaths":{ "shape":"DeviceHostPaths", - "documentation":"

Comma-separated list of paths in the test execution environment where the artifacts generated by the customer's tests will be pulled from.

" + "documentation":"

Comma-separated list of paths in the test execution environment where the artifacts generated by the customer's tests are pulled from.

" } }, - "documentation":"

A JSON object specifying the paths where the artifacts generated by the customer's tests, on the device or in the test environment, will be pulled from.

Specify deviceHostPaths and optionally specify either iosPaths or androidPaths.

For web app tests, you can specify both iosPaths and androidPaths.

" + "documentation":"

A JSON object that specifies the paths where the artifacts generated by the customer's tests, on the device or in the test environment, are pulled from.

Specify deviceHostPaths and optionally specify either iosPaths or androidPaths.

For web app tests, you can specify both iosPaths and androidPaths.

" }, "DateTime":{"type":"timestamp"}, "DeleteDevicePoolRequest":{ @@ -1646,7 +1859,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm device pool you wish to delete.

" + "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm device pool to delete.

" } }, "documentation":"

Represents a request to the delete device pool operation.

" @@ -1678,7 +1891,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the network profile you want to delete.

" + "documentation":"

The ARN of the network profile to delete.

" } } }, @@ -1693,7 +1906,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm project you wish to delete.

" + "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm project to delete.

" } }, "documentation":"

Represents a request to the delete project operation.

" @@ -1727,7 +1940,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) for the run you wish to delete.

" + "documentation":"

The Amazon Resource Name (ARN) for the run to delete.

" } }, "documentation":"

Represents a request to the delete run operation.

" @@ -1738,13 +1951,28 @@ }, "documentation":"

Represents the result of a delete run request.

" }, + "DeleteTestGridProjectRequest":{ + "type":"structure", + "required":["projectArn"], + "members":{ + "projectArn":{ + "shape":"DeviceFarmArn", + "documentation":"

The ARN of the project to delete, from CreateTestGridProject or ListTestGridProjects.

" + } + } + }, + "DeleteTestGridProjectResult":{ + "type":"structure", + "members":{ + } + }, "DeleteUploadRequest":{ "type":"structure", "required":["arn"], "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm upload you wish to delete.

" + "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm upload to delete.

" } }, "documentation":"

Represents a request to the delete upload operation.

" @@ -1795,11 +2023,11 @@ }, "formFactor":{ "shape":"DeviceFormFactor", - "documentation":"

The device's form factor.

Allowed values include:

  • PHONE: The phone form factor.

  • TABLET: The tablet form factor.

" + "documentation":"

The device's form factor.

Allowed values include:

  • PHONE

  • TABLET

" }, "platform":{ "shape":"DevicePlatform", - "documentation":"

The device's platform.

Allowed values include:

  • ANDROID: The Android platform.

  • IOS: The iOS platform.

" + "documentation":"

The device's platform.

Allowed values include:

  • ANDROID

  • IOS

" }, "os":{ "shape":"String", @@ -1839,11 +2067,11 @@ }, "remoteDebugEnabled":{ "shape":"Boolean", - "documentation":"

This flag is set to true if remote debugging is enabled for the device.

Remote debugging is no longer supported.

" + "documentation":"

This flag is set to true if remote debugging is enabled for the device.

Remote debugging is no longer supported.

" }, "fleetType":{ "shape":"String", - "documentation":"

The type of fleet to which this device belongs. Possible values for fleet type are PRIVATE and PUBLIC.

" + "documentation":"

The type of fleet to which this device belongs. Possible values are PRIVATE and PUBLIC.

" }, "fleetName":{ "shape":"String", @@ -1851,11 +2079,11 @@ }, "instances":{ "shape":"DeviceInstances", - "documentation":"

The instances belonging to this device.

" + "documentation":"

The instances that belong to this device.

" }, "availability":{ "shape":"DeviceAvailability", - "documentation":"

Reflects how likely a device will be available for a test run. It is currently available in the ListDevices and GetDevice API methods.

" + "documentation":"

Indicates how likely a device is to be available for a test run. Currently available in the ListDevices and GetDevice API methods.

" } }, "documentation":"

Represents a device type that an app is tested against.

" @@ -1887,23 +2115,29 @@ "HIGHLY_AVAILABLE" ] }, + "DeviceFarmArn":{ + "type":"string", + "max":1011, + "min":32, + "pattern":"^arn:aws:devicefarm:.+" + }, "DeviceFilter":{ "type":"structure", "members":{ "attribute":{ "shape":"DeviceFilterAttribute", - "documentation":"

The aspect of a device such as platform or model used as the selection criteria in a device filter.

The supported operators for each attribute are provided in the following list.

ARN

The Amazon Resource Name (ARN) of the device. For example, \"arn:aws:devicefarm:us-west-2::device:12345Example\".

Supported operators: EQUALS, IN, NOT_IN

PLATFORM

The device platform. Valid values are \"ANDROID\" or \"IOS\".

Supported operators: EQUALS

OS_VERSION

The operating system version. For example, \"10.3.2\".

Supported operators: EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, IN, LESS_THAN, LESS_THAN_OR_EQUALS, NOT_IN

MODEL

The device model. For example, \"iPad 5th Gen\".

Supported operators: CONTAINS, EQUALS, IN, NOT_IN

AVAILABILITY

The current availability of the device. Valid values are \"AVAILABLE\", \"HIGHLY_AVAILABLE\", \"BUSY\", or \"TEMPORARY_NOT_AVAILABLE\".

Supported operators: EQUALS

FORM_FACTOR

The device form factor. Valid values are \"PHONE\" or \"TABLET\".

Supported operators: EQUALS

MANUFACTURER

The device manufacturer. For example, \"Apple\".

Supported operators: EQUALS, IN, NOT_IN

REMOTE_ACCESS_ENABLED

Whether the device is enabled for remote access. Valid values are \"TRUE\" or \"FALSE\".

Supported operators: EQUALS

REMOTE_DEBUG_ENABLED

Ignored.Whether the device is enabled for remote debugging. Valid values are \"TRUE\" or \"FALSE\".

Supported operators: EQUALS

This filter will be ignored, as remote debugging is no longer supported.

INSTANCE_ARN

The Amazon Resource Name (ARN) of the device instance.

Supported operators: EQUALS, IN, NOT_IN

INSTANCE_LABELS

The label of the device instance.

Supported operators: CONTAINS

FLEET_TYPE

The fleet type. Valid values are \"PUBLIC\" or \"PRIVATE\".

Supported operators: EQUALS

" + "documentation":"

The aspect of a device such as platform or model used as the selection criteria in a device filter.

The supported operators for each attribute are provided in the following list.

ARN

The Amazon Resource Name (ARN) of the device (for example, arn:aws:devicefarm:us-west-2::device:12345Example).

Supported operators: EQUALS, IN, NOT_IN

PLATFORM

The device platform. Valid values are ANDROID or IOS.

Supported operators: EQUALS

OS_VERSION

The operating system version (for example, 10.3.2).

Supported operators: EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, IN, LESS_THAN, LESS_THAN_OR_EQUALS, NOT_IN

MODEL

The device model (for example, iPad 5th Gen).

Supported operators: CONTAINS, EQUALS, IN, NOT_IN

AVAILABILITY

The current availability of the device. Valid values are AVAILABLE, HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE.

Supported operators: EQUALS

FORM_FACTOR

The device form factor. Valid values are PHONE or TABLET.

Supported operators: EQUALS

MANUFACTURER

The device manufacturer (for example, Apple).

Supported operators: EQUALS, IN, NOT_IN

REMOTE_ACCESS_ENABLED

Whether the device is enabled for remote access. Valid values are TRUE or FALSE.

Supported operators: EQUALS

REMOTE_DEBUG_ENABLED

Whether the device is enabled for remote debugging. Valid values are TRUE or FALSE.

Supported operators: EQUALS

Because remote debugging is no longer supported, this filter is ignored.

INSTANCE_ARN

The Amazon Resource Name (ARN) of the device instance.

Supported operators: EQUALS, IN, NOT_IN

INSTANCE_LABELS

The label of the device instance.

Supported operators: CONTAINS

FLEET_TYPE

The fleet type. Valid values are PUBLIC or PRIVATE.

Supported operators: EQUALS

" }, "operator":{ "shape":"RuleOperator", - "documentation":"

Specifies how Device Farm compares the filter's attribute to the value. For the operators that are supported by each attribute, see the attribute descriptions.

" + "documentation":"

Specifies how Device Farm compares the filter's attribute to the value. See the attribute descriptions.

" }, "values":{ "shape":"DeviceFilterValues", - "documentation":"

An array of one or more filter values used in a device filter.

Operator Values

  • The IN and NOT_IN operators can take a values array that has more than one element.

  • The other operators require an array with a single element.

Attribute Values

  • The PLATFORM attribute can be set to \"ANDROID\" or \"IOS\".

  • The AVAILABILITY attribute can be set to \"AVAILABLE\", \"HIGHLY_AVAILABLE\", \"BUSY\", or \"TEMPORARY_NOT_AVAILABLE\".

  • The FORM_FACTOR attribute can be set to \"PHONE\" or \"TABLET\".

  • The FLEET_TYPE attribute can be set to \"PUBLIC\" or \"PRIVATE\".

" + "documentation":"

An array of one or more filter values used in a device filter.

Operator Values

  • The IN and NOT_IN operators can take a values array that has more than one element.

  • The other operators require an array with a single element.

Attribute Values

  • The PLATFORM attribute can be set to ANDROID or IOS.

  • The AVAILABILITY attribute can be set to AVAILABLE, HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE.

  • The FORM_FACTOR attribute can be set to PHONE or TABLET.

  • The FLEET_TYPE attribute can be set to PUBLIC or PRIVATE.

" } }, - "documentation":"

Represents a device filter used to select a set of devices to be included in a test run. This data structure is passed in as the deviceSelectionConfiguration parameter to ScheduleRun. For an example of the JSON request syntax, see ScheduleRun.

It is also passed in as the filters parameter to ListDevices. For an example of the JSON request syntax, see ListDevices.

" + "documentation":"

Represents a device filter used to select a set of devices to be included in a test run. This data structure is passed in as the deviceSelectionConfiguration parameter to ScheduleRun. For an example of the JSON request syntax, see ScheduleRun.

It is also passed in as the filters parameter to ListDevices. For an example of the JSON request syntax, see ListDevices.
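For example, a minimal boto3 sketch of the ListDevices usage (the filter values are illustrative):

import boto3

client = boto3.client('devicefarm', region_name='us-west-2')

# Highly available Android devices running OS 10 or later.
devices = client.list_devices(filters=[
    {'attribute': 'PLATFORM', 'operator': 'EQUALS', 'values': ['ANDROID']},
    {'attribute': 'OS_VERSION', 'operator': 'GREATER_THAN_OR_EQUALS', 'values': ['10']},
    {'attribute': 'AVAILABILITY', 'operator': 'EQUALS', 'values': ['HIGHLY_AVAILABLE']},
])['devices']

for device in devices:
    print(device['name'], device['os'])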

" }, "DeviceFilterAttribute":{ "type":"string", @@ -1950,15 +2184,15 @@ }, "deviceArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the device.

" + "documentation":"

The ARN of the device.

" }, "labels":{ "shape":"InstanceLabels", - "documentation":"

An array of strings describing the device instance.

" + "documentation":"

An array of strings that describe the device instance.

" }, "status":{ "shape":"InstanceStatus", - "documentation":"

The status of the device instance. Valid values are listed below.

" + "documentation":"

The status of the device instance. Valid values are listed here.

" }, "udid":{ "shape":"String", @@ -1966,7 +2200,7 @@ }, "instanceProfile":{ "shape":"InstanceProfile", - "documentation":"

A object containing information about the instance profile.

" + "documentation":"

An object that contains information about the instance profile.

" } }, "documentation":"

Represents the device instance.

" @@ -2025,7 +2259,7 @@ }, "maxDevices":{ "shape":"Integer", - "documentation":"

The number of devices that Device Farm can add to your device pool. Device Farm adds devices that are available and that meet the criteria that you assign for the rules parameter. Depending on how many devices meet these constraints, your device pool might contain fewer devices than the value for this parameter.

By specifying the maximum number of devices, you can control the costs that you incur by running tests.

" + "documentation":"

The number of devices that Device Farm can add to your device pool. Device Farm adds devices that are available and meet the criteria that you assign for the rules parameter. Depending on how many devices meet these constraints, your device pool might contain fewer devices than the value for this parameter.

By specifying the maximum number of devices, you can control the costs that you incur by running tests.

" } }, "documentation":"

Represents a collection of device types.

" @@ -2035,7 +2269,7 @@ "members":{ "device":{ "shape":"Device", - "documentation":"

The device (phone or tablet) that you wish to return information about.

" + "documentation":"

The device (phone or tablet) to return information about.

" }, "compatible":{ "shape":"Boolean", @@ -2072,14 +2306,14 @@ "members":{ "filters":{ "shape":"DeviceFilters", - "documentation":"

Used to dynamically select a set of devices for a test run. A filter is made up of an attribute, an operator, and one or more values.

  • Attribute

    The aspect of a device such as platform or model used as the selection criteria in a device filter.

    Allowed values include:

    • ARN: The Amazon Resource Name (ARN) of the device. For example, \"arn:aws:devicefarm:us-west-2::device:12345Example\".

    • PLATFORM: The device platform. Valid values are \"ANDROID\" or \"IOS\".

    • OS_VERSION: The operating system version. For example, \"10.3.2\".

    • MODEL: The device model. For example, \"iPad 5th Gen\".

    • AVAILABILITY: The current availability of the device. Valid values are \"AVAILABLE\", \"HIGHLY_AVAILABLE\", \"BUSY\", or \"TEMPORARY_NOT_AVAILABLE\".

    • FORM_FACTOR: The device form factor. Valid values are \"PHONE\" or \"TABLET\".

    • MANUFACTURER: The device manufacturer. For example, \"Apple\".

    • REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are \"TRUE\" or \"FALSE\".

    • REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are \"TRUE\" or \"FALSE\". This filter will be ignored, as remote debugging is no longer supported.

    • INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.

    • INSTANCE_LABELS: The label of the device instance.

    • FLEET_TYPE: The fleet type. Valid values are \"PUBLIC\" or \"PRIVATE\".

  • Operator

    The filter operator.

    • The EQUALS operator is available for every attribute except INSTANCE_LABELS.

    • The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes.

    • The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes.

    • The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also available for the OS_VERSION attribute.

  • Values

    An array of one or more filter values.

    Operator Values

    • The IN and NOT_IN operators can take a values array that has more than one element.

    • The other operators require an array with a single element.

    Attribute Values

    • The PLATFORM attribute can be set to \"ANDROID\" or \"IOS\".

    • The AVAILABILITY attribute can be set to \"AVAILABLE\", \"HIGHLY_AVAILABLE\", \"BUSY\", or \"TEMPORARY_NOT_AVAILABLE\".

    • The FORM_FACTOR attribute can be set to \"PHONE\" or \"TABLET\".

    • The FLEET_TYPE attribute can be set to \"PUBLIC\" or \"PRIVATE\".

" + "documentation":"

Used to dynamically select a set of devices for a test run. A filter is made up of an attribute, an operator, and one or more values.

  • Attribute

    The aspect of a device such as platform or model used as the selection criteria in a device filter.

    Allowed values include:

    • ARN: The Amazon Resource Name (ARN) of the device (for example, arn:aws:devicefarm:us-west-2::device:12345Example).

    • PLATFORM: The device platform. Valid values are ANDROID or IOS.

    • OS_VERSION: The operating system version (for example, 10.3.2).

    • MODEL: The device model (for example, iPad 5th Gen).

    • AVAILABILITY: The current availability of the device. Valid values are AVAILABLE, HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE.

    • FORM_FACTOR: The device form factor. Valid values are PHONE or TABLET.

    • MANUFACTURER: The device manufacturer (for example, Apple).

    • REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are TRUE or FALSE.

    • REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are TRUE or FALSE. Because remote debugging is no longer supported, this filter is ignored.

    • INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.

    • INSTANCE_LABELS: The label of the device instance.

    • FLEET_TYPE: The fleet type. Valid values are PUBLIC or PRIVATE.

  • Operator

    The filter operator.

    • The EQUALS operator is available for every attribute except INSTANCE_LABELS.

    • The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes.

    • The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes.

    • The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also available for the OS_VERSION attribute.

  • Values

    An array of one or more filter values.

    Operator Values

    • The IN and NOT_IN operators can take a values array that has more than one element.

    • The other operators require an array with a single element.

    Attribute Values

    • The PLATFORM attribute can be set to ANDROID or IOS.

    • The AVAILABILITY attribute can be set to AVAILABLE, HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE.

    • The FORM_FACTOR attribute can be set to PHONE or TABLET.

    • The FLEET_TYPE attribute can be set to PUBLIC or PRIVATE.

" }, "maxDevices":{ "shape":"Integer", "documentation":"

The maximum number of devices to be included in a test run.

" } }, - "documentation":"

Represents the device filters used in a test run as well as the maximum number of devices to be included in the run. It is passed in as the deviceSelectionConfiguration request parameter in ScheduleRun.

" + "documentation":"

Represents the device filters used in a test run and the maximum number of devices to be included in the run. It is passed in as the deviceSelectionConfiguration request parameter in ScheduleRun.
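A sketch of passing this structure to ScheduleRun with boto3 (all ARNs are placeholders, and the test type is assumed to match the uploaded test package):

import boto3

client = boto3.client('devicefarm', region_name='us-west-2')

run = client.schedule_run(
    projectArn='arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE',
    appArn='arn:aws:devicefarm:us-west-2:123456789012:upload:EXAMPLE-APP',
    name='nightly-android',
    test={'type': 'APPIUM_PYTHON',
          'testPackageArn': 'arn:aws:devicefarm:us-west-2:123456789012:upload:EXAMPLE-TESTS'},
    deviceSelectionConfiguration={
        # Dynamic selection instead of a fixed device pool.
        'filters': [{'attribute': 'PLATFORM', 'operator': 'EQUALS', 'values': ['ANDROID']}],
        'maxDevices': 5,
    },
)['run']
print(run['arn'], run['status'])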

" }, "DeviceSelectionResult":{ "type":"structure", @@ -2097,7 +2331,7 @@ "documentation":"

The maximum number of devices to be selected by a device filter and included in a test run.

" } }, - "documentation":"

Contains the run results requested by the device selection configuration as well as how many devices were returned. For an example of the JSON response syntax, see ScheduleRun.

" + "documentation":"

Contains the run results requested by the device selection configuration and how many devices were returned. For an example of the JSON response syntax, see ScheduleRun.

" }, "Devices":{ "type":"list", @@ -2110,23 +2344,23 @@ "members":{ "jobTimeoutMinutes":{ "shape":"JobTimeoutMinutes", - "documentation":"

The number of minutes a test run will execute before it times out.

" + "documentation":"

The number of minutes a test run executes before it times out.

" }, "accountsCleanup":{ "shape":"AccountsCleanup", - "documentation":"

True if account cleanup is enabled at the beginning of the test; otherwise, false.

" + "documentation":"

True if account cleanup is enabled at the beginning of the test. Otherwise, false.

" }, "appPackagesCleanup":{ "shape":"AppPackagesCleanup", - "documentation":"

True if app package cleanup is enabled at the beginning of the test; otherwise, false.

" + "documentation":"

True if app package cleanup is enabled at the beginning of the test. Otherwise, false.

" }, "videoCapture":{ "shape":"VideoCapture", - "documentation":"

Set to true to enable video capture; otherwise, set to false. The default is true.

" + "documentation":"

Set to true to enable video capture. Otherwise, set to false. The default is true.

" }, "skipAppResign":{ "shape":"SkipAppResign", - "documentation":"

When set to true, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.

For more information about how Device Farm re-signs your app(s), see Do you modify my app? in the AWS Device Farm FAQs.

" + "documentation":"

When set to true, for private devices, Device Farm does not sign your app again. For public devices, Device Farm always signs your apps again.

For more information about how Device Farm re-signs your apps, see Do you modify my app? in the AWS Device Farm FAQs.

" } }, "documentation":"

Represents configuration information about a test run, such as the execution timeout (in minutes).

" @@ -2200,7 +2434,7 @@ "members":{ "deviceInstance":{ "shape":"DeviceInstance", - "documentation":"

An object containing information about your device instance.

" + "documentation":"

An object that contains information about your device instance.

" } } }, @@ -2218,7 +2452,7 @@ }, "testType":{ "shape":"TestType", - "documentation":"

The test type for the specified device pool.

Allowed values include the following:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.

" + "documentation":"

The test type for the specified device pool.

Allowed values include the following:

  • BUILTIN_FUZZ.

  • BUILTIN_EXPLORER. For Android, an app explorer that traverses an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT.

  • APPIUM_JAVA_TESTNG.

  • APPIUM_PYTHON.

  • APPIUM_NODE.

  • APPIUM_RUBY.

  • APPIUM_WEB_JAVA_JUNIT.

  • APPIUM_WEB_JAVA_TESTNG.

  • APPIUM_WEB_PYTHON.

  • APPIUM_WEB_NODE.

  • APPIUM_WEB_RUBY.

  • CALABASH.

  • INSTRUMENTATION.

  • UIAUTOMATION.

  • UIAUTOMATOR.

  • XCTEST.

  • XCTEST_UI.

" }, "test":{ "shape":"ScheduleRunTest", @@ -2226,7 +2460,7 @@ }, "configuration":{ "shape":"ScheduleRunConfiguration", - "documentation":"

An object containing information about the settings for a run.

" + "documentation":"

An object that contains information about the settings for a run.

" } }, "documentation":"

Represents a request to the get device pool compatibility operation.
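A sketch of the corresponding boto3 call (placeholder ARNs; the test type is illustrative):

import boto3

client = boto3.client('devicefarm', region_name='us-west-2')

compat = client.get_device_pool_compatibility(
    devicePoolArn='arn:aws:devicefarm:us-west-2:123456789012:devicepool:EXAMPLE',
    appArn='arn:aws:devicefarm:us-west-2:123456789012:upload:EXAMPLE-APP',
    testType='APPIUM_PYTHON',
)

print(len(compat['compatibleDevices']), 'compatible devices')
print(len(compat['incompatibleDevices']), 'incompatible devices')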

" @@ -2261,7 +2495,7 @@ "members":{ "devicePool":{ "shape":"DevicePool", - "documentation":"

An object containing information about the requested device pool.

" + "documentation":"

An object that contains information about the requested device pool.

" } }, "documentation":"

Represents the result of a get device pool request.

" @@ -2282,7 +2516,7 @@ "members":{ "device":{ "shape":"Device", - "documentation":"

An object containing information about the requested device.

" + "documentation":"

An object that contains information about the requested device.

" } }, "documentation":"

Represents the result of a get device request.

" @@ -2293,7 +2527,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of your instance profile.

" + "documentation":"

The Amazon Resource Name (ARN) of an instance profile.

" } } }, @@ -2302,7 +2536,7 @@ "members":{ "instanceProfile":{ "shape":"InstanceProfile", - "documentation":"

An object containing information about your instance profile.

" + "documentation":"

An object that contains information about an instance profile.

" } } }, @@ -2322,7 +2556,7 @@ "members":{ "job":{ "shape":"Job", - "documentation":"

An object containing information about the requested job.

" + "documentation":"

An object that contains information about the requested job.

" } }, "documentation":"

Represents the result of a get job request.

" @@ -2333,7 +2567,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the network profile you want to return information about.

" + "documentation":"

The ARN of the network profile to return information about.

" } } }, @@ -2390,7 +2624,7 @@ "members":{ "project":{ "shape":"Project", - "documentation":"

The project you wish to get information about.

" + "documentation":"

The project to get information about.

" } }, "documentation":"

Represents the result of a get project request.

" @@ -2432,7 +2666,7 @@ "members":{ "run":{ "shape":"Run", - "documentation":"

The run you wish to get results from.

" + "documentation":"

The run to get results from.

" } }, "documentation":"

Represents the result of a get run request.

" @@ -2458,6 +2692,51 @@ }, "documentation":"

Represents the result of a get suite request.

" }, + "GetTestGridProjectRequest":{ + "type":"structure", + "required":["projectArn"], + "members":{ + "projectArn":{ + "shape":"DeviceFarmArn", + "documentation":"

The ARN of the Selenium testing project, from either CreateTestGridProject or ListTestGridProjects.

" + } + } + }, + "GetTestGridProjectResult":{ + "type":"structure", + "members":{ + "testGridProject":{ + "shape":"TestGridProject", + "documentation":"

A TestGridProject.

" + } + } + }, + "GetTestGridSessionRequest":{ + "type":"structure", + "members":{ + "projectArn":{ + "shape":"DeviceFarmArn", + "documentation":"

The ARN for the project that this session belongs to. See CreateTestGridProject and ListTestGridProjects.

" + }, + "sessionId":{ + "shape":"ResourceId", + "documentation":"

An ID associated with this session.

" + }, + "sessionArn":{ + "shape":"DeviceFarmArn", + "documentation":"

An ARN that uniquely identifies a TestGridSession.

" + } + } + }, + "GetTestGridSessionResult":{ + "type":"structure", + "members":{ + "testGridSession":{ + "shape":"TestGridSession", + "documentation":"

The TestGridSession that was requested.
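
A hedged sketch of fetching one of these Selenium test grid sessions with the new operation; the project ARN and session ID are placeholders.

    import botocore.session

    client = botocore.session.get_session().create_client(
        'devicefarm', region_name='us-west-2')

    # Either sessionArn alone, or projectArn plus sessionId, identifies the session.
    session_info = client.get_test_grid_session(
        projectArn='arn:aws:devicefarm:us-west-2:123456789012:testgrid-project:EXAMPLE',
        sessionId='EXAMPLE-SESSION-ID',
    )
    print(session_info['testGridSession']['status'])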

" + } + } + }, "GetTestRequest":{ "type":"structure", "required":["arn"], @@ -2515,7 +2794,7 @@ "members":{ "vpceConfiguration":{ "shape":"VPCEConfiguration", - "documentation":"

An object containing information about your VPC endpoint configuration.

" + "documentation":"

An object that contains information about your VPC endpoint configuration.

" } } }, @@ -2543,7 +2822,7 @@ }, "type":{ "shape":"DeviceAttribute", - "documentation":"

The type of incompatibility.

Allowed values include:

  • ARN: The ARN.

  • FORM_FACTOR: The form factor (for example, phone or tablet).

  • MANUFACTURER: The manufacturer.

  • PLATFORM: The platform (for example, Android or iOS).

  • REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access.

  • APPIUM_VERSION: The Appium version for the test.

" + "documentation":"

The type of incompatibility.

Allowed values include:

  • ARN

  • FORM_FACTOR (for example, phone or tablet)

  • MANUFACTURER

  • PLATFORM (for example, Android or iOS)

  • REMOTE_ACCESS_ENABLED

  • APPIUM_VERSION

" } }, "documentation":"

Represents information about incompatibility.

" @@ -2565,7 +2844,7 @@ }, "appArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the app about which you are requesting information.

" + "documentation":"

The ARN of the app about which you are requesting information.

" } }, "documentation":"

Represents the request to install an Android application (in .apk format) or an iOS application (in .ipa format) as part of a remote access session.
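
For example, a minimal sketch of installing a previously uploaded package into a running remote access session; both ARNs are placeholders.

    import botocore.session

    client = botocore.session.get_session().create_client(
        'devicefarm', region_name='us-west-2')

    # Placeholder ARNs; the upload must already exist in the project.
    client.install_to_remote_access_session(
        remoteAccessSessionArn='arn:aws:devicefarm:us-west-2:123456789012:session:EXAMPLE',
        appArn='arn:aws:devicefarm:us-west-2:123456789012:upload:EXAMPLE',
    )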

" @@ -2593,15 +2872,15 @@ }, "packageCleanup":{ "shape":"Boolean", - "documentation":"

When set to true, Device Farm will remove app packages after a test run. The default value is false for private devices.

" + "documentation":"

When set to true, Device Farm removes app packages after a test run. The default value is false for private devices.

" }, "excludeAppPackagesFromCleanup":{ "shape":"PackageIds", - "documentation":"

An array of strings specifying the list of app packages that should not be cleaned up from the device after a test run is over.

The list of packages is only considered if you set packageCleanup to true.

" + "documentation":"

An array of strings containing the list of app packages that should not be cleaned up from the device after a test run completes.

The list of packages is considered only if you set packageCleanup to true.

" }, "rebootAfterUse":{ "shape":"Boolean", - "documentation":"

When set to true, Device Farm will reboot the instance after a test run. The default value is true.

" + "documentation":"

When set to true, Device Farm reboots the instance after a test run. The default value is true.
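
These members are the ones accepted when creating an instance profile. A sketch using create_instance_profile, assuming a hypothetical Android package name for the exclusion list:

    import botocore.session

    client = botocore.session.get_session().create_client(
        'devicefarm', region_name='us-west-2')

    profile = client.create_instance_profile(
        name='nightly-private-device-profile',
        packageCleanup=True,
        # Only consulted because packageCleanup is True; package name is illustrative.
        excludeAppPackagesFromCleanup=['com.example.keepme'],
        rebootAfterUse=True,
    )
    print(profile['instanceProfile']['arn'])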

" }, "name":{ "shape":"Name", @@ -2638,6 +2917,15 @@ "max":64, "min":0 }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

An internal exception was raised in the service. Contact aws-devicefarm-support@amazon.com if you see this error.

", + "exception":true, + "fault":true + }, "InvalidOperationException":{ "type":"structure", "members":{ @@ -2663,7 +2951,7 @@ }, "type":{ "shape":"TestType", - "documentation":"

The job's type.

Allowed values include the following:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby test type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.

" + "documentation":"

The job's type.

Allowed values include the following:

  • BUILTIN_FUZZ

  • BUILTIN_EXPLORER. For Android, an app explorer that traverses an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT

  • APPIUM_JAVA_TESTNG

  • APPIUM_PYTHON

  • APPIUM_NODE

  • APPIUM_RUBY

  • APPIUM_WEB_JAVA_JUNIT

  • APPIUM_WEB_JAVA_TESTNG

  • APPIUM_WEB_PYTHON

  • APPIUM_WEB_NODE

  • APPIUM_WEB_RUBY

  • CALABASH

  • INSTRUMENTATION

  • UIAUTOMATION

  • UIAUTOMATOR

  • XCTEST

  • XCTEST_UI

" }, "created":{ "shape":"DateTime", @@ -2671,11 +2959,11 @@ }, "status":{ "shape":"ExecutionStatus", - "documentation":"

The job's status.

Allowed values include:

  • PENDING: A pending status.

  • PENDING_CONCURRENCY: A pending concurrency status.

  • PENDING_DEVICE: A pending device status.

  • PROCESSING: A processing status.

  • SCHEDULING: A scheduling status.

  • PREPARING: A preparing status.

  • RUNNING: A running status.

  • COMPLETED: A completed status.

  • STOPPING: A stopping status.

" + "documentation":"

The job's status.

Allowed values include:

  • PENDING

  • PENDING_CONCURRENCY

  • PENDING_DEVICE

  • PROCESSING

  • SCHEDULING

  • PREPARING

  • RUNNING

  • COMPLETED

  • STOPPING

" }, "result":{ "shape":"ExecutionResult", - "documentation":"

The job's result.

Allowed values include:

  • PENDING: A pending condition.

  • PASSED: A passing condition.

  • WARNED: A warning condition.

  • FAILED: A failed condition.

  • SKIPPED: A skipped condition.

  • ERRORED: An error condition.

  • STOPPED: A stopped condition.

" + "documentation":"

The job's result.

Allowed values include:

  • PENDING

  • PASSED

  • WARNED

  • FAILED

  • SKIPPED

  • ERRORED

  • STOPPED

" }, "started":{ "shape":"DateTime", @@ -2699,7 +2987,7 @@ }, "instanceArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the instance.

" + "documentation":"

The ARN of the instance.

" }, "deviceMinutes":{ "shape":"DeviceMinutes", @@ -2711,7 +2999,7 @@ }, "videoCapture":{ "shape":"VideoCapture", - "documentation":"

This value is set to true if video capture is enabled; otherwise, it is set to false.

" + "documentation":"

This value is set to true if video capture is enabled. Otherwise, it is set to false.

" } }, "documentation":"

Represents a device.

" @@ -2741,11 +3029,11 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The Run, Job, Suite, or Test ARN.

" + "documentation":"

The run, job, suite, or test ARN.

" }, "type":{ "shape":"ArtifactCategory", - "documentation":"

The artifacts' type.

Allowed values include:

  • FILE: The artifacts are files.

  • LOG: The artifacts are logs.

  • SCREENSHOT: The artifacts are screenshots.

" + "documentation":"

The artifacts' type.

Allowed values include:

  • FILE

  • LOG

  • SCREENSHOT
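
A sketch of pulling one of these artifact categories for a finished run; the run ARN is a placeholder.

    import botocore.session

    client = botocore.session.get_session().create_client(
        'devicefarm', region_name='us-west-2')

    artifacts = client.list_artifacts(
        arn='arn:aws:devicefarm:us-west-2:123456789012:run:EXAMPLE',
        type='SCREENSHOT',
    )
    for artifact in artifacts['artifacts']:
        print(artifact['name'], artifact['url'])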

" }, "nextToken":{ "shape":"PaginationToken", @@ -2763,7 +3051,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list artifacts operation.

" @@ -2773,7 +3061,7 @@ "members":{ "maxResults":{ "shape":"Integer", - "documentation":"

An integer specifying the maximum number of items you want to return in the API response.

" + "documentation":"

An integer that specifies the maximum number of items you want to return in the API response.

" }, "nextToken":{ "shape":"PaginationToken", @@ -2786,7 +3074,7 @@ "members":{ "deviceInstances":{ "shape":"DeviceInstances", - "documentation":"

An object containing information about your device instances.

" + "documentation":"

An object that contains information about your device instances.

" }, "nextToken":{ "shape":"PaginationToken", @@ -2822,7 +3110,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list device pools request.

" @@ -2840,7 +3128,7 @@ }, "filters":{ "shape":"DeviceFilters", - "documentation":"

Used to select a set of devices. A filter is made up of an attribute, an operator, and one or more values.

  • Attribute: The aspect of a device such as platform or model used as the selection criteria in a device filter.

    Allowed values include:

    • ARN: The Amazon Resource Name (ARN) of the device. For example, \"arn:aws:devicefarm:us-west-2::device:12345Example\".

    • PLATFORM: The device platform. Valid values are \"ANDROID\" or \"IOS\".

    • OS_VERSION: The operating system version. For example, \"10.3.2\".

    • MODEL: The device model. For example, \"iPad 5th Gen\".

    • AVAILABILITY: The current availability of the device. Valid values are \"AVAILABLE\", \"HIGHLY_AVAILABLE\", \"BUSY\", or \"TEMPORARY_NOT_AVAILABLE\".

    • FORM_FACTOR: The device form factor. Valid values are \"PHONE\" or \"TABLET\".

    • MANUFACTURER: The device manufacturer. For example, \"Apple\".

    • REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are \"TRUE\" or \"FALSE\".

    • REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are \"TRUE\" or \"FALSE\". This attribute will be ignored, as remote debugging is no longer supported.

    • INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.

    • INSTANCE_LABELS: The label of the device instance.

    • FLEET_TYPE: The fleet type. Valid values are \"PUBLIC\" or \"PRIVATE\".

  • Operator: The filter operator.

    • The EQUALS operator is available for every attribute except INSTANCE_LABELS.

    • The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes.

    • The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes.

    • The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also available for the OS_VERSION attribute.

  • Values: An array of one or more filter values.

    • The IN and NOT_IN operators take a values array that has one or more elements.

    • The other operators require an array with a single element.

    • In a request, the AVAILABILITY attribute takes \"AVAILABLE\", \"HIGHLY_AVAILABLE\", \"BUSY\", or \"TEMPORARY_NOT_AVAILABLE\" as values.

" + "documentation":"

Used to select a set of devices. A filter is made up of an attribute, an operator, and one or more values.

  • Attribute: The aspect of a device such as platform or model used as the selection criteria in a device filter.

    Allowed values include:

    • ARN: The Amazon Resource Name (ARN) of the device (for example, arn:aws:devicefarm:us-west-2::device:12345Example).

    • PLATFORM: The device platform. Valid values are ANDROID or IOS.

    • OS_VERSION: The operating system version (for example, 10.3.2).

    • MODEL: The device model (for example, iPad 5th Gen).

    • AVAILABILITY: The current availability of the device. Valid values are AVAILABLE, HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE.

    • FORM_FACTOR: The device form factor. Valid values are PHONE or TABLET.

    • MANUFACTURER: The device manufacturer (for example, Apple).

    • REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid values are TRUE or FALSE.

    • REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid values are TRUE or FALSE. Because remote debugging is no longer supported, this attribute is ignored.

    • INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.

    • INSTANCE_LABELS: The label of the device instance.

    • FLEET_TYPE: The fleet type. Valid values are PUBLIC or PRIVATE.

  • Operator: The filter operator.

    • The EQUALS operator is available for every attribute except INSTANCE_LABELS.

    • The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes.

    • The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes.

    • The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also available for the OS_VERSION attribute.

  • Values: An array of one or more filter values.

    • The IN and NOT_IN operators take a values array that has one or more elements.

    • The other operators require an array with a single element.

    • In a request, the AVAILABILITY attribute takes the following values: AVAILABLE, HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE.
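
A minimal sketch of the attribute/operator/values structure described above, passed to list_devices; the filter values are illustrative.

    import botocore.session

    client = botocore.session.get_session().create_client(
        'devicefarm', region_name='us-west-2')

    # Select available Android phones; each filter is attribute/operator/values.
    devices = client.list_devices(filters=[
        {'attribute': 'PLATFORM', 'operator': 'EQUALS', 'values': ['ANDROID']},
        {'attribute': 'FORM_FACTOR', 'operator': 'EQUALS', 'values': ['PHONE']},
        {'attribute': 'AVAILABILITY', 'operator': 'EQUALS', 'values': ['AVAILABLE']},
    ])
    for device in devices['devices']:
        print(device['name'], device['os'])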

" } }, "documentation":"

Represents the result of a list devices request.

" @@ -2854,7 +3142,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list devices operation.

" @@ -2864,7 +3152,7 @@ "members":{ "maxResults":{ "shape":"Integer", - "documentation":"

An integer specifying the maximum number of items you want to return in the API response.

" + "documentation":"

An integer that specifies the maximum number of items you want to return in the API response.

" }, "nextToken":{ "shape":"PaginationToken", @@ -2877,7 +3165,7 @@ "members":{ "instanceProfiles":{ "shape":"InstanceProfiles", - "documentation":"

An object containing information about your instance profiles.

" + "documentation":"

An object that contains information about your instance profiles.

" }, "nextToken":{ "shape":"PaginationToken", @@ -2909,7 +3197,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list jobs request.

" @@ -2924,7 +3212,7 @@ }, "type":{ "shape":"NetworkProfileType", - "documentation":"

The type of network profile you wish to return information about. Valid values are listed below.

" + "documentation":"

The type of network profile to return information about. Valid values are listed here.

" }, "nextToken":{ "shape":"PaginationToken", @@ -3006,7 +3294,7 @@ "members":{ "offerings":{ "shape":"Offerings", - "documentation":"

A value representing the list offering results.

" + "documentation":"

A value that represents the list offering results.

" }, "nextToken":{ "shape":"PaginationToken", @@ -3038,7 +3326,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list projects request.

" @@ -3063,7 +3351,7 @@ "members":{ "remoteAccessSessions":{ "shape":"RemoteAccessSessions", - "documentation":"

A container representing the metadata from the service about each remote access session you are requesting.

" + "documentation":"

A container that represents the metadata from the service about each remote access session you are requesting.

" }, "nextToken":{ "shape":"PaginationToken", @@ -3096,7 +3384,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list runs request.

" @@ -3125,7 +3413,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list samples request.

" @@ -3154,7 +3442,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list suites request.

" @@ -3164,8 +3452,8 @@ "required":["ResourceARN"], "members":{ "ResourceARN":{ - "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the resource(s) for which to list tags. You can associate tags with the following Device Farm resources: PROJECT, RUN, NETWORK_PROFILE, INSTANCE_PROFILE, DEVICE_INSTANCE, SESSION, DEVICE_POOL, DEVICE, and VPCE_CONFIGURATION.

" + "shape":"DeviceFarmArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource or resources for which to list tags. You can associate tags with the following Device Farm resources: PROJECT, RUN, NETWORK_PROFILE, INSTANCE_PROFILE, DEVICE_INSTANCE, SESSION, DEVICE_POOL, DEVICE, and VPCE_CONFIGURATION.

" } } }, @@ -3174,7 +3462,150 @@ "members":{ "Tags":{ "shape":"TagList", - "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters. Tag values can have a maximum length of 256 characters.
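
A sketch of reading the tags back from a Device Farm resource; note the request parameter is ResourceARN, matching the member above, and the project ARN is a placeholder.

    import botocore.session

    client = botocore.session.get_session().create_client(
        'devicefarm', region_name='us-west-2')

    tags = client.list_tags_for_resource(
        ResourceARN='arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE')
    for tag in tags['Tags']:
        print(tag['Key'], '=', tag['Value'])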

" + } + } + }, + "ListTestGridProjectsRequest":{ + "type":"structure", + "members":{ + "maxResult":{ + "shape":"MaxPageSize", + "documentation":"

Return no more than this number of results.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

From a response, used to continue a paginated listing.

" + } + } + }, + "ListTestGridProjectsResult":{ + "type":"structure", + "members":{ + "testGridProjects":{ + "shape":"TestGridProjects", + "documentation":"

The list of TestGridProjects, based on a ListTestGridProjectsRequest.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Used for pagination. Pass into ListTestGridProjects to get more results in a paginated request.
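
A sketch of walking the new test grid project listing with nextToken pagination; maxResult stays within the MaxPageSize bounds defined later in this model.

    import botocore.session

    client = botocore.session.get_session().create_client(
        'devicefarm', region_name='us-west-2')

    kwargs = {'maxResult': 50}
    while True:
        page = client.list_test_grid_projects(**kwargs)
        for project in page.get('testGridProjects', []):
            print(project['name'], project['arn'])
        token = page.get('nextToken')
        if not token:
            break
        # Feed the token back in to fetch the next page.
        kwargs['nextToken'] = token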

" + } + } + }, + "ListTestGridSessionActionsRequest":{ + "type":"structure", + "required":["sessionArn"], + "members":{ + "sessionArn":{ + "shape":"DeviceFarmArn", + "documentation":"

The ARN of the session to retrieve.

" + }, + "maxResult":{ + "shape":"MaxPageSize", + "documentation":"

The maximum number of session actions to return per response.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Pagination token.

" + } + } + }, + "ListTestGridSessionActionsResult":{ + "type":"structure", + "members":{ + "actions":{ + "shape":"TestGridSessionActions", + "documentation":"

The actions taken by the session.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Pagination token.

" + } + } + }, + "ListTestGridSessionArtifactsRequest":{ + "type":"structure", + "required":["sessionArn"], + "members":{ + "sessionArn":{ + "shape":"DeviceFarmArn", + "documentation":"

The ARN of a TestGridSession.

" + }, + "type":{ + "shape":"TestGridSessionArtifactCategory", + "documentation":"

Limit results to a specified type of artifact.

" + }, + "maxResult":{ + "shape":"MaxPageSize", + "documentation":"

The maximum number of results to be returned by a request.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Pagination token.

" + } + } + }, + "ListTestGridSessionArtifactsResult":{ + "type":"structure", + "members":{ + "artifacts":{ + "shape":"TestGridSessionArtifacts", + "documentation":"

A list of test grid session artifacts for a TestGridSession.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Pagination token.

" + } + } + }, + "ListTestGridSessionsRequest":{ + "type":"structure", + "required":["projectArn"], + "members":{ + "projectArn":{ + "shape":"DeviceFarmArn", + "documentation":"

ARN of a TestGridProject.

" + }, + "status":{ + "shape":"TestGridSessionStatus", + "documentation":"

Return only sessions in this state.

" + }, + "creationTimeAfter":{ + "shape":"DateTime", + "documentation":"

Return only sessions created after this time.

" + }, + "creationTimeBefore":{ + "shape":"DateTime", + "documentation":"

Return only sessions created before this time.

" + }, + "endTimeAfter":{ + "shape":"DateTime", + "documentation":"

Return only sessions that ended after this time.

" + }, + "endTimeBefore":{ + "shape":"DateTime", + "documentation":"

Return only sessions that ended before this time.

" + }, + "maxResult":{ + "shape":"MaxPageSize", + "documentation":"

Return only this many results at a time.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Pagination token.

" + } + } + }, + "ListTestGridSessionsResult":{ + "type":"structure", + "members":{ + "testGridSessions":{ + "shape":"TestGridSessions", + "documentation":"

The sessions that match the criteria in a ListTestGridSessionsRequest.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Pagination token.

" } } }, @@ -3202,7 +3633,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list tests request.

" @@ -3227,11 +3658,11 @@ "members":{ "uniqueProblems":{ "shape":"UniqueProblemsByExecutionResultMap", - "documentation":"

Information about the unique problems.

Allowed values include:

  • PENDING: A pending condition.

  • PASSED: A passing condition.

  • WARNED: A warning condition.

  • FAILED: A failed condition.

  • SKIPPED: A skipped condition.

  • ERRORED: An error condition.

  • STOPPED: A stopped condition.

" + "documentation":"

Information about the unique problems.

Allowed values include:

  • PENDING

  • PASSED

  • WARNED

  • FAILED

  • SKIPPED

  • ERRORED

  • STOPPED

" }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list unique problems request.

" @@ -3246,7 +3677,7 @@ }, "type":{ "shape":"UploadType", - "documentation":"

The type of upload.

Must be one of the following values:

  • ANDROID_APP: An Android upload.

  • IOS_APP: An iOS upload.

  • WEB_APP: A web application upload.

  • EXTERNAL_DATA: An external data upload.

  • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

  • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

  • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

  • APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.

  • APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for a web app.

  • APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for a web app.

  • APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for a web app.

  • CALABASH_TEST_PACKAGE: A Calabash test package upload.

  • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

  • UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.

  • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

  • XCTEST_TEST_PACKAGE: An Xcode test package upload.

  • XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload.

  • APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.

  • APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.

  • APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.

  • APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.

  • APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.

  • APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.

  • APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.

  • INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.

  • XCTEST_UI_TEST_SPEC: An Xcode UI test spec upload.

" + "documentation":"

The type of upload.

Must be one of the following values:

  • ANDROID_APP

  • IOS_APP

  • WEB_APP

  • EXTERNAL_DATA

  • APPIUM_JAVA_JUNIT_TEST_PACKAGE

  • APPIUM_JAVA_TESTNG_TEST_PACKAGE

  • APPIUM_PYTHON_TEST_PACKAGE

  • APPIUM_NODE_TEST_PACKAGE

  • APPIUM_RUBY_TEST_PACKAGE

  • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE

  • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE

  • APPIUM_WEB_PYTHON_TEST_PACKAGE

  • APPIUM_WEB_NODE_TEST_PACKAGE

  • APPIUM_WEB_RUBY_TEST_PACKAGE

  • CALABASH_TEST_PACKAGE

  • INSTRUMENTATION_TEST_PACKAGE

  • UIAUTOMATION_TEST_PACKAGE

  • UIAUTOMATOR_TEST_PACKAGE

  • XCTEST_TEST_PACKAGE

  • XCTEST_UI_TEST_PACKAGE

  • APPIUM_JAVA_JUNIT_TEST_SPEC

  • APPIUM_JAVA_TESTNG_TEST_SPEC

  • APPIUM_PYTHON_TEST_SPEC

  • APPIUM_NODE_TEST_SPEC

  • APPIUM_RUBY_TEST_SPEC

  • APPIUM_WEB_JAVA_JUNIT_TEST_SPEC

  • APPIUM_WEB_JAVA_TESTNG_TEST_SPEC

  • APPIUM_WEB_PYTHON_TEST_SPEC

  • APPIUM_WEB_NODE_TEST_SPEC

  • APPIUM_WEB_RUBY_TEST_SPEC

  • INSTRUMENTATION_TEST_SPEC

  • XCTEST_UI_TEST_SPEC

" }, "nextToken":{ "shape":"PaginationToken", @@ -3264,7 +3695,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + "documentation":"

If the number of items that are returned is significantly large, this is an identifier that is also returned. It can be used in a subsequent call to this operation to return the next set of items in the list.

" } }, "documentation":"

Represents the result of a list uploads request.

" @@ -3274,7 +3705,7 @@ "members":{ "maxResults":{ "shape":"Integer", - "documentation":"

An integer specifying the maximum number of items you want to return in the API response.

" + "documentation":"

An integer that specifies the maximum number of items you want to return in the API response.

" }, "nextToken":{ "shape":"PaginationToken", @@ -3287,7 +3718,7 @@ "members":{ "vpceConfigurations":{ "shape":"VPCEConfigurations", - "documentation":"

An array of VPCEConfiguration objects containing information about your VPC endpoint configuration.

" + "documentation":"

An array of VPCEConfiguration objects that contain information about your VPC endpoint configuration.

" }, "nextToken":{ "shape":"PaginationToken", @@ -3311,9 +3742,14 @@ "documentation":"

The longitude.

" } }, - "documentation":"

Represents a latitude and longitude pair, expressed in geographic coordinate system degrees (for example 47.6204, -122.3491).

Elevation is currently not supported.

" + "documentation":"

Represents a latitude and longitude pair, expressed in geographic coordinate system degrees (for example, 47.6204, -122.3491).

Elevation is currently not supported.

" }, "Long":{"type":"long"}, + "MaxPageSize":{ + "type":"integer", + "max":1000, + "min":1 + }, "MaxSlotMap":{ "type":"map", "key":{"shape":"String"}, @@ -3338,10 +3774,10 @@ }, "currencyCode":{ "shape":"CurrencyCode", - "documentation":"

The currency code of a monetary amount. For example, USD means \"U.S. dollars.\"

" + "documentation":"

The currency code of a monetary amount. For example, USD means U.S. dollars.

" } }, - "documentation":"

A number representing the monetary amount for an offering or transaction.

" + "documentation":"

A number that represents the monetary amount for an offering or transaction.

" }, "Name":{ "type":"string", @@ -3365,7 +3801,7 @@ }, "type":{ "shape":"NetworkProfileType", - "documentation":"

The type of network profile. Valid values are listed below.

" + "documentation":"

The type of network profile. Valid values are listed here.

" }, "uplinkBandwidthBits":{ "shape":"Long", @@ -3444,15 +3880,15 @@ }, "description":{ "shape":"Message", - "documentation":"

A string describing the offering.

" + "documentation":"

A string that describes the offering.

" }, "type":{ "shape":"OfferingType", - "documentation":"

The type of offering (e.g., \"RECURRING\") for a device.

" + "documentation":"

The type of offering (for example, RECURRING) for a device.

" }, "platform":{ "shape":"DevicePlatform", - "documentation":"

The platform of the device (e.g., ANDROID or IOS).

" + "documentation":"

The platform of the device (for example, ANDROID or IOS).

" }, "recurringCharges":{ "shape":"RecurringCharges", @@ -3474,7 +3910,7 @@ }, "description":{ "shape":"Message", - "documentation":"

A string describing the offering promotion.

" + "documentation":"

A string that describes the offering promotion.

" } }, "documentation":"

Represents information about an offering promotion.

" @@ -3599,7 +4035,7 @@ }, "result":{ "shape":"ExecutionResult", - "documentation":"

The problem's result.

Allowed values include:

  • PENDING: A pending condition.

  • PASSED: A passing condition.

  • WARNED: A warning condition.

  • FAILED: A failed condition.

  • SKIPPED: A skipped condition.

  • ERRORED: An error condition.

  • STOPPED: A stopped condition.

" + "documentation":"

The problem's result.

Allowed values include:

  • PENDING

  • PASSED

  • WARNED

  • FAILED

  • SKIPPED

  • ERRORED

  • STOPPED

" }, "message":{ "shape":"Message", @@ -3639,7 +4075,7 @@ }, "defaultJobTimeoutMinutes":{ "shape":"JobTimeoutMinutes", - "documentation":"

The default number of minutes (at the project level) a test run will execute before it times out. The default value is 150 minutes.

" + "documentation":"

The default number of minutes (at the project level) a test run executes before it times out. The default value is 150 minutes.

" }, "created":{ "shape":"DateTime", @@ -3661,7 +4097,7 @@ }, "quantity":{ "shape":"Integer", - "documentation":"

The number of device slots you wish to purchase in an offering request.

" + "documentation":"

The number of device slots to purchase in an offering request.

" }, "offeringPromotionId":{ "shape":"OfferingPromotionIdentifier", @@ -3678,7 +4114,7 @@ "documentation":"

Represents the offering transaction for the purchase result.

" } }, - "documentation":"

The result of the purchase offering (e.g., success or failure).

" + "documentation":"

The result of the purchase offering (for example, success or failure).
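
A hedged sketch of purchasing device slots with purchase_offering; the offering ID and promotion ID are placeholders.

    import botocore.session

    client = botocore.session.get_session().create_client(
        'devicefarm', region_name='us-west-2')

    result = client.purchase_offering(
        offeringId='D68B3C05-1BA6-4360-BC69-12345EXAMPLE',  # placeholder
        quantity=1,
        offeringPromotionId='2FREEMONTHS',  # optional; placeholder promotion
    )
    print(result['offeringTransaction']['transactionId'])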

" }, "PurchasedDevicesMap":{ "type":"map", @@ -3690,19 +4126,19 @@ "members":{ "wifi":{ "shape":"Boolean", - "documentation":"

True if Wi-Fi is enabled at the beginning of the test; otherwise, false.

" + "documentation":"

True if Wi-Fi is enabled at the beginning of the test. Otherwise, false.

" }, "bluetooth":{ "shape":"Boolean", - "documentation":"

True if Bluetooth is enabled at the beginning of the test; otherwise, false.

" + "documentation":"

True if Bluetooth is enabled at the beginning of the test. Otherwise, false.

" }, "nfc":{ "shape":"Boolean", - "documentation":"

True if NFC is enabled at the beginning of the test; otherwise, false.

" + "documentation":"

True if NFC is enabled at the beginning of the test. Otherwise, false.

" }, "gps":{ "shape":"Boolean", - "documentation":"

True if GPS is enabled at the beginning of the test; otherwise, false.

" + "documentation":"

True if GPS is enabled at the beginning of the test. Otherwise, false.

" } }, "documentation":"

Represents the set of radios and their states on a device. Examples of radios include Wi-Fi, GPS, Bluetooth, and NFC.

" @@ -3716,10 +4152,10 @@ }, "frequency":{ "shape":"RecurringChargeFrequency", - "documentation":"

The frequency in which charges will recur.

" + "documentation":"

The frequency in which charges recur.

" } }, - "documentation":"

Specifies whether charges for devices will be recurring.

" + "documentation":"

Specifies whether charges for devices are recurring.

" }, "RecurringChargeFrequency":{ "type":"string", @@ -3746,11 +4182,11 @@ }, "status":{ "shape":"ExecutionStatus", - "documentation":"

The status of the remote access session. Can be any of the following:

  • PENDING: A pending status.

  • PENDING_CONCURRENCY: A pending concurrency status.

  • PENDING_DEVICE: A pending device status.

  • PROCESSING: A processing status.

  • SCHEDULING: A scheduling status.

  • PREPARING: A preparing status.

  • RUNNING: A running status.

  • COMPLETED: A completed status.

  • STOPPING: A stopping status.

" + "documentation":"

The status of the remote access session. Can be any of the following:

  • PENDING.

  • PENDING_CONCURRENCY.

  • PENDING_DEVICE.

  • PROCESSING.

  • SCHEDULING.

  • PREPARING.

  • RUNNING.

  • COMPLETED.

  • STOPPING.

" }, "result":{ "shape":"ExecutionResult", - "documentation":"

The result of the remote access session. Can be any of the following:

  • PENDING: A pending condition.

  • PASSED: A passing condition.

  • WARNED: A warning condition.

  • FAILED: A failed condition.

  • SKIPPED: A skipped condition.

  • ERRORED: An error condition.

  • STOPPED: A stopped condition.

" + "documentation":"

The result of the remote access session. Can be any of the following:

  • PENDING.

  • PASSED.

  • WARNED.

  • FAILED.

  • SKIPPED.

  • ERRORED.

  • STOPPED.

" }, "message":{ "shape":"Message", @@ -3770,11 +4206,11 @@ }, "instanceArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the instance.

" + "documentation":"

The ARN of the instance.

" }, "remoteDebugEnabled":{ "shape":"Boolean", - "documentation":"

This flag is set to true if remote debugging is enabled for the remote access session.

Remote debugging is no longer supported.

" + "documentation":"

This flag is set to true if remote debugging is enabled for the remote access session.

Remote debugging is no longer supported.

" }, "remoteRecordEnabled":{ "shape":"Boolean", @@ -3782,19 +4218,19 @@ }, "remoteRecordAppArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) for the app to be recorded in the remote access session.

" + "documentation":"

The ARN for the app to be recorded in the remote access session.

" }, "hostAddress":{ "shape":"HostAddress", - "documentation":"

IP address of the EC2 host where you need to connect to remotely debug devices. Only returned if remote debugging is enabled for the remote access session.

Remote debugging is no longer supported.

" + "documentation":"

IP address of the EC2 host where you need to connect to remotely debug devices. Only returned if remote debugging is enabled for the remote access session.

Remote debugging is no longer supported.

" }, "clientId":{ "shape":"ClientId", - "documentation":"

Unique identifier of your client for the remote access session. Only returned if remote debugging is enabled for the remote access session.

Remote debugging is no longer supported.

" + "documentation":"

Unique identifier of your client for the remote access session. Only returned if remote debugging is enabled for the remote access session.

Remote debugging is no longer supported.

" }, "billingMethod":{ "shape":"BillingMethod", - "documentation":"

The billing method of the remote access session. Possible values include METERED or UNMETERED. For more information about metered devices, see AWS Device Farm terminology.\"

" + "documentation":"

The billing method of the remote access session. Possible values include METERED or UNMETERED. For more information about metered devices, see AWS Device Farm terminology.

" }, "deviceMinutes":{ "shape":"DeviceMinutes", @@ -3806,15 +4242,15 @@ }, "deviceUdid":{ "shape":"String", - "documentation":"

Unique device identifier for the remote device. Only returned if remote debugging is enabled for the remote access session.

Remote debugging is no longer supported.

" + "documentation":"

Unique device identifier for the remote device. Only returned if remote debugging is enabled for the remote access session.

Remote debugging is no longer supported.

" }, "interactionMode":{ "shape":"InteractionMode", - "documentation":"

The interaction mode of the remote access session. Valid values are:

  • INTERACTIVE: You can interact with the iOS device by viewing, touching, and rotating the screen. You cannot run XCUITest framework-based tests in this mode.

  • NO_VIDEO: You are connected to the device but cannot interact with it or view the screen. This mode has the fastest test execution speed. You can run XCUITest framework-based tests in this mode.

  • VIDEO_ONLY: You can view the screen but cannot touch or rotate it. You can run XCUITest framework-based tests and watch the screen in this mode.

" + "documentation":"

The interaction mode of the remote access session. Valid values are:

  • INTERACTIVE: You can interact with the iOS device by viewing, touching, and rotating the screen. You cannot run XCUITest framework-based tests in this mode.

  • NO_VIDEO: You are connected to the device, but cannot interact with it or view the screen. This mode has the fastest test execution speed. You can run XCUITest framework-based tests in this mode.

  • VIDEO_ONLY: You can view the screen, but cannot touch or rotate it. You can run XCUITest framework-based tests and watch the screen in this mode.
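
A minimal sketch of requesting one of these modes when creating a remote access session; the ARNs are placeholders.

    import botocore.session

    client = botocore.session.get_session().create_client(
        'devicefarm', region_name='us-west-2')

    session_result = client.create_remote_access_session(
        projectArn='arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE',
        deviceArn='arn:aws:devicefarm:us-west-2::device:EXAMPLE',
        name='xcuitest-friendly-session',
        # NO_VIDEO gives the fastest execution and allows XCUITest-based tests.
        interactionMode='NO_VIDEO',
    )
    print(session_result['remoteAccessSession']['status'])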

" }, "skipAppResign":{ "shape":"SkipAppResign", - "documentation":"

When set to true, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.

For more information about how Device Farm re-signs your app(s), see Do you modify my app? in the AWS Device Farm FAQs.

" + "documentation":"

When set to true, for private devices, Device Farm does not sign your app again. For public devices, Device Farm always signs your apps again.

For more information about how Device Farm re-signs your apps, see Do you modify my app? in the AWS Device Farm FAQs.

" } }, "documentation":"

Represents information about the remote access session.

" @@ -3835,7 +4271,7 @@ "documentation":"

The quantity requested in an offering renewal.

" } }, - "documentation":"

A request representing an offering renewal.

" + "documentation":"

A request that represents an offering renewal.

" }, "RenewOfferingResult":{ "type":"structure", @@ -3861,12 +4297,30 @@ }, "documentation":"

Represents the screen resolution of a device in height and width, expressed in pixels.

" }, + "ResourceDescription":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".*\\S.*" + }, + "ResourceId":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*\\S.*" + }, + "ResourceName":{ + "type":"string", + "max":64, + "min":1, + "pattern":".*\\S.*" + }, "Rule":{ "type":"structure", "members":{ "attribute":{ "shape":"DeviceAttribute", - "documentation":"

The rule's stringified attribute. For example, specify the value as \"\\\"abc\\\"\".

The supported operators for each attribute are provided in the following list.

APPIUM_VERSION

The Appium version for the test.

Supported operators: CONTAINS

ARN

The Amazon Resource Name (ARN) of the device. For example, \"arn:aws:devicefarm:us-west-2::device:12345Example\".

Supported operators: EQUALS, IN, NOT_IN

AVAILABILITY

The current availability of the device. Valid values are \"AVAILABLE\", \"HIGHLY_AVAILABLE\", \"BUSY\", or \"TEMPORARY_NOT_AVAILABLE\".

Supported operators: EQUALS

FLEET_TYPE

The fleet type. Valid values are \"PUBLIC\" or \"PRIVATE\".

Supported operators: EQUALS

FORM_FACTOR

The device form factor. Valid values are \"PHONE\" or \"TABLET\".

Supported operators: EQUALS, IN, NOT_IN

INSTANCE_ARN

The Amazon Resource Name (ARN) of the device instance.

Supported operators: IN, NOT_IN

INSTANCE_LABELS

The label of the device instance.

Supported operators: CONTAINS

MANUFACTURER

The device manufacturer. For example, \"Apple\".

Supported operators: EQUALS, IN, NOT_IN

MODEL

The device model, such as \"Apple iPad Air 2\" or \"Google Pixel\".

Supported operators: CONTAINS, EQUALS, IN, NOT_IN

OS_VERSION

The operating system version. For example, \"10.3.2\".

Supported operators: EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, IN, LESS_THAN, LESS_THAN_OR_EQUALS, NOT_IN

PLATFORM

The device platform. Valid values are \"ANDROID\" or \"IOS\".

Supported operators: EQUALS, IN, NOT_IN

REMOTE_ACCESS_ENABLED

Whether the device is enabled for remote access. Valid values are \"TRUE\" or \"FALSE\".

Supported operators: EQUALS

REMOTE_DEBUG_ENABLED

Whether the device is enabled for remote debugging. Valid values are \"TRUE\" or \"FALSE\".

Supported operators: EQUALS

This filter will be ignored, as remote debugging is no longer supported.

" + "documentation":"

The rule's stringified attribute. For example, specify the value as \"\\\"abc\\\"\".

The supported operators for each attribute are provided in the following list.

APPIUM_VERSION

The Appium version for the test.

Supported operators: CONTAINS

ARN

The Amazon Resource Name (ARN) of the device (for example, arn:aws:devicefarm:us-west-2::device:12345Example).

Supported operators: EQUALS, IN, NOT_IN

AVAILABILITY

The current availability of the device. Valid values are AVAILABLE, HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE.

Supported operators: EQUALS

FLEET_TYPE

The fleet type. Valid values are PUBLIC or PRIVATE.

Supported operators: EQUALS

FORM_FACTOR

The device form factor. Valid values are PHONE or TABLET.

Supported operators: EQUALS, IN, NOT_IN

INSTANCE_ARN

The Amazon Resource Name (ARN) of the device instance.

Supported operators: IN, NOT_IN

INSTANCE_LABELS

The label of the device instance.

Supported operators: CONTAINS

MANUFACTURER

The device manufacturer (for example, Apple).

Supported operators: EQUALS, IN, NOT_IN

MODEL

The device model, such as Apple iPad Air 2 or Google Pixel.

Supported operators: CONTAINS, EQUALS, IN, NOT_IN

OS_VERSION

The operating system version (for example, 10.3.2).

Supported operators: EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, IN, LESS_THAN, LESS_THAN_OR_EQUALS, NOT_IN

PLATFORM

The device platform. Valid values are ANDROID or IOS.

Supported operators: EQUALS, IN, NOT_IN

REMOTE_ACCESS_ENABLED

Whether the device is enabled for remote access. Valid values are TRUE or FALSE.

Supported operators: EQUALS

REMOTE_DEBUG_ENABLED

Whether the device is enabled for remote debugging. Valid values are TRUE or FALSE.

Supported operators: EQUALS

Because remote debugging is no longer supported, this filter is ignored.
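
These rules are what create_device_pool consumes. A sketch follows; note that rule values are stringified as described above, so string literals carry embedded quotes (json.dumps produces exactly that form). The project ARN is a placeholder.

    import json
    import botocore.session

    client = botocore.session.get_session().create_client(
        'devicefarm', region_name='us-west-2')

    pool = client.create_device_pool(
        projectArn='arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE',
        name='android-phones',
        rules=[
            # json.dumps('ANDROID') yields the stringified value '"ANDROID"'.
            {'attribute': 'PLATFORM', 'operator': 'EQUALS', 'value': json.dumps('ANDROID')},
            {'attribute': 'FORM_FACTOR', 'operator': 'EQUALS', 'value': json.dumps('PHONE')},
        ],
    )
    print(pool['devicePool']['arn'])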

" }, "operator":{ "shape":"RuleOperator", @@ -3909,11 +4363,11 @@ }, "type":{ "shape":"TestType", - "documentation":"

The run's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.

" + "documentation":"

The run's type.

Must be one of the following values:

  • BUILTIN_FUZZ

  • BUILTIN_EXPLORER

    For Android, an app explorer that traverses an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT

  • APPIUM_JAVA_TESTNG

  • APPIUM_PYTHON

  • APPIUM_NODE

  • APPIUM_RUBY

  • APPIUM_WEB_JAVA_JUNIT

  • APPIUM_WEB_JAVA_TESTNG

  • APPIUM_WEB_PYTHON

  • APPIUM_WEB_NODE

  • APPIUM_WEB_RUBY

  • CALABASH

  • INSTRUMENTATION

  • UIAUTOMATION

  • UIAUTOMATOR

  • XCTEST

  • XCTEST_UI

" }, "platform":{ "shape":"DevicePlatform", - "documentation":"

The run's platform.

Allowed values include:

  • ANDROID: The Android platform.

  • IOS: The iOS platform.

" + "documentation":"

The run's platform.

Allowed values include:

  • ANDROID

  • IOS

" }, "created":{ "shape":"DateTime", @@ -3921,11 +4375,11 @@ }, "status":{ "shape":"ExecutionStatus", - "documentation":"

The run's status.

Allowed values include:

  • PENDING: A pending status.

  • PENDING_CONCURRENCY: A pending concurrency status.

  • PENDING_DEVICE: A pending device status.

  • PROCESSING: A processing status.

  • SCHEDULING: A scheduling status.

  • PREPARING: A preparing status.

  • RUNNING: A running status.

  • COMPLETED: A completed status.

  • STOPPING: A stopping status.

" + "documentation":"

The run's status.

Allowed values include:

  • PENDING

  • PENDING_CONCURRENCY

  • PENDING_DEVICE

  • PROCESSING

  • SCHEDULING

  • PREPARING

  • RUNNING

  • COMPLETED

  • STOPPING

" }, "result":{ "shape":"ExecutionResult", - "documentation":"

The run's result.

Allowed values include:

  • PENDING: A pending condition.

  • PASSED: A passing condition.

  • WARNED: A warning condition.

  • FAILED: A failed condition.

  • SKIPPED: A skipped condition.

  • ERRORED: An error condition.

  • STOPPED: A stopped condition.

" + "documentation":"

The run's result.

Allowed values include:

  • PENDING

  • PASSED

  • WARNED

  • FAILED

  • SKIPPED

  • ERRORED

  • STOPPED

" }, "started":{ "shape":"DateTime", @@ -3953,7 +4407,7 @@ }, "billingMethod":{ "shape":"BillingMethod", - "documentation":"

Specifies the billing method for a test run: metered or unmetered. If the parameter is not specified, the default value is metered.

" + "documentation":"

Specifies the billing method for a test run: metered or unmetered. If the parameter is not specified, the default value is metered.

If you have unmetered device slots, you must set this to unmetered to use them. Otherwise, the run is counted toward metered device minutes.

" }, "deviceMinutes":{ "shape":"DeviceMinutes", @@ -3965,7 +4419,7 @@ }, "parsingResultUrl":{ "shape":"String", - "documentation":"

Read-only URL for an object in S3 bucket where you can get the parsing results of the test package. If the test package doesn't parse, the reason why it doesn't parse appears in the file that this URL points to.

" + "documentation":"

Read-only URL for an object in an S3 bucket where you can get the parsing results of the test package. If the test package doesn't parse, the reason why it doesn't parse appears in the file that this URL points to.

" }, "resultCode":{ "shape":"ExecutionResultCode", @@ -3985,7 +4439,7 @@ }, "jobTimeoutMinutes":{ "shape":"JobTimeoutMinutes", - "documentation":"

The number of minutes the job will execute before it times out.

" + "documentation":"

The number of minutes the job executes before it times out.

" }, "devicePoolArn":{ "shape":"AmazonResourceName", @@ -4013,7 +4467,7 @@ }, "skipAppResign":{ "shape":"SkipAppResign", - "documentation":"

When set to true, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.

For more information about how Device Farm re-signs your app(s), see Do you modify my app? in the AWS Device Farm FAQs.

" + "documentation":"

When set to true, for private devices, Device Farm does not sign your app again. For public devices, Device Farm always signs your apps again.

For more information about how Device Farm re-signs your apps, see Do you modify my app? in the AWS Device Farm FAQs.

" }, "testSpecArn":{ "shape":"AmazonResourceName", @@ -4024,7 +4478,7 @@ "documentation":"

The results of a device filter used to select the devices for a test run.

" } }, - "documentation":"

Represents a test run on a set of devices with a given app package, test parameters, etc.

" + "documentation":"

Represents a test run on a set of devices with a given app package, test parameters, and so on.

" }, "Runs":{ "type":"list", @@ -4043,7 +4497,7 @@ }, "url":{ "shape":"URL", - "documentation":"

The pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the sample's file.

" + "documentation":"

The presigned Amazon S3 URL that can be used with a GET request to download the sample's file.

" } }, "documentation":"

Represents a sample of performance data.

" @@ -4079,7 +4533,7 @@ "members":{ "extraDataPackageArn":{ "shape":"AmazonResourceName", - "documentation":"

The ARN of the extra data for the run. The extra data is a .zip file that AWS Device Farm will extract to external data for Android or the app's sandbox for iOS.

" + "documentation":"

The ARN of the extra data for the run. The extra data is a .zip file that AWS Device Farm extracts to external data for Android or the app's sandbox for iOS.

" }, "networkProfileArn":{ "shape":"AmazonResourceName", @@ -4095,7 +4549,7 @@ }, "vpceConfigurationArns":{ "shape":"AmazonResourceNames", - "documentation":"

An array of Amazon Resource Names (ARNs) for your VPC endpoint configurations.

" + "documentation":"

An array of ARNs for your VPC endpoint configurations.

" }, "customerArtifactPaths":{ "shape":"CustomerArtifactPaths", @@ -4107,11 +4561,11 @@ }, "auxiliaryApps":{ "shape":"AmazonResourceNames", - "documentation":"

A list of Upload ARNs for app packages that will be installed alongside your app.

" + "documentation":"

A list of upload ARNs for app packages to be installed with your app.

" }, "billingMethod":{ "shape":"BillingMethod", - "documentation":"

Specifies the billing method for a test run: metered or unmetered. If the parameter is not specified, the default value is metered.

" + "documentation":"

Specifies the billing method for a test run: metered or unmetered. If the parameter is not specified, the default value is metered.

If you have purchased unmetered device slots, you must set this parameter to unmetered to make use of them. Otherwise, your run counts against your metered time.

" } }, "documentation":"

Represents the settings for a run. Includes things like location, radio states, auxiliary apps, and network profiles.

" @@ -4129,7 +4583,7 @@ }, "appArn":{ "shape":"AmazonResourceName", - "documentation":"

The ARN of the app to schedule a run.

" + "documentation":"

The ARN of an application package to run tests against, created with CreateUpload. See ListUploads.

" }, "devicePoolArn":{ "shape":"AmazonResourceName", @@ -4137,7 +4591,7 @@ }, "deviceSelectionConfiguration":{ "shape":"DeviceSelectionConfiguration", - "documentation":"

The filter criteria used to dynamically select a set of devices for a test run, as well as the maximum number of devices to be included in the run.

Either devicePoolArn or deviceSelectionConfiguration is required in a request.

" + "documentation":"

The filter criteria used to dynamically select a set of devices for a test run and the maximum number of devices to be included in the run.

Either devicePoolArn or deviceSelectionConfiguration is required in a request.

" }, "name":{ "shape":"Name", @@ -4174,11 +4628,11 @@ "members":{ "type":{ "shape":"TestType", - "documentation":"

The test's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.

" + "documentation":"

The test's type.

Must be one of the following values:

  • BUILTIN_FUZZ

  • BUILTIN_EXPLORER. For Android, an app explorer that traverses an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT

  • APPIUM_JAVA_TESTNG

  • APPIUM_PYTHON

  • APPIUM_NODE

  • APPIUM_RUBY

  • APPIUM_WEB_JAVA_JUNIT

  • APPIUM_WEB_JAVA_TESTNG

  • APPIUM_WEB_PYTHON

  • APPIUM_WEB_NODE

  • APPIUM_WEB_RUBY

  • CALABASH

  • INSTRUMENTATION

  • UIAUTOMATION

  • UIAUTOMATOR

  • XCTEST

  • XCTEST_UI

" }, "testPackageArn":{ "shape":"AmazonResourceName", - "documentation":"

The ARN of the uploaded test that will be run.

" + "documentation":"

The ARN of the uploaded test to be run.

" }, "testSpecArn":{ "shape":"AmazonResourceName", @@ -4190,10 +4644,10 @@ }, "parameters":{ "shape":"TestParameters", - "documentation":"

The test's parameters, such as test framework parameters and fixture settings. Parameters are represented by name-value pairs of strings.

For all tests:

  • app_performance_monitoring: Performance monitoring is enabled by default. Set this parameter to \"false\" to disable it.

For Calabash tests:

  • profile: A cucumber profile, for example, \"my_profile_name\".

  • tags: You can limit execution to features or scenarios that have (or don't have) certain tags, for example, \"@smoke\" or \"@smoke,~@wip\".

For Appium tests (all types):

  • appium_version: The Appium version. Currently supported values are \"1.6.5\" (and higher), \"latest\", and \"default\".

    • “latest” will run the latest Appium version supported by Device Farm (1.9.1).

    • For “default”, Device Farm will choose a compatible version of Appium for the device. The current behavior is to run 1.7.2 on Android devices and iOS 9 and earlier, 1.7.2 for iOS 10 and later.

    • This behavior is subject to change.

For Fuzz tests (Android only):

  • event_count: The number of events, between 1 and 10000, that the UI fuzz test should perform.

  • throttle: The time, in ms, between 0 and 1000, that the UI fuzz test should wait between events.

  • seed: A seed to use for randomizing the UI fuzz test. Using the same seed value between tests ensures identical event sequences.

For Explorer tests:

  • username: A username to use if the Explorer encounters a login form. If not supplied, no username will be inserted.

  • password: A password to use if the Explorer encounters a login form. If not supplied, no password will be inserted.

For Instrumentation:

  • filter: A test filter string. Examples:

    • Running a single test case: \"com.android.abc.Test1\"

    • Running a single test: \"com.android.abc.Test1#smoke\"

    • Running multiple tests: \"com.android.abc.Test1,com.android.abc.Test2\"

For XCTest and XCTestUI:

  • filter: A test filter string. Examples:

    • Running a single test class: \"LoginTests\"

    • Running a multiple test classes: \"LoginTests,SmokeTests\"

    • Running a single test: \"LoginTests/testValid\"

    • Running multiple tests: \"LoginTests/testValid,LoginTests/testInvalid\"

For UIAutomator:

  • filter: A test filter string. Examples:

    • Running a single test case: \"com.android.abc.Test1\"

    • Running a single test: \"com.android.abc.Test1#smoke\"

    • Running multiple tests: \"com.android.abc.Test1,com.android.abc.Test2\"

" + "documentation":"

The test's parameters, such as test framework parameters and fixture settings. Parameters are represented by name-value pairs of strings.

For all tests:

  • app_performance_monitoring: Performance monitoring is enabled by default. Set this parameter to false to disable it.

For Calabash tests:

  • profile: A cucumber profile (for example, my_profile_name).

  • tags: You can limit execution to features or scenarios that have (or don't have) certain tags (for example, @smoke or @smoke,~@wip).

For Appium tests (all types):

  • appium_version: The Appium version. Currently supported values are 1.6.5 (and later), latest, and default.

    • latest runs the latest Appium version supported by Device Farm (1.9.1).

    • For default, Device Farm selects a compatible version of Appium for the device. The current behavior is to run 1.7.2 on Android devices and iOS 9 and earlier, and 1.7.2 for iOS 10 and later.

    • This behavior is subject to change.

For fuzz tests (Android only):

  • event_count: The number of events, between 1 and 10000, that the UI fuzz test should perform.

  • throttle: The time, in ms, between 0 and 1000, that the UI fuzz test should wait between events.

  • seed: A seed to use for randomizing the UI fuzz test. Using the same seed value between tests ensures identical event sequences.

For Explorer tests:

  • username: A user name to use if the Explorer encounters a login form. If not supplied, no user name is inserted.

  • password: A password to use if the Explorer encounters a login form. If not supplied, no password is inserted.

For Instrumentation:

  • filter: A test filter string. Examples:

    • Running a single test case: com.android.abc.Test1

    • Running a single test: com.android.abc.Test1#smoke

    • Running multiple tests: com.android.abc.Test1,com.android.abc.Test2

For XCTest and XCTestUI:

  • filter: A test filter string. Examples:

    • Running a single test class: LoginTests

    • Running multiple test classes: LoginTests,SmokeTests

    • Running a single test: LoginTests/testValid

    • Running multiple tests: LoginTests/testValid,LoginTests/testInvalid

For UIAutomator:

  • filter: A test filter string. Examples:

    • Running a single test case: com.android.abc.Test1

    • Running a single test: com.android.abc.Test1#smoke

    • Running multiple tests: com.android.abc.Test1,com.android.abc.Test2
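As a rough illustration of how these name-value pairs are passed through the SDK, here is a minimal boto3 sketch of ScheduleRun with Appium test parameters. All ARNs, the run name, and the parameter values are placeholders, not real resources.

    import boto3

    # Minimal sketch; ARNs below are placeholders.
    devicefarm = boto3.client("devicefarm", region_name="us-west-2")

    run = devicefarm.schedule_run(
        projectArn="arn:aws:devicefarm:us-west-2:111122223333:project:EXAMPLE",
        appArn="arn:aws:devicefarm:us-west-2:111122223333:upload:EXAMPLE-APP",
        devicePoolArn="arn:aws:devicefarm:us-west-2:111122223333:devicepool:EXAMPLE",
        name="appium-smoke",
        test={
            "type": "APPIUM_JAVA_JUNIT",
            "testPackageArn": "arn:aws:devicefarm:us-west-2:111122223333:upload:EXAMPLE-TESTS",
            # Test parameters are name-value pairs of strings, as described above.
            "parameters": {"appium_version": "latest"},
        },
    )
    print(run["run"]["arn"])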

" } }, - "documentation":"

Represents test settings. This data structure is passed in as the \"test\" parameter to ScheduleRun. For an example of the JSON request syntax, see ScheduleRun.

" + "documentation":"

Represents test settings. This data structure is passed in as the test parameter to ScheduleRun. For an example of the JSON request syntax, see ScheduleRun.

" }, "ServiceAccountException":{ "type":"structure", @@ -4223,7 +4677,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm job you wish to stop.

" + "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm job to stop.

" } } }, @@ -4242,7 +4696,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the remote access session you wish to stop.

" + "documentation":"

The Amazon Resource Name (ARN) of the remote access session to stop.

" } }, "documentation":"

Represents the request to stop the remote access session.

" @@ -4252,7 +4706,7 @@ "members":{ "remoteAccessSession":{ "shape":"RemoteAccessSession", - "documentation":"

A container representing the metadata from the service about the remote access session you are stopping.

" + "documentation":"

A container that represents the metadata from the service about the remote access session you are stopping.

" } }, "documentation":"

Represents the response from the server that describes the remote access session when AWS Device Farm stops the session.

" @@ -4263,7 +4717,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm run you wish to stop.

" + "documentation":"

Represents the Amazon Resource Name (ARN) of the Device Farm run to stop.

" } }, "documentation":"

Represents the request to stop a specific run.

" @@ -4292,7 +4746,7 @@ }, "type":{ "shape":"TestType", - "documentation":"

The suite's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.

" + "documentation":"

The suite's type.

Must be one of the following values:

  • BUILTIN_FUZZ

  • BUILTIN_EXPLORER

    Only available for Android; an app explorer that traverses an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT

  • APPIUM_JAVA_TESTNG

  • APPIUM_PYTHON

  • APPIUM_NODE

  • APPIUM_RUBY

  • APPIUM_WEB_JAVA_JUNIT

  • APPIUM_WEB_JAVA_TESTNG

  • APPIUM_WEB_PYTHON

  • APPIUM_WEB_NODE

  • APPIUM_WEB_RUBY

  • CALABASH

  • INSTRUMENTATION

  • UIAUTOMATION

  • UIAUTOMATOR

  • XCTEST

  • XCTEST_UI

" }, "created":{ "shape":"DateTime", @@ -4300,11 +4754,11 @@ }, "status":{ "shape":"ExecutionStatus", - "documentation":"

The suite's status.

Allowed values include:

  • PENDING: A pending status.

  • PENDING_CONCURRENCY: A pending concurrency status.

  • PENDING_DEVICE: A pending device status.

  • PROCESSING: A processing status.

  • SCHEDULING: A scheduling status.

  • PREPARING: A preparing status.

  • RUNNING: A running status.

  • COMPLETED: A completed status.

  • STOPPING: A stopping status.

" + "documentation":"

The suite's status.

Allowed values include:

  • PENDING

  • PENDING_CONCURRENCY

  • PENDING_DEVICE

  • PROCESSING

  • SCHEDULING

  • PREPARING

  • RUNNING

  • COMPLETED

  • STOPPING

" }, "result":{ "shape":"ExecutionResult", - "documentation":"

The suite's result.

Allowed values include:

  • PENDING: A pending condition.

  • PASSED: A passing condition.

  • WARNED: A warning condition.

  • FAILED: A failed condition.

  • SKIPPED: A skipped condition.

  • ERRORED: An error condition.

  • STOPPED: A stopped condition.

" + "documentation":"

The suite's result.

Allowed values include:

  • PENDING

  • PASSED

  • WARNED

  • FAILED

  • SKIPPED

  • ERRORED

  • STOPPED

" }, "started":{ "shape":"DateTime", @@ -4342,14 +4796,14 @@ "members":{ "Key":{ "shape":"TagKey", - "documentation":"

One part of a key-value pair that make up a tag. A key is a general label that acts like a category for more specific tag values.

" + "documentation":"

One part of a key-value pair that makes up a tag. A key is a general label that acts like a category for more specific tag values.

" }, "Value":{ "shape":"TagValue", - "documentation":"

The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key).

" + "documentation":"

The optional part of a key-value pair that makes up a tag. A value acts as a descriptor in a tag category (key).

" } }, - "documentation":"

The metadata that you apply to a resource to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The metadata that you apply to a resource to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters. Tag values can have a maximum length of 256 characters.

" }, "TagKey":{ "type":"string", @@ -4392,12 +4846,12 @@ ], "members":{ "ResourceARN":{ - "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the resource(s) to which to add tags. You can associate tags with the following Device Farm resources: PROJECT, RUN, NETWORK_PROFILE, INSTANCE_PROFILE, DEVICE_INSTANCE, SESSION, DEVICE_POOL, DEVICE, and VPCE_CONFIGURATION.

" + "shape":"DeviceFarmArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource or resources to which to add tags. You can associate tags with the following Device Farm resources: PROJECT, RUN, NETWORK_PROFILE, INSTANCE_PROFILE, DEVICE_INSTANCE, SESSION, DEVICE_POOL, DEVICE, and VPCE_CONFIGURATION.

" }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters. Tag values can have a maximum length of 256 characters.
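A minimal sketch of calling TagResource through boto3, using the ResourceARN and Tags members shown here; the project ARN and tag values are placeholders.

    import boto3

    devicefarm = boto3.client("devicefarm", region_name="us-west-2")
    devicefarm.tag_resource(
        ResourceARN="arn:aws:devicefarm:us-west-2:111122223333:project:EXAMPLE",
        # Key: up to 128 characters; Value: up to 256 characters.
        Tags=[{"Key": "team", "Value": "mobile-qa"}],
    )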

" } } }, @@ -4424,7 +4878,7 @@ }, "type":{ "shape":"TestType", - "documentation":"

The test's type.

Must be one of the following values:

  • BUILTIN_FUZZ: The built-in fuzz type.

  • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

  • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

  • APPIUM_PYTHON: The Appium Python type.

  • APPIUM_NODE: The Appium Node.js type.

  • APPIUM_RUBY: The Appium Ruby type.

  • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for web apps.

  • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for web apps.

  • APPIUM_WEB_PYTHON: The Appium Python type for web apps.

  • APPIUM_WEB_NODE: The Appium Node.js type for web apps.

  • APPIUM_WEB_RUBY: The Appium Ruby type for web apps.

  • CALABASH: The Calabash type.

  • INSTRUMENTATION: The Instrumentation type.

  • UIAUTOMATION: The uiautomation type.

  • UIAUTOMATOR: The uiautomator type.

  • XCTEST: The Xcode test type.

  • XCTEST_UI: The Xcode UI test type.

" + "documentation":"

The test's type.

Must be one of the following values:

  • BUILTIN_FUZZ

  • BUILTIN_EXPLORER

    For Android, an app explorer that traverses an Android app, interacting with it and capturing screenshots at the same time.

  • APPIUM_JAVA_JUNIT

  • APPIUM_JAVA_TESTNG

  • APPIUM_PYTHON

  • APPIUM_NODE

  • APPIUM_RUBY

  • APPIUM_WEB_JAVA_JUNIT

  • APPIUM_WEB_JAVA_TESTNG

  • APPIUM_WEB_PYTHON

  • APPIUM_WEB_NODE

  • APPIUM_WEB_RUBY

  • CALABASH

  • INSTRUMENTATION

  • UIAUTOMATION

  • UIAUTOMATOR

  • XCTEST

  • XCTEST_UI

" }, "created":{ "shape":"DateTime", @@ -4432,11 +4886,11 @@ }, "status":{ "shape":"ExecutionStatus", - "documentation":"

The test's status.

Allowed values include:

  • PENDING: A pending status.

  • PENDING_CONCURRENCY: A pending concurrency status.

  • PENDING_DEVICE: A pending device status.

  • PROCESSING: A processing status.

  • SCHEDULING: A scheduling status.

  • PREPARING: A preparing status.

  • RUNNING: A running status.

  • COMPLETED: A completed status.

  • STOPPING: A stopping status.

" + "documentation":"

The test's status.

Allowed values include:

  • PENDING

  • PENDING_CONCURRENCY

  • PENDING_DEVICE

  • PROCESSING

  • SCHEDULING

  • PREPARING

  • RUNNING

  • COMPLETED

  • STOPPING

" }, "result":{ "shape":"ExecutionResult", - "documentation":"

The test's result.

Allowed values include:

  • PENDING: A pending condition.

  • PASSED: A passing condition.

  • WARNED: A warning condition.

  • FAILED: A failed condition.

  • SKIPPED: A skipped condition.

  • ERRORED: An error condition.

  • STOPPED: A stopped condition.

" + "documentation":"

The test's result.

Allowed values include:

  • PENDING

  • PASSED

  • WARNED

  • FAILED

  • SKIPPED

  • ERRORED

  • STOPPED

" }, "started":{ "shape":"DateTime", @@ -4461,6 +4915,146 @@ }, "documentation":"

Represents a condition that is evaluated.

" }, + "TestGridProject":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"DeviceFarmArn", + "documentation":"

The ARN for the project.

" + }, + "name":{ + "shape":"String", + "documentation":"

A human-readable name for the project.

" + }, + "description":{ + "shape":"String", + "documentation":"

A human-readable description for the project.

" + }, + "created":{ + "shape":"DateTime", + "documentation":"

When the project was created.

" + } + }, + "documentation":"

A Selenium testing project. Projects are used to collect and collate sessions.

" + }, + "TestGridProjects":{ + "type":"list", + "member":{"shape":"TestGridProject"} + }, + "TestGridSession":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"DeviceFarmArn", + "documentation":"

The ARN of the session.

" + }, + "status":{ + "shape":"TestGridSessionStatus", + "documentation":"

The state of the session.

" + }, + "created":{ + "shape":"DateTime", + "documentation":"

The time that the session was started.

" + }, + "ended":{ + "shape":"DateTime", + "documentation":"

The time the session ended.

" + }, + "billingMinutes":{ + "shape":"Double", + "documentation":"

The number of billed minutes that were used for this session.

" + }, + "seleniumProperties":{ + "shape":"String", + "documentation":"

A JSON object of options and parameters passed to the Selenium WebDriver.

" + } + }, + "documentation":"

A TestGridSession is a single instance of a browser launched from the URL provided by a call to CreateTestGridUrl.

" + }, + "TestGridSessionAction":{ + "type":"structure", + "members":{ + "action":{ + "shape":"String", + "documentation":"

The action taken by the session.

" + }, + "started":{ + "shape":"DateTime", + "documentation":"

The time that the session invoked the action.

" + }, + "duration":{ + "shape":"Long", + "documentation":"

The time, in milliseconds, that the action took to complete in the browser.

" + }, + "statusCode":{ + "shape":"String", + "documentation":"

HTTP status code returned to the browser when the action was taken.

" + }, + "requestMethod":{ + "shape":"String", + "documentation":"

HTTP method that the browser used to make the request.

" + } + }, + "documentation":"

An action taken by a TestGridSession browser instance.

" + }, + "TestGridSessionActions":{ + "type":"list", + "member":{"shape":"TestGridSessionAction"} + }, + "TestGridSessionArtifact":{ + "type":"structure", + "members":{ + "filename":{ + "shape":"String", + "documentation":"

The file name of the artifact.

" + }, + "type":{ + "shape":"TestGridSessionArtifactType", + "documentation":"

The kind of artifact.

" + }, + "url":{ + "shape":"String", + "documentation":"

A semi-stable URL to the content of the object.

" + } + }, + "documentation":"

Artifacts are video and other files that are produced in the process of running a browser in an automated context.

Video elements might be broken up into multiple artifacts as they grow in size during creation.

" + }, + "TestGridSessionArtifactCategory":{ + "type":"string", + "enum":[ + "VIDEO", + "LOG" + ] + }, + "TestGridSessionArtifactType":{ + "type":"string", + "enum":[ + "UNKNOWN", + "VIDEO", + "SELENIUM_LOG" + ] + }, + "TestGridSessionArtifacts":{ + "type":"list", + "member":{"shape":"TestGridSessionArtifact"} + }, + "TestGridSessionStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "CLOSED", + "ERRORED" + ] + }, + "TestGridSessions":{ + "type":"list", + "member":{"shape":"TestGridSession"} + }, + "TestGridUrlExpiresInSecondsInput":{ + "type":"integer", + "max":86400, + "min":60 + }, "TestParameters":{ "type":"map", "key":{"shape":"String"}, @@ -4559,8 +5153,8 @@ ], "members":{ "ResourceARN":{ - "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the resource(s) from which to delete tags. You can associate tags with the following Device Farm resources: PROJECT, RUN, NETWORK_PROFILE, INSTANCE_PROFILE, DEVICE_INSTANCE, SESSION, DEVICE_POOL, DEVICE, and VPCE_CONFIGURATION.

" + "shape":"DeviceFarmArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource or resources from which to delete tags. You can associate tags with the following Device Farm resources: PROJECT, RUN, NETWORK_PROFILE, INSTANCE_PROFILE, DEVICE_INSTANCE, SESSION, DEVICE_POOL, DEVICE, and VPCE_CONFIGURATION.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4583,7 +5177,7 @@ }, "profileArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the profile that you want to associate with the device instance.

" + "documentation":"

The ARN of the profile that you want to associate with the device instance.

" }, "labels":{ "shape":"InstanceLabels", @@ -4596,7 +5190,7 @@ "members":{ "deviceInstance":{ "shape":"DeviceInstance", - "documentation":"

An object containing information about your device instance.

" + "documentation":"

An object that contains information about your device instance.

" } } }, @@ -4606,19 +5200,19 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the Device Farm device pool you wish to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the Device Farm device pool to update.

" }, "name":{ "shape":"Name", - "documentation":"

A string representing the name of the device pool you wish to update.

" + "documentation":"

A string that represents the name of the device pool to update.

" }, "description":{ "shape":"Message", - "documentation":"

A description of the device pool you wish to update.

" + "documentation":"

A description of the device pool to update.

" }, "rules":{ "shape":"Rules", - "documentation":"

Represents the rules you wish to modify for the device pool. Updating rules is optional; however, if you choose to update rules for your request, the update will replace the existing rules.

" + "documentation":"

Represents the rules to modify for the device pool. Updating rules is optional. If you update rules for your request, the update replaces the existing rules.

" }, "maxDevices":{ "shape":"Integer", @@ -4626,7 +5220,7 @@ }, "clearMaxDevices":{ "shape":"Boolean", - "documentation":"

Sets whether the maxDevices parameter applies to your device pool. If you set this parameter to true, the maxDevices parameter does not apply, and Device Farm does not limit the number of devices that it adds to your device pool. In this case, Device Farm adds all available devices that meet the criteria that are specified for the rules parameter.

If you use this parameter in your request, you cannot use the maxDevices parameter in the same request.

" + "documentation":"

Sets whether the maxDevices parameter applies to your device pool. If you set this parameter to true, the maxDevices parameter does not apply, and Device Farm does not limit the number of devices that it adds to your device pool. In this case, Device Farm adds all available devices that meet the criteria specified in the rules parameter.

If you use this parameter in your request, you cannot use the maxDevices parameter in the same request.

" } }, "documentation":"

Represents a request to the update device pool operation.

" @@ -4663,7 +5257,7 @@ }, "excludeAppPackagesFromCleanup":{ "shape":"PackageIds", - "documentation":"

An array of strings specifying the list of app packages that should not be cleaned up from the device after a test run is over.

The list of packages is only considered if you set packageCleanup to true.

" + "documentation":"

An array of strings that specifies the list of app packages that should not be cleaned up from the device after a test run is over.

The list of packages is only considered if you set packageCleanup to true.

" }, "rebootAfterUse":{ "shape":"Boolean", @@ -4676,7 +5270,7 @@ "members":{ "instanceProfile":{ "shape":"InstanceProfile", - "documentation":"

An object containing information about your instance profile.

" + "documentation":"

An object that contains information about your instance profile.

" } } }, @@ -4698,7 +5292,7 @@ }, "type":{ "shape":"NetworkProfileType", - "documentation":"

The type of network profile you wish to return information about. Valid values are listed below.

" + "documentation":"

The type of network profile to return information about. Valid values are listed here.

" }, "uplinkBandwidthBits":{ "shape":"Long", @@ -4749,15 +5343,15 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the project whose name you wish to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the project whose name to update.

" }, "name":{ "shape":"Name", - "documentation":"

A string representing the new name of the project that you are updating.

" + "documentation":"

A string that represents the new name of the project that you are updating.

" }, "defaultJobTimeoutMinutes":{ "shape":"JobTimeoutMinutes", - "documentation":"

The number of minutes a test run in the project will execute before it times out.

" + "documentation":"

The number of minutes a test run in the project executes before it times out.

" } }, "documentation":"

Represents a request to the update project operation.

" @@ -4767,11 +5361,38 @@ "members":{ "project":{ "shape":"Project", - "documentation":"

The project you wish to update.

" + "documentation":"

The project to update.

" } }, "documentation":"

Represents the result of an update project request.

" }, + "UpdateTestGridProjectRequest":{ + "type":"structure", + "required":["projectArn"], + "members":{ + "projectArn":{ + "shape":"DeviceFarmArn", + "documentation":"

ARN of the project to update.

" + }, + "name":{ + "shape":"ResourceName", + "documentation":"

Human-readable name for the project.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

Human-readable description for the project.

" + } + } + }, + "UpdateTestGridProjectResult":{ + "type":"structure", + "members":{ + "testGridProject":{ + "shape":"TestGridProject", + "documentation":"

The project, including updated information.

" + } + } + }, "UpdateUploadRequest":{ "type":"structure", "required":["arn"], @@ -4782,15 +5403,15 @@ }, "name":{ "shape":"Name", - "documentation":"

The upload's test spec file name. The name should not contain the '/' character. The test spec file name must end with the .yaml or .yml file extension.

" + "documentation":"

The upload's test spec file name. The name must not contain any forward slashes (/). The test spec file name must end with the .yaml or .yml file extension.

" }, "contentType":{ "shape":"ContentType", - "documentation":"

The upload's content type (for example, \"application/x-yaml\").

" + "documentation":"

The upload's content type (for example, application/x-yaml).

" }, "editContent":{ "shape":"Boolean", - "documentation":"

Set to true if the YAML file has changed and needs to be updated; otherwise, set to false.

" + "documentation":"

Set to true if the YAML file has changed and must be updated. Otherwise, set to false.

" } } }, @@ -4813,19 +5434,19 @@ }, "vpceConfigurationName":{ "shape":"VPCEConfigurationName", - "documentation":"

The friendly name you give to your VPC endpoint configuration, to manage your configurations more easily.

" + "documentation":"

The friendly name you give to your VPC endpoint configuration to manage your configurations more easily.

" }, "vpceServiceName":{ "shape":"VPCEServiceName", - "documentation":"

The name of the VPC endpoint service running inside your AWS account that you want Device Farm to test.

" + "documentation":"

The name of the VPC endpoint service running in your AWS account that you want Device Farm to test.

" }, "serviceDnsName":{ "shape":"ServiceDnsName", - "documentation":"

The DNS (domain) name used to connect to your private service in your Amazon VPC. The DNS name must not already be in use on the Internet.

" + "documentation":"

The DNS (domain) name used to connect to your private service in your VPC. The DNS name must not already be in use on the internet.

" }, "vpceConfigurationDescription":{ "shape":"VPCEConfigurationDescription", - "documentation":"

An optional description, providing more details about your VPC endpoint configuration.

" + "documentation":"

An optional description that provides details about your VPC endpoint configuration.

" } } }, @@ -4834,7 +5455,7 @@ "members":{ "vpceConfiguration":{ "shape":"VPCEConfiguration", - "documentation":"

An object containing information about your VPC endpoint configuration.

" + "documentation":"

An object that contains information about your VPC endpoint configuration.

" } } }, @@ -4855,15 +5476,15 @@ }, "type":{ "shape":"UploadType", - "documentation":"

The upload's type.

Must be one of the following values:

  • ANDROID_APP: An Android upload.

  • IOS_APP: An iOS upload.

  • WEB_APP: A web application upload.

  • EXTERNAL_DATA: An external data upload.

  • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

  • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

  • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

  • APPIUM_NODE_TEST_PACKAGE: An Appium Node.js test package upload.

  • APPIUM_RUBY_TEST_PACKAGE: An Appium Ruby test package upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload for web apps.

  • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload for web apps.

  • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload for web apps.

  • APPIUM_WEB_NODE_TEST_PACKAGE: An Appium Node.js test package upload for web apps.

  • APPIUM_WEB_RUBY_TEST_PACKAGE: An Appium Ruby test package upload for web apps.

  • CALABASH_TEST_PACKAGE: A Calabash test package upload.

  • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

  • UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.

  • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

  • XCTEST_TEST_PACKAGE: An Xcode test package upload.

  • XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload.

  • APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload.

  • APPIUM_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload.

  • APPIUM_PYTHON_TEST_SPEC: An Appium Python test spec upload.

  • APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload.

  • APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload.

  • APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload for a web app.

  • APPIUM_WEB_JAVA_TESTNG_TEST_SPEC: An Appium Java TestNG test spec upload for a web app.

  • APPIUM_WEB_PYTHON_TEST_SPEC: An Appium Python test spec upload for a web app.

  • APPIUM_WEB_NODE_TEST_SPEC: An Appium Node.js test spec upload for a web app.

  • APPIUM_WEB_RUBY_TEST_SPEC: An Appium Ruby test spec upload for a web app.

  • INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload.

  • XCTEST_UI_TEST_SPEC: An Xcode UI test spec upload.

" + "documentation":"

The upload's type.

Must be one of the following values:

  • ANDROID_APP

  • IOS_APP

  • WEB_APP

  • EXTERNAL_DATA

  • APPIUM_JAVA_JUNIT_TEST_PACKAGE

  • APPIUM_JAVA_TESTNG_TEST_PACKAGE

  • APPIUM_PYTHON_TEST_PACKAGE

  • APPIUM_NODE_TEST_PACKAGE

  • APPIUM_RUBY_TEST_PACKAGE

  • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE

  • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE

  • APPIUM_WEB_PYTHON_TEST_PACKAGE

  • APPIUM_WEB_NODE_TEST_PACKAGE

  • APPIUM_WEB_RUBY_TEST_PACKAGE

  • CALABASH_TEST_PACKAGE

  • INSTRUMENTATION_TEST_PACKAGE

  • UIAUTOMATION_TEST_PACKAGE

  • UIAUTOMATOR_TEST_PACKAGE

  • XCTEST_TEST_PACKAGE

  • XCTEST_UI_TEST_PACKAGE

  • APPIUM_JAVA_JUNIT_TEST_SPEC

  • APPIUM_JAVA_TESTNG_TEST_SPEC

  • APPIUM_PYTHON_TEST_SPEC

  • APPIUM_NODE_TEST_SPEC

  • APPIUM_RUBY_TEST_SPEC

  • APPIUM_WEB_JAVA_JUNIT_TEST_SPEC

  • APPIUM_WEB_JAVA_TESTNG_TEST_SPEC

  • APPIUM_WEB_PYTHON_TEST_SPEC

  • APPIUM_WEB_NODE_TEST_SPEC

  • APPIUM_WEB_RUBY_TEST_SPEC

  • INSTRUMENTATION_TEST_SPEC

  • XCTEST_UI_TEST_SPEC

" }, "status":{ "shape":"UploadStatus", - "documentation":"

The upload's status.

Must be one of the following values:

  • FAILED: A failed status.

  • INITIALIZED: An initialized status.

  • PROCESSING: A processing status.

  • SUCCEEDED: A succeeded status.

" + "documentation":"

The upload's status.

Must be one of the following values:

  • FAILED

  • INITIALIZED

  • PROCESSING

  • SUCCEEDED

" }, "url":{ "shape":"URL", - "documentation":"

The pre-signed Amazon S3 URL that was used to store a file through a corresponding PUT request.

" + "documentation":"

The presigned Amazon S3 URL that was used to store a file using a PUT request.

" }, "metadata":{ "shape":"Metadata", @@ -4871,7 +5492,7 @@ }, "contentType":{ "shape":"ContentType", - "documentation":"

The upload's content type (for example, \"application/octet-stream\").

" + "documentation":"

The upload's content type (for example, application/octet-stream).

" }, "message":{ "shape":"Message", @@ -4950,11 +5571,11 @@ }, "vpceConfigurationName":{ "shape":"VPCEConfigurationName", - "documentation":"

The friendly name you give to your VPC endpoint configuration, to manage your configurations more easily.

" + "documentation":"

The friendly name you give to your VPC endpoint configuration to manage your configurations more easily.

" }, "vpceServiceName":{ "shape":"VPCEServiceName", - "documentation":"

The name of the VPC endpoint service running inside your AWS account that you want Device Farm to test.

" + "documentation":"

The name of the VPC endpoint service running in your AWS account that you want Device Farm to test.

" }, "serviceDnsName":{ "shape":"ServiceDnsName", @@ -4962,7 +5583,7 @@ }, "vpceConfigurationDescription":{ "shape":"VPCEConfigurationDescription", - "documentation":"

An optional description, providing more details about your VPC endpoint configuration.

" + "documentation":"

An optional description that provides details about your VPC endpoint configuration.

" } }, "documentation":"

Represents an Amazon Virtual Private Cloud (VPC) endpoint configuration.

" @@ -4988,5 +5609,5 @@ }, "VideoCapture":{"type":"boolean"} }, - "documentation":"

AWS Device Farm is a service that enables mobile app developers to test Android, iOS, and Fire OS apps on physical phones, tablets, and other devices in the cloud.

" + "documentation":"

Welcome to the AWS Device Farm API documentation, which contains APIs for:

  • Testing on desktop browsers

    Device Farm makes it possible for you to test your web applications on desktop browsers using Selenium. The APIs for desktop browser testing contain TestGrid in their names. For more information, see Testing Web Applications on Selenium with Device Farm.

  • Testing on real mobile devices

    Device Farm makes it possible for you to test apps on physical phones, tablets, and other devices in the cloud. For more information, see the Device Farm Developer Guide.
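    As a hedged sketch of the desktop-browser path, the snippet below requests a short-lived remote URL with CreateTestGridUrl; the TestGrid project ARN is a placeholder, and expiresInSeconds must be between 60 and 86400.

        import boto3

        # Minimal sketch, assuming a TestGrid project already exists.
        devicefarm = boto3.client("devicefarm", region_name="us-west-2")
        resp = devicefarm.create_test_grid_url(
            projectArn="arn:aws:devicefarm:us-west-2:111122223333:testgrid-project:EXAMPLE",
            expiresInSeconds=600,
        )
        # The returned URL can then be used as the remote endpoint for a
        # Selenium RemoteWebDriver session.
        print(resp["url"])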

" } diff --git a/botocore/data/discovery/2015-11-01/service-2.json b/botocore/data/discovery/2015-11-01/service-2.json index 4862623e..a7d015d4 100644 --- a/botocore/data/discovery/2015-11-01/service-2.json +++ b/botocore/data/discovery/2015-11-01/service-2.json @@ -148,7 +148,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

Retrieves attributes for a list of configuration item IDs.

All of the supplied IDs must be for the same asset type from one of the following:

  • server

  • application

  • process

  • connection

Output fields are specific to the asset type specified. For example, the output for a server configuration item includes a list of attributes about the server, such as host name, operating system, number of network cards, etc.

For a complete list of outputs for each asset type, see Using the DescribeConfigurations Action.

" + "documentation":"

Retrieves attributes for a list of configuration item IDs.

All of the supplied IDs must be for the same asset type from one of the following:

  • server

  • application

  • process

  • connection

Output fields are specific to the asset type specified. For example, the output for a server configuration item includes a list of attributes about the server, such as host name, operating system, number of network cards, etc.

For a complete list of outputs for each asset type, see Using the DescribeConfigurations Action in the AWS Application Discovery Service User Guide.

" }, "DescribeContinuousExports":{ "name":"DescribeContinuousExports", @@ -308,7 +308,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

Retrieves a list of configuration items as specified by the value passed to the required paramater configurationType. Optional filtering may be applied to refine search results.

" + "documentation":"

Retrieves a list of configuration items as specified by the value passed to the required parameter configurationType. Optional filtering may be applied to refine search results.

" }, "ListServerNeighbors":{ "name":"ListServerNeighbors", @@ -398,7 +398,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

Starts an import task, which allows you to import details of your on-premises environment directly into AWS without having to use the Application Discovery Service (ADS) tools such as the Discovery Connector or Discovery Agent. This gives you the option to perform migration assessment and planning directly from your imported data, including the ability to group your devices as applications and track their migration status.

To start an import request, do this:

  1. Download the specially formatted comma separated value (CSV) import template, which you can find here: https://s3-us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv.

  2. Fill out the template with your server and application data.

  3. Upload your import file to an Amazon S3 bucket, and make a note of it's Object URL. Your import file must be in the CSV format.

  4. Use the console or the StartImportTask command with the AWS CLI or one of the AWS SDKs to import the records from your file.

For more information, including step-by-step procedures, see Migration Hub Import in the AWS Application Discovery Service User Guide.

There are limits to the number of import tasks you can create (and delete) in an AWS account. For more information, see AWS Application Discovery Service Limits in the AWS Application Discovery Service User Guide.

" + "documentation":"

Starts an import task, which allows you to import details of your on-premises environment directly into AWS Migration Hub without having to use the Application Discovery Service (ADS) tools such as the Discovery Connector or Discovery Agent. This gives you the option to perform migration assessment and planning directly from your imported data, including the ability to group your devices as applications and track their migration status.

To start an import request, do this:

  1. Download the specially formatted comma separated value (CSV) import template, which you can find here: https://s3-us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv.

  2. Fill out the template with your server and application data.

  3. Upload your import file to an Amazon S3 bucket, and make a note of its Object URL. Your import file must be in the CSV format.

  4. Use the console or the StartImportTask command with the AWS CLI or one of the AWS SDKs to import the records from your file.

For more information, including step-by-step procedures, see Migration Hub Import in the AWS Application Discovery Service User Guide.

There are limits to the number of import tasks you can create (and delete) in an AWS account. For more information, see AWS Application Discovery Service Limits in the AWS Application Discovery Service User Guide.
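A minimal sketch of step 4 using boto3; the task name and the S3 object URL (pointing at your filled-in copy of the CSV template) are placeholders, and the client must be created in your Migration Hub home region.

    import boto3

    discovery = boto3.client("discovery", region_name="us-west-2")
    task = discovery.start_import_task(
        name="on-prem-inventory-2020-02",
        importUrl="https://my-bucket.s3-us-west-2.amazonaws.com/import_template.csv",
    )
    print(task["task"]["importTaskId"])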

" }, "StopContinuousExport":{ "name":"StopContinuousExport", @@ -1337,7 +1337,7 @@ "documentation":"

A conditional operator. The following operators are valid: EQUALS, NOT_EQUALS, CONTAINS, NOT_CONTAINS. If you specify multiple filters, the system utilizes all filters as though concatenated by AND. If you specify multiple values for a particular filter, the system differentiates the values using OR. Calling either DescribeConfigurations or ListConfigurations returns attributes of matching configuration items.

" } }, - "documentation":"

A filter that can use conditional operators.

For more information about filters, see Querying Discovered Configuration Items.

" + "documentation":"

A filter that can use conditional operators.

For more information about filters, see Querying Discovered Configuration Items in the AWS Application Discovery Service User Guide.

" }, "FilterName":{"type":"string"}, "FilterValue":{"type":"string"}, @@ -1540,7 +1540,7 @@ }, "filters":{ "shape":"Filters", - "documentation":"

You can filter the request using various logical operators and a key-value format. For example:

{\"key\": \"serverType\", \"value\": \"webServer\"}

For a complete list of filter options and guidance about using them with this action, see Querying Discovered Configuration Items.

" + "documentation":"

You can filter the request using various logical operators and a key-value format. For example:

{\"key\": \"serverType\", \"value\": \"webServer\"}

For a complete list of filter options and guidance about using them with this action, see Using the ListConfigurations Action in the AWS Application Discovery Service User Guide.

" }, "maxResults":{ "shape":"Integer", @@ -1552,7 +1552,7 @@ }, "orderBy":{ "shape":"OrderByList", - "documentation":"

Certain filter criteria return output that can be sorted in ascending or descending order. For a list of output characteristics for each filter, see Using the ListConfigurations Action.

" + "documentation":"

Certain filter criteria return output that can be sorted in ascending or descending order. For a list of output characteristics for each filter, see Using the ListConfigurations Action in the AWS Application Discovery Service User Guide.

" } } }, @@ -1952,5 +1952,5 @@ ] } }, - "documentation":"AWS Application Discovery Service

AWS Application Discovery Service helps you plan application migration projects by automatically identifying servers, virtual machines (VMs), software, and software dependencies running in your on-premises data centers. Application Discovery Service also collects application performance data, which can help you assess the outcome of your migration. The data collected by Application Discovery Service is securely retained in an AWS-hosted and managed database in the cloud. You can export the data as a CSV or XML file into your preferred visualization tool or cloud-migration solution to plan your migration. For more information, see AWS Application Discovery Service FAQ.

Application Discovery Service offers two modes of operation:

  • Agentless discovery mode is recommended for environments that use VMware vCenter Server. This mode doesn't require you to install an agent on each host. Agentless discovery gathers server information regardless of the operating systems, which minimizes the time required for initial on-premises infrastructure assessment. Agentless discovery doesn't collect information about software and software dependencies. It also doesn't work in non-VMware environments.

  • Agent-based discovery mode collects a richer set of data than agentless discovery by using the AWS Application Discovery Agent, which you install on one or more hosts in your data center. The agent captures infrastructure and application information, including an inventory of installed software applications, system and process performance, resource utilization, and network dependencies between workloads. The information collected by agents is secured at rest and in transit to the Application Discovery Service database in the cloud.

We recommend that you use agent-based discovery for non-VMware environments and to collect information about software and software dependencies. You can also run agent-based and agentless discovery simultaneously. Use agentless discovery to quickly complete the initial infrastructure assessment and then install agents on select hosts.

Application Discovery Service integrates with application discovery solutions from AWS Partner Network (APN) partners. Third-party application discovery tools can query Application Discovery Service and write to the Application Discovery Service database using a public API. You can then import the data into either a visualization tool or cloud-migration solution.

Application Discovery Service doesn't gather sensitive information. All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.

This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

This guide is intended for use with the AWS Application Discovery Service User Guide .

Remember that you must set your AWS Migration Hub home region before you call any of these APIs, or a HomeRegionNotSetException error will be returned. Also, you must make the API calls while in your home region.

" + "documentation":"AWS Application Discovery Service

AWS Application Discovery Service helps you plan application migration projects. It automatically identifies servers, virtual machines (VMs), and network dependencies in your on-premises data centers. For more information, see the AWS Application Discovery Service FAQ. Application Discovery Service offers three ways of performing discovery and collecting data about your on-premises servers:

  • Agentless discovery is recommended for environments that use VMware vCenter Server. This mode doesn't require you to install an agent on each host. It does not work in non-VMware environments.

    • Agentless discovery gathers server information regardless of the operating systems, which minimizes the time required for initial on-premises infrastructure assessment.

    • Agentless discovery doesn't collect information about network dependencies; only agent-based discovery collects that information.

  • Agent-based discovery collects a richer set of data than agentless discovery by using the AWS Application Discovery Agent, which you install on one or more hosts in your data center.

    • The agent captures infrastructure and application information, including an inventory of running processes, system performance information, resource utilization, and network dependencies.

    • The information collected by agents is secured at rest and in transit to the Application Discovery Service database in the cloud.

  • AWS Partner Network (APN) solutions integrate with Application Discovery Service, enabling you to import details of your on-premises environment directly into Migration Hub without using the discovery connector or discovery agent.

    • Third-party application discovery tools can query AWS Application Discovery Service, and they can write to the Application Discovery Service database using the public API.

    • In this way, you can import data into Migration Hub and view it, so that you can associate applications with servers and track migrations.

Recommendations

We recommend that you use agent-based discovery for non-VMware environments, and whenever you want to collect information about network dependencies. You can run agent-based and agentless discovery simultaneously. Use agentless discovery to complete the initial infrastructure assessment quickly, and then install agents on select hosts to collect additional information.

Working With This Guide

This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

  • Remember that you must set your Migration Hub home region before you call any of these APIs.

  • You must make API calls for write actions (create, notify, associate, disassociate, import, or put) while in your home region, or a HomeRegionNotSetException error is returned.

  • API calls for read actions (list, describe, stop, and delete) are permitted outside of your home region.

  • Although it is unlikely, the Migration Hub home region could change. If you call APIs outside the home region, an InvalidInputException is returned.

  • You must call GetHomeRegion to obtain the latest Migration Hub home region.

This guide is intended for use with the AWS Application Discovery Service User Guide.

All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.

" } diff --git a/botocore/data/dlm/2018-01-12/service-2.json b/botocore/data/dlm/2018-01-12/service-2.json index c2622312..76ff6e78 100644 --- a/botocore/data/dlm/2018-01-12/service-2.json +++ b/botocore/data/dlm/2018-01-12/service-2.json @@ -149,7 +149,14 @@ "max":10, "min":1 }, + "CmkArn":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"arn:aws(-[a-z]{1,3}){0,2}:kms:([a-z]+-){2,3}\\d:\\d+:key/.*" + }, "CopyTags":{"type":"boolean"}, + "CopyTagsNullable":{"type":"boolean"}, "Count":{ "type":"integer", "max":1000, @@ -217,6 +224,56 @@ }, "documentation":"

Specifies when to create snapshots of EBS volumes.

" }, + "CrossRegionCopyRetainRule":{ + "type":"structure", + "members":{ + "Interval":{ + "shape":"Interval", + "documentation":"

The amount of time to retain each snapshot. The maximum is 100 years. This is equivalent to 1200 months, 5200 weeks, or 36500 days.

" + }, + "IntervalUnit":{ + "shape":"RetentionIntervalUnitValues", + "documentation":"

The unit of time for time-based retention.

" + } + }, + "documentation":"

Specifies the retention rule for cross-Region snapshot copies.

" + }, + "CrossRegionCopyRule":{ + "type":"structure", + "required":[ + "TargetRegion", + "Encrypted" + ], + "members":{ + "TargetRegion":{ + "shape":"TargetRegion", + "documentation":"

The target Region.

" + }, + "Encrypted":{ + "shape":"Encrypted", + "documentation":"

To encrypt a copy of an unencrypted snapshot if encryption by default is not enabled, enable encryption using this parameter. Copies of encrypted snapshots are encrypted, even if this parameter is false or if encryption by default is not enabled.

" + }, + "CmkArn":{ + "shape":"CmkArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS customer master key (CMK) to use for EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used.

" + }, + "CopyTags":{ + "shape":"CopyTagsNullable", + "documentation":"

Copy all user-defined tags from the source snapshot to the copied snapshot.

" + }, + "RetainRule":{ + "shape":"CrossRegionCopyRetainRule", + "documentation":"

The retention rule.

" + } + }, + "documentation":"

Specifies a rule for cross-Region snapshot copies.
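A hedged sketch of where this rule sits inside a lifecycle policy created with boto3; the role ARN, target tag, schedule values, and target Region are placeholders, not recommendations.

    import boto3

    dlm = boto3.client("dlm", region_name="us-east-1")
    dlm.create_lifecycle_policy(
        ExecutionRoleArn="arn:aws:iam::111122223333:role/AWSDataLifecycleManagerDefaultRole",
        Description="Daily snapshots with a cross-Region copy",
        State="ENABLED",
        PolicyDetails={
            "PolicyType": "EBS_SNAPSHOT_MANAGEMENT",
            "ResourceTypes": ["VOLUME"],
            "TargetTags": [{"Key": "backup", "Value": "daily"}],
            "Schedules": [{
                "Name": "daily",
                "CreateRule": {"Interval": 24, "IntervalUnit": "HOURS", "Times": ["09:00"]},
                "RetainRule": {"Count": 7},
                "CrossRegionCopyRules": [{
                    "TargetRegion": "us-west-2",
                    "Encrypted": False,  # required; CmkArn is only needed when encrypting
                    "CopyTags": True,
                    "RetainRule": {"Interval": 1, "IntervalUnit": "MONTHS"},
                }],
            }],
        },
    )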

" + }, + "CrossRegionCopyRules":{ + "type":"list", + "member":{"shape":"CrossRegionCopyRule"}, + "max":3, + "min":0 + }, "DeleteLifecyclePolicyRequest":{ "type":"structure", "required":["PolicyId"], @@ -234,6 +291,7 @@ "members":{ } }, + "Encrypted":{"type":"boolean"}, "ErrorCode":{"type":"string"}, "ErrorMessage":{"type":"string"}, "ExcludeBootVolume":{"type":"boolean"}, @@ -241,7 +299,7 @@ "type":"string", "max":2048, "min":0, - "pattern":"arn:aws:iam::\\d+:role/.*" + "pattern":"arn:aws(-[a-z]{1,3}){0,2}:iam::\\d+:role/.*" }, "FastRestoreRule":{ "type":"structure", @@ -492,14 +550,16 @@ "members":{ "ExcludeBootVolume":{ "shape":"ExcludeBootVolume", - "documentation":"

When executing an EBS Snapshot Management – Instance policy, execute all CreateSnapshots calls with the excludeBootVolume set to the supplied field. Defaults to false. Only valid for EBS Snapshot Management – Instance policies.

" + "documentation":"

[EBS Snapshot Management – Instance policies only] Indicates whether to exclude the root volume from snapshots created using CreateSnapshots. The default is false.

" } }, - "documentation":"

Optional parameters that can be added to the policy. The set of valid parameters depends on the combination of policyType and resourceType values.

" + "documentation":"

Specifies optional parameters to add to a policy. The set of valid parameters depends on the combination of policy type and resource type.

" }, "PolicyArn":{ "type":"string", - "pattern":"^arn:aws:dlm:[A-Za-z0-9_/.-]{0,63}:\\d+:policy/[0-9A-Za-z_-]{1,128}$" + "max":2048, + "min":0, + "pattern":"^arn:aws(-[a-z]{1,3}){0,2}:dlm:[A-Za-z0-9_/.-]{0,63}:\\d+:policy/[0-9A-Za-z_-]{1,128}$" }, "PolicyDescription":{ "type":"string", @@ -512,7 +572,7 @@ "members":{ "PolicyType":{ "shape":"PolicyTypeValues", - "documentation":"

This field determines the valid target resource types and actions a policy can manage. This field defaults to EBS_SNAPSHOT_MANAGEMENT if not present.

" + "documentation":"

The valid target resource types and actions a policy can manage. The default is EBS_SNAPSHOT_MANAGEMENT.

" }, "ResourceTypes":{ "shape":"ResourceTypeValuesList", @@ -528,7 +588,7 @@ }, "Parameters":{ "shape":"Parameters", - "documentation":"

A set of optional parameters that can be provided by the policy.

" + "documentation":"

A set of optional parameters for the policy.

" } }, "documentation":"

Specifies the configuration of a lifecycle policy.

" @@ -626,7 +686,7 @@ }, "CreateRule":{ "shape":"CreateRule", - "documentation":"

The create rule.

" + "documentation":"

The creation rule.

" }, "RetainRule":{ "shape":"RetainRule", @@ -634,10 +694,14 @@ }, "FastRestoreRule":{ "shape":"FastRestoreRule", - "documentation":"

Enable fast snapshot restore.

" + "documentation":"

The rule for enabling fast snapshot restore.

" + }, + "CrossRegionCopyRules":{ + "shape":"CrossRegionCopyRules", + "documentation":"

The rule for cross-Region snapshot copies.

" } }, - "documentation":"

Specifies a schedule.

" + "documentation":"

Specifies a backup schedule.

" }, "ScheduleList":{ "type":"list", @@ -739,7 +803,8 @@ }, "TagValue":{ "type":"string", - "max":256 + "max":256, + "pattern":"[\\p{all}]*" }, "TagsToAddFilterList":{ "type":"list", @@ -750,9 +815,15 @@ "TagsToAddList":{ "type":"list", "member":{"shape":"Tag"}, - "max":50, + "max":45, "min":0 }, + "TargetRegion":{ + "type":"string", + "max":16, + "min":0, + "pattern":"([a-z]+-){2,3}\\d" + }, "TargetTagList":{ "type":"list", "member":{"shape":"Tag"}, @@ -842,7 +913,7 @@ "VariableTagsList":{ "type":"list", "member":{"shape":"Tag"}, - "max":50, + "max":45, "min":0 } }, diff --git a/botocore/data/docdb/2014-10-31/service-2.json b/botocore/data/docdb/2014-10-31/service-2.json index 5a57835f..995b2c3e 100644 --- a/botocore/data/docdb/2014-10-31/service-2.json +++ b/botocore/data/docdb/2014-10-31/service-2.json @@ -61,7 +61,7 @@ {"shape":"DBParameterGroupQuotaExceededFault"}, {"shape":"DBParameterGroupAlreadyExistsFault"} ], - "documentation":"

Copies the specified DB cluster parameter group.

" + "documentation":"

Copies the specified cluster parameter group.

" }, "CopyDBClusterSnapshot":{ "name":"CopyDBClusterSnapshot", @@ -82,7 +82,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

To cancel the copy operation after it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in the copying status.

" + "documentation":"

Copies a snapshot of a cluster.

To copy a cluster snapshot from a shared manual cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared cluster snapshot.

To cancel the copy operation after it is in progress, delete the target cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that cluster snapshot is in the copying status.

" }, "CreateDBCluster":{ "name":"CreateDBCluster", @@ -112,7 +112,7 @@ {"shape":"DBInstanceNotFoundFault"}, {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"} ], - "documentation":"

Creates a new Amazon DocumentDB DB cluster.

" + "documentation":"

Creates a new Amazon DocumentDB cluster.

" }, "CreateDBClusterParameterGroup":{ "name":"CreateDBClusterParameterGroup", @@ -129,7 +129,7 @@ {"shape":"DBParameterGroupQuotaExceededFault"}, {"shape":"DBParameterGroupAlreadyExistsFault"} ], - "documentation":"

Creates a new DB cluster parameter group.

Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after you create it. After you create a DB cluster parameter group, you must associate it with your DB cluster. For the new DB cluster parameter group and associated settings to take effect, you must then reboot the DB instances in the DB cluster without failover.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon DocumentDB to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This step is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter.

" + "documentation":"

Creates a new cluster parameter group.

Parameters in a cluster parameter group apply to all of the instances in a cluster.

A cluster parameter group is initially created with the default parameters for the database engine used by instances in the cluster. To provide custom values for any of the parameters, you must modify the group after you create it. After you create a cluster parameter group, you must associate it with your cluster. For the new cluster parameter group and associated settings to take effect, you must then reboot the instances in the cluster without failover.

After you create a cluster parameter group, you should wait at least 5 minutes before creating your first cluster that uses that cluster parameter group as the default parameter group. This allows Amazon DocumentDB to fully complete the create action before the cluster parameter group is used as the default for a new cluster. This step is especially important for parameters that are critical when creating the default database for a cluster, such as the character set for the default database defined by the character_set_database parameter.

" }, "CreateDBClusterSnapshot":{ "name":"CreateDBClusterSnapshot", @@ -149,7 +149,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"InvalidDBClusterSnapshotStateFault"} ], - "documentation":"

Creates a snapshot of a DB cluster.

" + "documentation":"

Creates a snapshot of a cluster.

" }, "CreateDBInstance":{ "name":"CreateDBInstance", @@ -179,7 +179,7 @@ {"shape":"AuthorizationNotFoundFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Creates a new DB instance.

" + "documentation":"

Creates a new instance.

" }, "CreateDBSubnetGroup":{ "name":"CreateDBSubnetGroup", @@ -199,7 +199,7 @@ {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, {"shape":"InvalidSubnet"} ], - "documentation":"

Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two Availability Zones in the AWS Region.

" + "documentation":"

Creates a new subnet group. Subnet groups must contain at least one subnet in at least two Availability Zones in the AWS Region.

" }, "DeleteDBCluster":{ "name":"DeleteDBCluster", @@ -219,7 +219,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"InvalidDBClusterSnapshotStateFault"} ], - "documentation":"

Deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

" + "documentation":"

Deletes a previously provisioned cluster. When you delete a cluster, all automated backups for that cluster are deleted and can't be recovered. Manual cluster snapshots of the specified cluster are not deleted.

" }, "DeleteDBClusterParameterGroup":{ "name":"DeleteDBClusterParameterGroup", @@ -232,7 +232,7 @@ {"shape":"InvalidDBParameterGroupStateFault"}, {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.

" + "documentation":"

Deletes a specified cluster parameter group. The cluster parameter group to be deleted can't be associated with any clusters.

" }, "DeleteDBClusterSnapshot":{ "name":"DeleteDBClusterSnapshot", @@ -249,7 +249,7 @@ {"shape":"InvalidDBClusterSnapshotStateFault"}, {"shape":"DBClusterSnapshotNotFoundFault"} ], - "documentation":"

Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

The DB cluster snapshot must be in the available state to be deleted.

" + "documentation":"

Deletes a cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

The cluster snapshot must be in the available state to be deleted.

" }, "DeleteDBInstance":{ "name":"DeleteDBInstance", @@ -269,7 +269,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"InvalidDBClusterStateFault"} ], - "documentation":"

Deletes a previously provisioned DB instance.

" + "documentation":"

Deletes a previously provisioned instance.

" }, "DeleteDBSubnetGroup":{ "name":"DeleteDBSubnetGroup", @@ -283,7 +283,7 @@ {"shape":"InvalidDBSubnetStateFault"}, {"shape":"DBSubnetGroupNotFoundFault"} ], - "documentation":"

Deletes a DB subnet group.

The specified database subnet group must not be associated with any DB instances.

" + "documentation":"

Deletes a subnet group.

The specified subnet group must not be associated with any instances.

" }, "DescribeCertificates":{ "name":"DescribeCertificates", @@ -299,7 +299,7 @@ "errors":[ {"shape":"CertificateNotFoundFault"} ], - "documentation":"

Returns a list of certificate authority (CA) certificates provided by Amazon RDS for this AWS account.

" + "documentation":"

Returns a list of certificate authority (CA) certificates provided by Amazon DocumentDB for this AWS account. For certain management features such as cluster and instance lifecycle management, Amazon DocumentDB leverages operational technology that is shared with Amazon RDS and Amazon Neptune. Use the filterName=engine,Values=docdb filter parameter to return only Amazon DocumentDB clusters.

" }, "DescribeDBClusterParameterGroups":{ "name":"DescribeDBClusterParameterGroups", @@ -315,7 +315,7 @@ "errors":[ {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list contains only the description of the specified DB cluster parameter group.

" + "documentation":"

Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list contains only the description of the specified cluster parameter group.

" }, "DescribeDBClusterParameters":{ "name":"DescribeDBClusterParameters", @@ -331,7 +331,7 @@ "errors":[ {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Returns the detailed parameter list for a particular DB cluster parameter group.

" + "documentation":"

Returns the detailed parameter list for a particular cluster parameter group.

" }, "DescribeDBClusterSnapshotAttributes":{ "name":"DescribeDBClusterSnapshotAttributes", @@ -347,7 +347,7 @@ "errors":[ {"shape":"DBClusterSnapshotNotFoundFault"} ], - "documentation":"

Returns a list of DB cluster snapshot attribute names and values for a manual DB cluster snapshot.

When you share snapshots with other AWS accounts, DescribeDBClusterSnapshotAttributes returns the restore attribute and a list of IDs for the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If all is included in the list of values for the restore attribute, then the manual DB cluster snapshot is public and can be copied or restored by all AWS accounts.

" + "documentation":"

Returns a list of cluster snapshot attribute names and values for a manual cluster snapshot.

When you share snapshots with other AWS accounts, DescribeDBClusterSnapshotAttributes returns the restore attribute and a list of IDs for the AWS accounts that are authorized to copy or restore the manual cluster snapshot. If all is included in the list of values for the restore attribute, then the manual cluster snapshot is public and can be copied or restored by all AWS accounts.

" }, "DescribeDBClusterSnapshots":{ "name":"DescribeDBClusterSnapshots", @@ -363,7 +363,7 @@ "errors":[ {"shape":"DBClusterSnapshotNotFoundFault"} ], - "documentation":"

Returns information about DB cluster snapshots. This API operation supports pagination.

" + "documentation":"

Returns information about cluster snapshots. This API operation supports pagination.

" }, "DescribeDBClusters":{ "name":"DescribeDBClusters", @@ -379,7 +379,7 @@ "errors":[ {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

Returns information about provisioned Amazon DocumentDB DB clusters. This API operation supports pagination.

" + "documentation":"

Returns information about provisioned Amazon DocumentDB clusters. This API operation supports pagination.

" }, "DescribeDBEngineVersions":{ "name":"DescribeDBEngineVersions", @@ -392,7 +392,7 @@ "shape":"DBEngineVersionMessage", "resultWrapper":"DescribeDBEngineVersionsResult" }, - "documentation":"

Returns a list of the available DB engines.

" + "documentation":"

Returns a list of the available engines.

" }, "DescribeDBInstances":{ "name":"DescribeDBInstances", @@ -463,7 +463,7 @@ "shape":"EventsMessage", "resultWrapper":"DescribeEventsResult" }, - "documentation":"

Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. You can obtain events specific to a particular DB instance, DB security group, DB snapshot, or DB parameter group by providing the name as a parameter. By default, the events of the past hour are returned.

" + "documentation":"

Returns events related to instances, security groups, snapshots, and parameter groups for the past 14 days. You can obtain events specific to a particular instance, security group, snapshot, or parameter group by providing the name as a parameter. By default, the events of the past hour are returned.

" }, "DescribeOrderableDBInstanceOptions":{ "name":"DescribeOrderableDBInstanceOptions", @@ -476,7 +476,7 @@ "shape":"OrderableDBInstanceOptionsMessage", "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" }, - "documentation":"

Returns a list of orderable DB instance options for the specified engine.

" + "documentation":"

Returns a list of orderable instance options for the specified engine.

" }, "DescribePendingMaintenanceActions":{ "name":"DescribePendingMaintenanceActions", @@ -492,7 +492,7 @@ "errors":[ {"shape":"ResourceNotFoundFault"} ], - "documentation":"

Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.

" + "documentation":"

Returns a list of resources (for example, instances) that have at least one pending maintenance action.

" }, "FailoverDBCluster":{ "name":"FailoverDBCluster", @@ -510,7 +510,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"InvalidDBInstanceStateFault"} ], - "documentation":"

Forces a failover for a DB cluster.

A failover for a DB cluster promotes one of the Amazon DocumentDB replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).

If the primary instance fails, Amazon DocumentDB automatically fails over to an Amazon DocumentDB replica, if one exists. You can force a failover when you want to simulate a failure of a primary instance for testing.

" + "documentation":"

Forces a failover for a cluster.

A failover for a cluster promotes one of the Amazon DocumentDB replicas (read-only instances) in the cluster to be the primary instance (the cluster writer).

If the primary instance fails, Amazon DocumentDB automatically fails over to an Amazon DocumentDB replica, if one exists. You can force a failover when you want to simulate a failure of a primary instance for testing.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -554,7 +554,7 @@ {"shape":"InvalidDBInstanceStateFault"}, {"shape":"DBClusterAlreadyExistsFault"} ], - "documentation":"

Modifies a setting for an Amazon DocumentDB DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

" + "documentation":"

Modifies a setting for an Amazon DocumentDB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

" }, "ModifyDBClusterParameterGroup":{ "name":"ModifyDBClusterParameterGroup", @@ -571,7 +571,7 @@ {"shape":"DBParameterGroupNotFoundFault"}, {"shape":"InvalidDBParameterGroupStateFault"} ], - "documentation":"

Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot or maintenance window before the change can take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon DocumentDB to fully complete the create action before the parameter group is used as the default for a new DB cluster. This step is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter.

" + "documentation":"

Modifies the parameters of a cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot or maintenance window before the change can take effect.

After you create a cluster parameter group, you should wait at least 5 minutes before creating your first cluster that uses that cluster parameter group as the default parameter group. This allows Amazon DocumentDB to fully complete the create action before the parameter group is used as the default for a new cluster. This step is especially important for parameters that are critical when creating the default database for a cluster, such as the character set for the default database defined by the character_set_database parameter.

" }, "ModifyDBClusterSnapshotAttribute":{ "name":"ModifyDBClusterSnapshotAttribute", @@ -589,7 +589,7 @@ {"shape":"InvalidDBClusterSnapshotStateFault"}, {"shape":"SharedSnapshotQuotaExceededFault"} ], - "documentation":"

Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

To share a manual DB cluster snapshot with other AWS accounts, specify restore as the AttributeName, and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value all to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

" + "documentation":"

Adds an attribute and values to, or removes an attribute and values from, a manual cluster snapshot.

To share a manual cluster snapshot with other AWS accounts, specify restore as the AttributeName, and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual cluster snapshot. Use the value all to make the manual cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the all value for any manual cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

" }, "ModifyDBInstance":{ "name":"ModifyDBInstance", @@ -617,7 +617,7 @@ {"shape":"AuthorizationNotFoundFault"}, {"shape":"CertificateNotFoundFault"} ], - "documentation":"

Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

" + "documentation":"

Modifies settings for an instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

" }, "ModifyDBSubnetGroup":{ "name":"ModifyDBSubnetGroup", @@ -637,7 +637,7 @@ {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, {"shape":"InvalidSubnet"} ], - "documentation":"

Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two Availability Zones in the AWS Region.

" + "documentation":"

Modifies an existing subnet group. Subnet groups must contain at least one subnet in at least two Availability Zones in the AWS Region.

" }, "RebootDBInstance":{ "name":"RebootDBInstance", @@ -654,7 +654,7 @@ {"shape":"InvalidDBInstanceStateFault"}, {"shape":"DBInstanceNotFoundFault"} ], - "documentation":"

You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain changes, or if you change the DB cluster parameter group that is associated with the DB instance, you must reboot the instance for the changes to take effect.

Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

" + "documentation":"

You might need to reboot your instance, usually for maintenance reasons. For example, if you make certain changes, or if you change the cluster parameter group that is associated with the instance, you must reboot the instance for the changes to take effect.

Rebooting an instance restarts the database engine service. Rebooting an instance results in a momentary outage, during which the instance status is set to rebooting.

" }, "RemoveTagsFromResource":{ "name":"RemoveTagsFromResource", @@ -685,7 +685,7 @@ {"shape":"InvalidDBParameterGroupStateFault"}, {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters, submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

When you reset the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance reboot.

" + "documentation":"

Modifies the parameters of a cluster parameter group to the default value. To reset specific parameters, submit a list of the following: ParameterName and ApplyMethod. To reset the entire cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

When you reset the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next instance reboot.

" }, "RestoreDBClusterFromSnapshot":{ "name":"RestoreDBClusterFromSnapshot", @@ -716,7 +716,7 @@ {"shape":"InvalidSubnet"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

" + "documentation":"

Creates a new cluster from a snapshot or cluster snapshot.

If a snapshot is specified, the target cluster is created from the source snapshot with a default configuration and default security group.

If a cluster snapshot is specified, the target cluster is created from the source cluster restore point with the same configuration as the original source cluster, except that the new cluster is created with the default security group.

" }, "RestoreDBClusterToPointInTime":{ "name":"RestoreDBClusterToPointInTime", @@ -746,7 +746,7 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"StorageQuotaExceededFault"} ], - "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

" + "documentation":"

Restores a cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target cluster is created from the source cluster with the same configuration as the original cluster, except that the new cluster is created with the default security group.

" }, "StartDBCluster":{ "name":"StartDBCluster", @@ -851,7 +851,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified CIDR IP or Amazon EC2 security group isn't authorized for the specified DB security group.

Amazon DocumentDB also might not be authorized to perform necessary actions on your behalf using IAM.

", + "documentation":"

The specified CIDR IP or Amazon EC2 security group isn't authorized for the specified security group.

Amazon DocumentDB also might not be authorized to perform necessary actions on your behalf using IAM.

", "error":{ "code":"AuthorizationNotFound", "httpStatusCode":404, @@ -961,7 +961,7 @@ "documentation":"

The list of log types to disable.

" } }, - "documentation":"

The configuration setting for the log types to be enabled for export to Amazon CloudWatch Logs for a specific DB instance or DB cluster.

The EnableLogTypes and DisableLogTypes arrays determine which logs are exported (or not exported) to CloudWatch Logs. The values within these arrays depend on the DB engine that is being used.

" + "documentation":"

The configuration setting for the log types to be enabled for export to Amazon CloudWatch Logs for a specific instance or cluster.

The EnableLogTypes and DisableLogTypes arrays determine which logs are exported (or not exported) to CloudWatch Logs. The values within these arrays depend on the engine that is being used.

" }, "CopyDBClusterParameterGroupMessage":{ "type":"structure", @@ -973,15 +973,15 @@ "members":{ "SourceDBClusterParameterGroupIdentifier":{ "shape":"String", - "documentation":"

The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter group.

Constraints:

  • Must specify a valid DB cluster parameter group.

  • If the source DB cluster parameter group is in the same AWS Region as the copy, specify a valid DB parameter group identifier; for example, my-db-cluster-param-group, or a valid ARN.

  • If the source DB parameter group is in a different AWS Region than the copy, specify a valid DB cluster parameter group ARN; for example, arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1.

" + "documentation":"

The identifier or Amazon Resource Name (ARN) for the source cluster parameter group.

Constraints:

  • Must specify a valid cluster parameter group.

  • If the source cluster parameter group is in the same AWS Region as the copy, specify a valid parameter group identifier; for example, my-db-cluster-param-group, or a valid ARN.

  • If the source parameter group is in a different AWS Region than the copy, specify a valid cluster parameter group ARN; for example, arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1.

" }, "TargetDBClusterParameterGroupIdentifier":{ "shape":"String", - "documentation":"

The identifier for the copied DB cluster parameter group.

Constraints:

  • Cannot be null, empty, or blank.

  • Must contain from 1 to 255 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster-param-group1

" + "documentation":"

The identifier for the copied cluster parameter group.

Constraints:

  • Cannot be null, empty, or blank.

  • Must contain from 1 to 255 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster-param-group1

" }, "TargetDBClusterParameterGroupDescription":{ "shape":"String", - "documentation":"

A description for the copied DB cluster parameter group.

" + "documentation":"

A description for the copied cluster parameter group.

" }, "Tags":{ "shape":"TagList", @@ -1005,27 +1005,27 @@ "members":{ "SourceDBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB cluster snapshot to copy. This parameter is not case sensitive.

You can't copy an encrypted, shared DB cluster snapshot from one AWS Region to another.

Constraints:

  • Must specify a valid system snapshot in the \"available\" state.

  • If the source snapshot is in the same AWS Region as the copy, specify a valid DB snapshot identifier.

  • If the source snapshot is in a different AWS Region than the copy, specify a valid DB cluster snapshot ARN.

Example: my-cluster-snapshot1

" + "documentation":"

The identifier of the cluster snapshot to copy. This parameter is not case sensitive.

You can't copy an encrypted, shared cluster snapshot from one AWS Region to another.

Constraints:

  • Must specify a valid system snapshot in the \"available\" state.

  • If the source snapshot is in the same AWS Region as the copy, specify a valid snapshot identifier.

  • If the source snapshot is in a different AWS Region than the copy, specify a valid cluster snapshot ARN.

Example: my-cluster-snapshot1

" }, "TargetDBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier of the new DB cluster snapshot to create from the source DB cluster snapshot. This parameter is not case sensitive.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster-snapshot2

" + "documentation":"

The identifier of the new cluster snapshot to create from the source cluster snapshot. This parameter is not case sensitive.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster-snapshot2

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key ID for an encrypted DB cluster snapshot. The AWS KMS key ID is the Amazon Resource Name (ARN), AWS KMS key identifier, or the AWS KMS key alias for the AWS KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new AWS KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same AWS KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

To copy an encrypted DB cluster snapshot to another AWS Region, set KmsKeyId to the AWS KMS key ID that you want to use to encrypt the copy of the DB cluster snapshot in the destination Region. AWS KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one Region in another Region.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, an error is returned.

" + "documentation":"

The AWS KMS key ID for an encrypted cluster snapshot. The AWS KMS key ID is the Amazon Resource Name (ARN), AWS KMS key identifier, or the AWS KMS key alias for the AWS KMS encryption key.

If you copy an encrypted cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new AWS KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the cluster snapshot is encrypted with the same AWS KMS key as the source cluster snapshot.

If you copy an encrypted cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

To copy an encrypted cluster snapshot to another AWS Region, set KmsKeyId to the AWS KMS key ID that you want to use to encrypt the copy of the cluster snapshot in the destination Region. AWS KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one Region in another Region.

If you copy an unencrypted cluster snapshot and specify a value for the KmsKeyId parameter, an error is returned.

" }, "PreSignedUrl":{ "shape":"String", - "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source DB cluster snapshot to copy. You must use the PreSignedUrl parameter when copying an encrypted DB cluster snapshot from another AWS Region.

The presigned URL must be a valid request for the CopyDBSClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The presigned URL request must contain the following parameter values:

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the presigned URL.

  • DestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:my-cluster-snapshot-20161115.

" + "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source cluster snapshot to copy. You must use the PreSignedUrl parameter when copying an encrypted cluster snapshot from another AWS Region.

The presigned URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted cluster snapshot to be copied. The presigned URL request must contain the following parameter values:

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the presigned URL.

  • DestinationRegion - The name of the AWS Region that the cluster snapshot will be created in.

  • SourceDBClusterSnapshotIdentifier - The cluster snapshot identifier for the encrypted cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:my-cluster-snapshot-20161115.

" }, "CopyTags":{ "shape":"BooleanOptional", - "documentation":"

Set to true to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot, and otherwise false. The default is false.

" + "documentation":"

Set to true to copy all tags from the source cluster snapshot to the target cluster snapshot; otherwise, false. The default is false.

" }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to be assigned to the DB cluster snapshot.

" + "documentation":"

The tags to be assigned to the cluster snapshot.

" } }, "documentation":"

Represents the input to CopyDBClusterSnapshot.

" @@ -1047,7 +1047,7 @@ "members":{ "AvailabilityZones":{ "shape":"AvailabilityZones", - "documentation":"

A list of Amazon EC2 Availability Zones that instances in the DB cluster can be created in.

" + "documentation":"

A list of Amazon EC2 Availability Zones that instances in the cluster can be created in.

" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", @@ -1055,23 +1055,23 @@ }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The DB cluster identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster

" + "documentation":"

The cluster identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster

" }, "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB cluster parameter group to associate with this DB cluster.

" + "documentation":"

The name of the cluster parameter group to associate with this cluster.

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

A list of EC2 VPC security groups to associate with this DB cluster.

" + "documentation":"

A list of EC2 VPC security groups to associate with this cluster.

" }, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

A DB subnet group to associate with this DB cluster.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

" + "documentation":"

A subnet group to associate with this cluster.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

" }, "Engine":{ "shape":"String", - "documentation":"

The name of the database engine to be used for this DB cluster.

Valid values: docdb

" + "documentation":"

The name of the database engine to be used for this cluster.

Valid values: docdb

" }, "EngineVersion":{ "shape":"String", @@ -1079,11 +1079,11 @@ }, "Port":{ "shape":"IntegerOptional", - "documentation":"

The port number on which the instances in the DB cluster accept connections.

" + "documentation":"

The port number on which the instances in the cluster accept connections.

" }, "MasterUsername":{ "shape":"String", - "documentation":"

The name of the master user for the DB cluster.

Constraints:

  • Must be from 1 to 63 letters or numbers.

  • The first character must be a letter.

  • Cannot be a reserved word for the chosen database engine.

" + "documentation":"

The name of the master user for the cluster.

Constraints:

  • Must be from 1 to 63 letters or numbers.

  • The first character must be a letter.

  • Cannot be a reserved word for the chosen database engine.

" }, "MasterUserPassword":{ "shape":"String", @@ -1099,15 +1099,15 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to be assigned to the DB cluster.

" + "documentation":"

The tags to be assigned to the cluster.

" }, "StorageEncrypted":{ "shape":"BooleanOptional", - "documentation":"

Specifies whether the DB cluster is encrypted.

" + "documentation":"

Specifies whether the cluster is encrypted.

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key identifier for an encrypted DB cluster.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are creating a DB cluster using the same AWS account that owns the AWS KMS encryption key that is used to encrypt the new DB cluster, you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

If an encryption key is not specified in KmsKeyId:

  • If ReplicationSourceIdentifier identifies an encrypted source, then Amazon DocumentDB uses the encryption key that is used to encrypt the source. Otherwise, Amazon DocumentDB uses your default encryption key.

  • If the StorageEncrypted parameter is true and ReplicationSourceIdentifier is not specified, Amazon DocumentDB uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

If you create a replica of an encrypted DB cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the replica in that AWS Region.

" + "documentation":"

The AWS KMS key identifier for an encrypted cluster.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are creating a cluster using the same AWS account that owns the AWS KMS encryption key that is used to encrypt the new cluster, you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

If an encryption key is not specified in KmsKeyId:

  • If ReplicationSourceIdentifier identifies an encrypted source, then Amazon DocumentDB uses the encryption key that is used to encrypt the source. Otherwise, Amazon DocumentDB uses your default encryption key.

  • If the StorageEncrypted parameter is true and ReplicationSourceIdentifier is not specified, Amazon DocumentDB uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

If you create a replica of an encrypted cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the replica in that AWS Region.

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -1130,19 +1130,19 @@ "members":{ "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB cluster parameter group.

Constraints:

  • Must match the name of an existing DBClusterParameterGroup.

This value is stored as a lowercase string.

" + "documentation":"

The name of the cluster parameter group.

Constraints:

  • Must match the name of an existing DBClusterParameterGroup.

This value is stored as a lowercase string.

" }, "DBParameterGroupFamily":{ "shape":"String", - "documentation":"

The DB cluster parameter group family name.

" + "documentation":"

The cluster parameter group family name.

" }, "Description":{ "shape":"String", - "documentation":"

The description for the DB cluster parameter group.

" + "documentation":"

The description for the cluster parameter group.

" }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to be assigned to the DB cluster parameter group.

" + "documentation":"

The tags to be assigned to the cluster parameter group.

" } }, "documentation":"

Represents the input of CreateDBClusterParameterGroup.

" @@ -1168,15 +1168,15 @@ "members":{ "DBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB cluster snapshot. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster-snapshot1

" + "documentation":"

The identifier of the cluster snapshot. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster-snapshot1

" }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB cluster to create a snapshot for. This parameter is not case sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster.

Example: my-cluster

" + "documentation":"

The identifier of the cluster to create a snapshot for. This parameter is not case sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster.

Example: my-cluster

" }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to be assigned to the DB cluster snapshot.

" + "documentation":"

The tags to be assigned to the cluster snapshot.

" } }, "documentation":"

Represents the input of CreateDBClusterSnapshot.

" @@ -1198,11 +1198,11 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: mydbinstance

" + "documentation":"

The instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: mydbinstance

" }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the DB instance; for example, db.r5.large.

" + "documentation":"

The compute and memory capacity of the instance; for example, db.r5.large.

" }, "Engine":{ "shape":"String", @@ -1210,7 +1210,7 @@ }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The Amazon EC2 Availability Zone that the DB instance is created in.

Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.

Example: us-east-1d

Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same AWS Region as the current endpoint.

" + "documentation":"

The Amazon EC2 Availability Zone that the instance is created in.

Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.

Example: us-east-1d

Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same AWS Region as the current endpoint.

" }, "PreferredMaintenanceWindow":{ "shape":"String", @@ -1218,15 +1218,15 @@ }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

Indicates that minor engine upgrades are applied automatically to the DB instance during the maintenance window.

Default: true

" + "documentation":"

Indicates that minor engine upgrades are applied automatically to the instance during the maintenance window.

Default: true

" }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to be assigned to the DB instance. You can assign up to 10 tags to an instance.

" + "documentation":"

The tags to be assigned to the instance. You can assign up to 10 tags to an instance.

" }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB cluster that the instance will belong to.

" + "documentation":"

The identifier of the cluster that the instance will belong to.

" }, "PromotionTier":{ "shape":"IntegerOptional", @@ -1251,19 +1251,19 @@ "members":{ "DBSubnetGroupName":{ "shape":"String", - "documentation":"

The name for the DB subnet group. This value is stored as a lowercase string.

Constraints: Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. Must not be default.

Example: mySubnetgroup

" + "documentation":"

The name for the subnet group. This value is stored as a lowercase string.

Constraints: Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. Must not be default.

Example: mySubnetgroup

" }, "DBSubnetGroupDescription":{ "shape":"String", - "documentation":"

The description for the DB subnet group.

" + "documentation":"

The description for the subnet group.

" }, "SubnetIds":{ "shape":"SubnetIdentifierList", - "documentation":"

The Amazon EC2 subnet IDs for the DB subnet group.

" + "documentation":"

The Amazon EC2 subnet IDs for the subnet group.

" }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to be assigned to the DB subnet group.

" + "documentation":"

The tags to be assigned to the subnet group.

" } }, "documentation":"

Represents the input to CreateDBSubnetGroup.

" @@ -1279,27 +1279,27 @@ "members":{ "AvailabilityZones":{ "shape":"AvailabilityZones", - "documentation":"

Provides the list of Amazon EC2 Availability Zones that instances in the DB cluster can be created in.

" + "documentation":"

Provides the list of Amazon EC2 Availability Zones that instances in the cluster can be created in.

" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

Specifies the number of days for which automatic DB snapshots are retained.

" + "documentation":"

Specifies the number of days for which automatic snapshots are retained.

" }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

Contains a user-supplied DB cluster identifier. This identifier is the unique key that identifies a DB cluster.

" + "documentation":"

Contains a user-supplied cluster identifier. This identifier is the unique key that identifies a cluster.

" }, "DBClusterParameterGroup":{ "shape":"String", - "documentation":"

Specifies the name of the DB cluster parameter group for the DB cluster.

" + "documentation":"

Specifies the name of the cluster parameter group for the cluster.

" }, "DBSubnetGroup":{ "shape":"String", - "documentation":"

Specifies information on the subnet group that is associated with the DB cluster, including the name, description, and subnets in the subnet group.

" + "documentation":"

Specifies information on the subnet group that is associated with the cluster, including the name, description, and subnets in the subnet group.

" }, "Status":{ "shape":"String", - "documentation":"

Specifies the current state of this DB cluster.

" + "documentation":"

Specifies the current state of this cluster.

" }, "PercentProgress":{ "shape":"String", @@ -1311,19 +1311,19 @@ }, "Endpoint":{ "shape":"String", - "documentation":"

Specifies the connection endpoint for the primary instance of the DB cluster.

" + "documentation":"

Specifies the connection endpoint for the primary instance of the cluster.

" }, "ReaderEndpoint":{ "shape":"String", - "documentation":"

The reader endpoint for the DB cluster. The reader endpoint for a DB cluster load balances connections across the Amazon DocumentDB replicas that are available in a DB cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the DB cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your DB cluster.

If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.

" + "documentation":"

The reader endpoint for the cluster. The reader endpoint for a cluster load balances connections across the Amazon DocumentDB replicas that are available in a cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your cluster.

If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.

" }, "MultiAZ":{ "shape":"Boolean", - "documentation":"

Specifies whether the DB cluster has instances in multiple Availability Zones.

" + "documentation":"

Specifies whether the cluster has instances in multiple Availability Zones.

" }, "Engine":{ "shape":"String", - "documentation":"

Provides the name of the database engine to be used for this DB cluster.

" + "documentation":"

Provides the name of the database engine to be used for this cluster.

" }, "EngineVersion":{ "shape":"String", @@ -1339,7 +1339,7 @@ }, "MasterUsername":{ "shape":"String", - "documentation":"

Contains the master user name for the DB cluster.

" + "documentation":"

Contains the master user name for the cluster.

" }, "PreferredBackupWindow":{ "shape":"String", @@ -1351,11 +1351,11 @@ }, "DBClusterMembers":{ "shape":"DBClusterMemberList", - "documentation":"

Provides the list of instances that make up the DB cluster.

" + "documentation":"

Provides the list of instances that make up the cluster.

" }, "VpcSecurityGroups":{ "shape":"VpcSecurityGroupMembershipList", - "documentation":"

Provides a list of virtual private cloud (VPC) security groups that the DB cluster belongs to.

" + "documentation":"

Provides a list of virtual private cloud (VPC) security groups that the cluster belongs to.

" }, "HostedZoneId":{ "shape":"String", @@ -1363,45 +1363,45 @@ }, "StorageEncrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the DB cluster is encrypted.

" + "documentation":"

Specifies whether the cluster is encrypted.

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB cluster.

" + "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted cluster.

" }, "DbClusterResourceId":{ "shape":"String", - "documentation":"

The AWS Region-unique, immutable identifier for the DB cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB cluster is accessed.

" + "documentation":"

The AWS Region-unique, immutable identifier for the cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the cluster is accessed.

" }, "DBClusterArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the DB cluster.

" + "documentation":"

The Amazon Resource Name (ARN) for the cluster.

" }, "AssociatedRoles":{ "shape":"DBClusterRoles", - "documentation":"

Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other AWS services on your behalf.

" + "documentation":"

Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the cluster. IAM roles that are associated with a cluster grant permission for the cluster to access other AWS services on your behalf.

" }, "ClusterCreateTime":{ "shape":"TStamp", - "documentation":"

Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

" + "documentation":"

Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).

" }, "EnabledCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

A list of log types that this DB cluster is configured to export to Amazon CloudWatch Logs.

" + "documentation":"

A list of log types that this cluster is configured to export to Amazon CloudWatch Logs.

" }, "DeletionProtection":{ "shape":"Boolean", "documentation":"

Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.

" } }, - "documentation":"

Detailed information about a DB cluster.

", + "documentation":"

Detailed information about a cluster.

", "wrapper":true }, "DBClusterAlreadyExistsFault":{ "type":"structure", "members":{ }, - "documentation":"

You already have a DB cluster with the given identifier.

", + "documentation":"

You already have a cluster with the given identifier.

", "error":{ "code":"DBClusterAlreadyExistsFault", "httpStatusCode":400, @@ -1421,22 +1421,22 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

Specifies the instance identifier for this member of the DB cluster.

" + "documentation":"

Specifies the instance identifier for this member of the cluster.

" }, "IsClusterWriter":{ "shape":"Boolean", - "documentation":"

A value that is true if the cluster member is the primary instance for the DB cluster and false otherwise.

" + "documentation":"

A value that is true if the cluster member is the primary instance for the cluster and false otherwise.

" }, "DBClusterParameterGroupStatus":{ "shape":"String", - "documentation":"

Specifies the status of the DB cluster parameter group for this member of the DB cluster.

" + "documentation":"

Specifies the status of the cluster parameter group for this member of the cluster.

" }, "PromotionTier":{ "shape":"IntegerOptional", "documentation":"

A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.

" } }, - "documentation":"

Contains information about an instance that is part of a DB cluster.

", + "documentation":"

Contains information about an instance that is part of a cluster.

", "wrapper":true }, "DBClusterMemberList":{ @@ -1455,7 +1455,7 @@ }, "DBClusters":{ "shape":"DBClusterList", - "documentation":"

A list of DB clusters.

" + "documentation":"

A list of clusters.

" } }, "documentation":"

Represents the output of DescribeDBClusters.
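As a rough illustration of how this paginated output is consumed through the generated client, the sketch below follows the Marker token until every cluster has been listed. The region name and identifiers are assumptions for the example, not part of the model.

import botocore.session

# Minimal sketch: list every cluster, following the Marker pagination token.
client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

marker = None
while True:
    kwargs = {"Marker": marker} if marker else {}
    page = client.describe_db_clusters(**kwargs)
    for cluster in page["DBClusters"]:
        print(cluster["DBClusterIdentifier"], cluster["Engine"])
    marker = page.get("Marker")
    if not marker:
        break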

" @@ -1464,7 +1464,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBClusterIdentifier doesn't refer to an existing DB cluster.

", + "documentation":"

DBClusterIdentifier doesn't refer to an existing cluster.

", "error":{ "code":"DBClusterNotFoundFault", "httpStatusCode":404, @@ -1477,22 +1477,22 @@ "members":{ "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

Provides the name of the DB cluster parameter group.

" + "documentation":"

Provides the name of the cluster parameter group.

" }, "DBParameterGroupFamily":{ "shape":"String", - "documentation":"

Provides the name of the DB parameter group family that this DB cluster parameter group is compatible with.

" + "documentation":"

Provides the name of the parameter group family that this cluster parameter group is compatible with.

" }, "Description":{ "shape":"String", - "documentation":"

Provides the customer-specified description for this DB cluster parameter group.

" + "documentation":"

Provides the customer-specified description for this cluster parameter group.

" }, "DBClusterParameterGroupArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the DB cluster parameter group.

" + "documentation":"

The Amazon Resource Name (ARN) for the cluster parameter group.

" } }, - "documentation":"

Detailed information about a DB cluster parameter group.

", + "documentation":"

Detailed information about a cluster parameter group.

", "wrapper":true }, "DBClusterParameterGroupDetails":{ @@ -1500,7 +1500,7 @@ "members":{ "Parameters":{ "shape":"ParametersList", - "documentation":"

Provides a list of parameters for the DB cluster parameter group.

" + "documentation":"

Provides a list of parameters for the cluster parameter group.

" }, "Marker":{ "shape":"String", @@ -1521,16 +1521,16 @@ "members":{ "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

The name of a DB cluster parameter group.

Constraints:

  • Must be from 1 to 255 letters or numbers.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

This value is stored as a lowercase string.

" + "documentation":"

The name of a cluster parameter group.

Constraints:

  • Must be from 1 to 255 letters or numbers.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

This value is stored as a lowercase string.

" } }, - "documentation":"

Contains the name of a DB cluster parameter group.

" + "documentation":"

Contains the name of a cluster parameter group.

" }, "DBClusterParameterGroupNotFoundFault":{ "type":"structure", "members":{ }, - "documentation":"

DBClusterParameterGroupName doesn't refer to an existing DB cluster parameter group.

", + "documentation":"

DBClusterParameterGroupName doesn't refer to an existing cluster parameter group.

", "error":{ "code":"DBClusterParameterGroupNotFound", "httpStatusCode":404, @@ -1547,7 +1547,7 @@ }, "DBClusterParameterGroups":{ "shape":"DBClusterParameterGroupList", - "documentation":"

A list of DB cluster parameter groups.

" + "documentation":"

A list of cluster parameter groups.

" } }, "documentation":"

Represents the output of DBClusterParameterGroups.

" @@ -1556,7 +1556,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB cluster can't be created because you have reached the maximum allowed quota of DB clusters.

", + "documentation":"

The cluster can't be created because you have reached the maximum allowed quota of clusters.

", "error":{ "code":"DBClusterQuotaExceededFault", "httpStatusCode":403, @@ -1573,10 +1573,10 @@ }, "Status":{ "shape":"String", - "documentation":"

Describes the state of association between the IAM role and the DB cluster. The Status property returns one of the following values:

  • ACTIVE - The IAM role ARN is associated with the DB cluster and can be used to access other AWS services on your behalf.

  • PENDING - The IAM role ARN is being associated with the DB cluster.

  • INVALID - The IAM role ARN is associated with the DB cluster, but the DB cluster cannot assume the IAM role to access other AWS services on your behalf.

" + "documentation":"

Describes the state of association between the IAM role and the cluster. The Status property returns one of the following values:

  • ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.

  • PENDING - The IAM role ARN is being associated with the cluster.

  • INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.

" } }, - "documentation":"

Describes an AWS Identity and Access Management (IAM) role that is associated with a DB cluster.

" + "documentation":"

Describes an AWS Identity and Access Management (IAM) role that is associated with a cluster.

" }, "DBClusterRoles":{ "type":"list", @@ -1590,15 +1590,15 @@ "members":{ "AvailabilityZones":{ "shape":"AvailabilityZones", - "documentation":"

Provides the list of Amazon EC2 Availability Zones that instances in the DB cluster snapshot can be restored in.

" + "documentation":"

Provides the list of Amazon EC2 Availability Zones that instances in the cluster snapshot can be restored in.

" }, "DBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

Specifies the identifier for the DB cluster snapshot.

" + "documentation":"

Specifies the identifier for the cluster snapshot.

" }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from.

" + "documentation":"

Specifies the cluster identifier of the cluster that this cluster snapshot was created from.

" }, "SnapshotCreateTime":{ "shape":"TStamp", @@ -1610,31 +1610,31 @@ }, "Status":{ "shape":"String", - "documentation":"

Specifies the status of this DB cluster snapshot.

" + "documentation":"

Specifies the status of this cluster snapshot.

" }, "Port":{ "shape":"Integer", - "documentation":"

Specifies the port that the DB cluster was listening on at the time of the snapshot.

" + "documentation":"

Specifies the port that the cluster was listening on at the time of the snapshot.

" }, "VpcId":{ "shape":"String", - "documentation":"

Provides the virtual private cloud (VPC) ID that is associated with the DB cluster snapshot.

" + "documentation":"

Provides the virtual private cloud (VPC) ID that is associated with the cluster snapshot.

" }, "ClusterCreateTime":{ "shape":"TStamp", - "documentation":"

Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

" + "documentation":"

Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).

" }, "MasterUsername":{ "shape":"String", - "documentation":"

Provides the master user name for the DB cluster snapshot.

" + "documentation":"

Provides the master user name for the cluster snapshot.

" }, "EngineVersion":{ "shape":"String", - "documentation":"

Provides the version of the database engine for this DB cluster snapshot.

" + "documentation":"

Provides the version of the database engine for this cluster snapshot.

" }, "SnapshotType":{ "shape":"String", - "documentation":"

Provides the type of the DB cluster snapshot.

" + "documentation":"

Provides the type of the cluster snapshot.

" }, "PercentProgress":{ "shape":"Integer", @@ -1642,29 +1642,29 @@ }, "StorageEncrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the DB cluster snapshot is encrypted.

" + "documentation":"

Specifies whether the cluster snapshot is encrypted.

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB cluster snapshot.

" + "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted cluster snapshot.

" }, "DBClusterSnapshotArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the DB cluster snapshot.

" + "documentation":"

The Amazon Resource Name (ARN) for the cluster snapshot.

" }, "SourceDBClusterSnapshotArn":{ "shape":"String", - "documentation":"

If the DB cluster snapshot was copied from a source DB cluster snapshot, the ARN for the source DB cluster snapshot; otherwise, a null value.

" + "documentation":"

If the cluster snapshot was copied from a source cluster snapshot, the ARN for the source cluster snapshot; otherwise, a null value.

" } }, - "documentation":"

Detailed information about a DB cluster snapshot.

", + "documentation":"

Detailed information about a cluster snapshot.

", "wrapper":true }, "DBClusterSnapshotAlreadyExistsFault":{ "type":"structure", "members":{ }, - "documentation":"

You already have a DB cluster snapshot with the given identifier.

", + "documentation":"

You already have a cluster snapshot with the given identifier.

", "error":{ "code":"DBClusterSnapshotAlreadyExistsFault", "httpStatusCode":400, @@ -1677,14 +1677,14 @@ "members":{ "AttributeName":{ "shape":"String", - "documentation":"

The name of the manual DB cluster snapshot attribute.

The attribute named restore refers to the list of AWS accounts that have permission to copy or restore the manual DB cluster snapshot.

" + "documentation":"

The name of the manual cluster snapshot attribute.

The attribute named restore refers to the list of AWS accounts that have permission to copy or restore the manual cluster snapshot.

" }, "AttributeValues":{ "shape":"AttributeValueList", - "documentation":"

The values for the manual DB cluster snapshot attribute.

If the AttributeName field is set to restore, then this element returns a list of IDs of the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If a value of all is in the list, then the manual DB cluster snapshot is public and available for any AWS account to copy or restore.

" + "documentation":"

The values for the manual cluster snapshot attribute.

If the AttributeName field is set to restore, then this element returns a list of IDs of the AWS accounts that are authorized to copy or restore the manual cluster snapshot. If a value of all is in the list, then the manual cluster snapshot is public and available for any AWS account to copy or restore.

" } }, - "documentation":"

Contains the name and values of a manual DB cluster snapshot attribute.

Manual DB cluster snapshot attributes are used to authorize other AWS accounts to restore a manual DB cluster snapshot.

" + "documentation":"

Contains the name and values of a manual cluster snapshot attribute.

Manual cluster snapshot attributes are used to authorize other AWS accounts to restore a manual cluster snapshot.
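For illustration, a minimal sketch of reading these attributes through the generated client to see which AWS accounts may copy or restore a manual snapshot; the snapshot identifier is hypothetical and the response follows the DBClusterSnapshotAttributesResult shape.

import botocore.session

client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

# Inspect the manual snapshot's attributes; the "restore" attribute lists
# the account IDs (or "all") allowed to copy or restore it.
resp = client.describe_db_cluster_snapshot_attributes(
    DBClusterSnapshotIdentifier="my-manual-snapshot"  # hypothetical identifier
)
for attr in resp["DBClusterSnapshotAttributesResult"]["DBClusterSnapshotAttributes"]:
    if attr["AttributeName"] == "restore":
        print("Accounts allowed to restore:", attr["AttributeValues"])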

" }, "DBClusterSnapshotAttributeList":{ "type":"list", @@ -1698,14 +1698,14 @@ "members":{ "DBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB cluster snapshot that the attributes apply to.

" + "documentation":"

The identifier of the cluster snapshot that the attributes apply to.

" }, "DBClusterSnapshotAttributes":{ "shape":"DBClusterSnapshotAttributeList", - "documentation":"

The list of attributes and values for the DB cluster snapshot.

" + "documentation":"

The list of attributes and values for the cluster snapshot.

" } }, - "documentation":"

Detailed information about the attributes that are associated with a DB cluster snapshot.

", + "documentation":"

Detailed information about the attributes that are associated with a cluster snapshot.

", "wrapper":true }, "DBClusterSnapshotList":{ @@ -1724,7 +1724,7 @@ }, "DBClusterSnapshots":{ "shape":"DBClusterSnapshotList", - "documentation":"

Provides a list of DB cluster snapshots.

" + "documentation":"

Provides a list of cluster snapshots.

" } }, "documentation":"

Represents the output of DescribeDBClusterSnapshots.

" @@ -1733,7 +1733,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBClusterSnapshotIdentifier doesn't refer to an existing DB cluster snapshot.

", + "documentation":"

DBClusterSnapshotIdentifier doesn't refer to an existing cluster snapshot.

", "error":{ "code":"DBClusterSnapshotNotFoundFault", "httpStatusCode":404, @@ -1754,7 +1754,7 @@ }, "DBParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the DB parameter group family for the database engine.

" + "documentation":"

The name of the parameter group family for the database engine.

" }, "DBEngineDescription":{ "shape":"String", @@ -1777,7 +1777,7 @@ "documentation":"

A value that indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.

" } }, - "documentation":"

Detailed information about a DB engine version.

" + "documentation":"

Detailed information about an engine version.

" }, "DBEngineVersionList":{ "type":"list", @@ -1795,7 +1795,7 @@ }, "DBEngineVersions":{ "shape":"DBEngineVersionList", - "documentation":"

Detailed information about one or more DB engine versions.

" + "documentation":"

Detailed information about one or more engine versions.

" } }, "documentation":"

Represents the output of DescribeDBEngineVersions.

" @@ -1805,15 +1805,15 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

Contains a user-provided database identifier. This identifier is the unique key that identifies a DB instance.

" + "documentation":"

Contains a user-provided database identifier. This identifier is the unique key that identifies an instance.

" }, "DBInstanceClass":{ "shape":"String", - "documentation":"

Contains the name of the compute and memory capacity class of the DB instance.

" + "documentation":"

Contains the name of the compute and memory capacity class of the instance.

" }, "Engine":{ "shape":"String", - "documentation":"

Provides the name of the database engine to be used for this DB instance.

" + "documentation":"

Provides the name of the database engine to be used for this instance.

" }, "DBInstanceStatus":{ "shape":"String", @@ -1825,7 +1825,7 @@ }, "InstanceCreateTime":{ "shape":"TStamp", - "documentation":"

Provides the date and time that the DB instance was created.

" + "documentation":"

Provides the date and time that the instance was created.

" }, "PreferredBackupWindow":{ "shape":"String", @@ -1833,19 +1833,19 @@ }, "BackupRetentionPeriod":{ "shape":"Integer", - "documentation":"

Specifies the number of days for which automatic DB snapshots are retained.

" + "documentation":"

Specifies the number of days for which automatic snapshots are retained.

" }, "VpcSecurityGroups":{ "shape":"VpcSecurityGroupMembershipList", - "documentation":"

Provides a list of VPC security group elements that the DB instance belongs to.

" + "documentation":"

Provides a list of VPC security group elements that the instance belongs to.

" }, "AvailabilityZone":{ "shape":"String", - "documentation":"

Specifies the name of the Availability Zone that the DB instance is located in.

" + "documentation":"

Specifies the name of the Availability Zone that the instance is located in.

" }, "DBSubnetGroup":{ "shape":"DBSubnetGroup", - "documentation":"

Specifies information on the subnet group that is associated with the DB instance, including the name, description, and subnets in the subnet group.

" + "documentation":"

Specifies information on the subnet group that is associated with the instance, including the name, description, and subnets in the subnet group.

" }, "PreferredMaintenanceWindow":{ "shape":"String", @@ -1853,7 +1853,7 @@ }, "PendingModifiedValues":{ "shape":"PendingModifiedValues", - "documentation":"

Specifies that changes to the DB instance are pending. This element is included only when changes are pending. Specific changes are identified by subelements.

" + "documentation":"

Specifies that changes to the instance are pending. This element is included only when changes are pending. Specific changes are identified by subelements.

" }, "LatestRestorableTime":{ "shape":"TStamp", @@ -1877,19 +1877,19 @@ }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

Contains the name of the DB cluster that the DB instance is a member of if the DB instance is a member of a DB cluster.

" + "documentation":"

Contains the name of the cluster that the instance is a member of, if the instance is a member of a cluster.

" }, "StorageEncrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether or not the DB instance is encrypted.

" + "documentation":"

Specifies whether or not the instance is encrypted.

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB instance.

" + "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted instance.

" }, "DbiResourceId":{ "shape":"String", - "documentation":"

The AWS Region-unique, immutable identifier for the DB instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB instance is accessed.

" + "documentation":"

The AWS Region-unique, immutable identifier for the instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the instance is accessed.

" }, "CACertificateIdentifier":{ "shape":"String", @@ -1901,21 +1901,21 @@ }, "DBInstanceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the DB instance.

" + "documentation":"

The Amazon Resource Name (ARN) for the instance.

" }, "EnabledCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

A list of log types that this DB instance is configured to export to Amazon CloudWatch Logs.

" + "documentation":"

A list of log types that this instance is configured to export to Amazon CloudWatch Logs.

" } }, - "documentation":"

Detailed information about a DB instance.

", + "documentation":"

Detailed information about an instance.

", "wrapper":true }, "DBInstanceAlreadyExistsFault":{ "type":"structure", "members":{ }, - "documentation":"

You already have a DB instance with the given identifier.

", + "documentation":"

You already have an instance with the given identifier.

", "error":{ "code":"DBInstanceAlreadyExists", "httpStatusCode":400, @@ -1939,7 +1939,7 @@ }, "DBInstances":{ "shape":"DBInstanceList", - "documentation":"

Detailed information about one or more DB instances.

" + "documentation":"

Detailed information about one or more instances.

" } }, "documentation":"

Represents the output of DescribeDBInstances.

" @@ -1948,7 +1948,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBInstanceIdentifier doesn't refer to an existing DB instance.

", + "documentation":"

DBInstanceIdentifier doesn't refer to an existing instance.

", "error":{ "code":"DBInstanceNotFound", "httpStatusCode":404, @@ -1969,14 +1969,14 @@ }, "Status":{ "shape":"String", - "documentation":"

Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

" + "documentation":"

Status of the instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

" }, "Message":{ "shape":"String", "documentation":"

Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.

" } }, - "documentation":"

Provides a list of status information for a DB instance.

" + "documentation":"

Provides a list of status information for an instance.

" }, "DBInstanceStatusInfoList":{ "type":"list", @@ -1989,7 +1989,7 @@ "type":"structure", "members":{ }, - "documentation":"

A DB parameter group with the same name already exists.

", + "documentation":"

A parameter group with the same name already exists.

", "error":{ "code":"DBParameterGroupAlreadyExists", "httpStatusCode":400, @@ -2001,7 +2001,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBParameterGroupName doesn't refer to an existing DB parameter group.

", + "documentation":"

DBParameterGroupName doesn't refer to an existing parameter group.

", "error":{ "code":"DBParameterGroupNotFound", "httpStatusCode":404, @@ -2013,7 +2013,7 @@ "type":"structure", "members":{ }, - "documentation":"

This request would cause you to exceed the allowed number of DB parameter groups.

", + "documentation":"

This request would cause you to exceed the allowed number of parameter groups.

", "error":{ "code":"DBParameterGroupQuotaExceeded", "httpStatusCode":400, @@ -2025,7 +2025,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBSecurityGroupName doesn't refer to an existing DB security group.

", + "documentation":"

DBSecurityGroupName doesn't refer to an existing security group.

", "error":{ "code":"DBSecurityGroupNotFound", "httpStatusCode":404, @@ -2049,7 +2049,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBSnapshotIdentifier doesn't refer to an existing DB snapshot.

", + "documentation":"

DBSnapshotIdentifier doesn't refer to an existing snapshot.

", "error":{ "code":"DBSnapshotNotFound", "httpStatusCode":404, @@ -2062,37 +2062,37 @@ "members":{ "DBSubnetGroupName":{ "shape":"String", - "documentation":"

The name of the DB subnet group.

" + "documentation":"

The name of the subnet group.

" }, "DBSubnetGroupDescription":{ "shape":"String", - "documentation":"

Provides the description of the DB subnet group.

" + "documentation":"

Provides the description of the subnet group.

" }, "VpcId":{ "shape":"String", - "documentation":"

Provides the virtual private cloud (VPC) ID of the DB subnet group.

" + "documentation":"

Provides the virtual private cloud (VPC) ID of the subnet group.

" }, "SubnetGroupStatus":{ "shape":"String", - "documentation":"

Provides the status of the DB subnet group.

" + "documentation":"

Provides the status of the subnet group.

" }, "Subnets":{ "shape":"SubnetList", - "documentation":"

Detailed information about one or more subnets within a DB subnet group.

" + "documentation":"

Detailed information about one or more subnets within a subnet group.

" }, "DBSubnetGroupArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) for the DB subnet group.

" } }, - "documentation":"

Detailed information about a DB subnet group.

", + "documentation":"

Detailed information about a subnet group.

", "wrapper":true }, "DBSubnetGroupAlreadyExistsFault":{ "type":"structure", "members":{ }, - "documentation":"

DBSubnetGroupName is already being used by an existing DB subnet group.

", + "documentation":"

DBSubnetGroupName is already being used by an existing subnet group.

", "error":{ "code":"DBSubnetGroupAlreadyExists", "httpStatusCode":400, @@ -2104,7 +2104,7 @@ "type":"structure", "members":{ }, - "documentation":"

Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

", + "documentation":"

Subnets in the subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

", "error":{ "code":"DBSubnetGroupDoesNotCoverEnoughAZs", "httpStatusCode":400, @@ -2121,7 +2121,7 @@ }, "DBSubnetGroups":{ "shape":"DBSubnetGroups", - "documentation":"

Detailed information about one or more DB subnet groups.

" + "documentation":"

Detailed information about one or more subnet groups.

" } }, "documentation":"

Represents the output of DescribeDBSubnetGroups.

" @@ -2130,7 +2130,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBSubnetGroupName doesn't refer to an existing DB subnet group.

", + "documentation":"

DBSubnetGroupName doesn't refer to an existing subnet group.

", "error":{ "code":"DBSubnetGroupNotFoundFault", "httpStatusCode":404, @@ -2142,7 +2142,7 @@ "type":"structure", "members":{ }, - "documentation":"

The request would cause you to exceed the allowed number of DB subnet groups.

", + "documentation":"

The request would cause you to exceed the allowed number of subnet groups.

", "error":{ "code":"DBSubnetGroupQuotaExceeded", "httpStatusCode":400, @@ -2161,7 +2161,7 @@ "type":"structure", "members":{ }, - "documentation":"

The request would cause you to exceed the allowed number of subnets in a DB subnet group.

", + "documentation":"

The request would cause you to exceed the allowed number of subnets in a subnet group.

", "error":{ "code":"DBSubnetQuotaExceededFault", "httpStatusCode":400, @@ -2173,7 +2173,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB upgrade failed because a resource that the DB depends on can't be modified.

", + "documentation":"

The upgrade failed because a resource that the database depends on can't be modified.

", "error":{ "code":"DBUpgradeDependencyFailure", "httpStatusCode":400, @@ -2187,15 +2187,15 @@ "members":{ "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The DB cluster identifier for the DB cluster to be deleted. This parameter isn't case sensitive.

Constraints:

  • Must match an existing DBClusterIdentifier.

" + "documentation":"

The cluster identifier for the cluster to be deleted. This parameter isn't case sensitive.

Constraints:

  • Must match an existing DBClusterIdentifier.

" }, "SkipFinalSnapshot":{ "shape":"Boolean", - "documentation":"

Determines whether a final DB cluster snapshot is created before the DB cluster is deleted. If true is specified, no DB cluster snapshot is created. If false is specified, a DB cluster snapshot is created before the DB cluster is deleted.

If SkipFinalSnapshot is false, you must specify a FinalDBSnapshotIdentifier parameter.

Default: false

" + "documentation":"

Determines whether a final cluster snapshot is created before the cluster is deleted. If true is specified, no cluster snapshot is created. If false is specified, a cluster snapshot is created before the cluster is deleted.

If SkipFinalSnapshot is false, you must specify a FinalDBSnapshotIdentifier parameter.

Default: false

" }, "FinalDBSnapshotIdentifier":{ "shape":"String", - "documentation":"

The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is set to false.

Specifying this parameter and also setting the SkipFinalShapshot parameter to true results in an error.

Constraints:

  • Must be from 1 to 255 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" + "documentation":"

The cluster snapshot identifier of the new cluster snapshot created when SkipFinalSnapshot is set to false.

Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.

Constraints:

  • Must be from 1 to 255 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" } }, "documentation":"

Represents the input to DeleteDBCluster.
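A minimal sketch of this input as a call through the generated client, taking a final snapshot before deletion; both identifiers and the region are hypothetical.

import botocore.session

client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

# Take a final snapshot before deleting the cluster; with SkipFinalSnapshot=False
# a FinalDBSnapshotIdentifier must be supplied.
client.delete_db_cluster(
    DBClusterIdentifier="my-cluster",              # hypothetical
    SkipFinalSnapshot=False,
    FinalDBSnapshotIdentifier="my-cluster-final",  # hypothetical
)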

" @@ -2206,7 +2206,7 @@ "members":{ "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB cluster parameter group.

Constraints:

  • Must be the name of an existing DB cluster parameter group.

  • You can't delete a default DB cluster parameter group.

  • Cannot be associated with any DB clusters.

" + "documentation":"

The name of the cluster parameter group.

Constraints:

  • Must be the name of an existing cluster parameter group.

  • You can't delete a default cluster parameter group.

  • Cannot be associated with any clusters.

" } }, "documentation":"

Represents the input to DeleteDBClusterParameterGroup.

" @@ -2223,7 +2223,7 @@ "members":{ "DBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB cluster snapshot to delete.

Constraints: Must be the name of an existing DB cluster snapshot in the available state.

" + "documentation":"

The identifier of the cluster snapshot to delete.

Constraints: Must be the name of an existing cluster snapshot in the available state.

" } }, "documentation":"

Represents the input to DeleteDBClusterSnapshot.

" @@ -2240,7 +2240,7 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

The DB instance identifier for the DB instance to be deleted. This parameter isn't case sensitive.

Constraints:

  • Must match the name of an existing DB instance.

" + "documentation":"

The instance identifier for the instance to be deleted. This parameter isn't case sensitive.

Constraints:

  • Must match the name of an existing instance.

" } }, "documentation":"

Represents the input to DeleteDBInstance.
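And the corresponding sketch for removing a single instance; the identifier is hypothetical.

import botocore.session

client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

# Delete one instance from a cluster by its identifier.
client.delete_db_instance(DBInstanceIdentifier="my-cluster-instance-1")  # hypothetical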

" @@ -2288,7 +2288,7 @@ "members":{ "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

The name of a specific DB cluster parameter group to return details for.

Constraints:

  • If provided, must match the name of an existing DBClusterParameterGroup.

" + "documentation":"

The name of a specific cluster parameter group to return details for.

Constraints:

  • If provided, must match the name of an existing DBClusterParameterGroup.

" }, "Filters":{ "shape":"FilterList", @@ -2311,7 +2311,7 @@ "members":{ "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

The name of a specific DB cluster parameter group to return parameter details for.

Constraints:

  • If provided, must match the name of an existing DBClusterParameterGroup.

" + "documentation":"

The name of a specific cluster parameter group to return parameter details for.

Constraints:

  • If provided, must match the name of an existing DBClusterParameterGroup.

" }, "Source":{ "shape":"String", @@ -2338,7 +2338,7 @@ "members":{ "DBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier for the DB cluster snapshot to describe the attributes for.

" + "documentation":"

The identifier for the cluster snapshot to describe the attributes for.

" } }, "documentation":"

Represents the input to DescribeDBClusterSnapshotAttributes.

" @@ -2354,15 +2354,15 @@ "members":{ "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The ID of the DB cluster to retrieve the list of DB cluster snapshots for. This parameter can't be used with the DBClusterSnapshotIdentifier parameter. This parameter is not case sensitive.

Constraints:

  • If provided, must match the identifier of an existing DBCluster.

" + "documentation":"

The ID of the cluster to retrieve the list of cluster snapshots for. This parameter can't be used with the DBClusterSnapshotIdentifier parameter. This parameter is not case sensitive.

Constraints:

  • If provided, must match the identifier of an existing DBCluster.

" }, "DBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

A specific DB cluster snapshot identifier to describe. This parameter can't be used with the DBClusterIdentifier parameter. This value is stored as a lowercase string.

Constraints:

  • If provided, must match the identifier of an existing DBClusterSnapshot.

  • If this identifier is for an automated snapshot, the SnapshotType parameter must also be specified.

" + "documentation":"

A specific cluster snapshot identifier to describe. This parameter can't be used with the DBClusterIdentifier parameter. This value is stored as a lowercase string.

Constraints:

  • If provided, must match the identifier of an existing DBClusterSnapshot.

  • If this identifier is for an automated snapshot, the SnapshotType parameter must also be specified.

" }, "SnapshotType":{ "shape":"String", - "documentation":"

The type of DB cluster snapshots to be returned. You can specify one of the following values:

  • automated - Return all DB cluster snapshots that Amazon DocumentDB has automatically created for your AWS account.

  • manual - Return all DB cluster snapshots that you have manually created for your AWS account.

  • shared - Return all manual DB cluster snapshots that have been shared to your AWS account.

  • public - Return all DB cluster snapshots that have been marked as public.

If you don't specify a SnapshotType value, then both automated and manual DB cluster snapshots are returned. You can include shared DB cluster snapshots with these results by setting the IncludeShared parameter to true. You can include public DB cluster snapshots with these results by setting the IncludePublic parameter to true.

The IncludeShared and IncludePublic parameters don't apply for SnapshotType values of manual or automated. The IncludePublic parameter doesn't apply when SnapshotType is set to shared. The IncludeShared parameter doesn't apply when SnapshotType is set to public.

" + "documentation":"

The type of cluster snapshots to be returned. You can specify one of the following values:

  • automated - Return all cluster snapshots that Amazon DocumentDB has automatically created for your AWS account.

  • manual - Return all cluster snapshots that you have manually created for your AWS account.

  • shared - Return all manual cluster snapshots that have been shared to your AWS account.

  • public - Return all cluster snapshots that have been marked as public.

If you don't specify a SnapshotType value, then both automated and manual cluster snapshots are returned. You can include shared cluster snapshots with these results by setting the IncludeShared parameter to true. You can include public cluster snapshots with these results by setting the IncludePublic parameter to true.

The IncludeShared and IncludePublic parameters don't apply for SnapshotType values of manual or automated. The IncludePublic parameter doesn't apply when SnapshotType is set to shared. The IncludeShared parameter doesn't apply when SnapshotType is set to public.

" }, "Filters":{ "shape":"FilterList", @@ -2378,11 +2378,11 @@ }, "IncludeShared":{ "shape":"Boolean", - "documentation":"

Set to true to include shared manual DB cluster snapshots from other AWS accounts that this AWS account has been given permission to copy or restore, and otherwise false. The default is false.

" + "documentation":"

Set to true to include shared manual cluster snapshots from other AWS accounts that this AWS account has been given permission to copy or restore, and otherwise false. The default is false.

" }, "IncludePublic":{ "shape":"Boolean", - "documentation":"

Set to true to include manual DB cluster snapshots that are public and can be copied or restored by any AWS account, and otherwise false. The default is false.

" + "documentation":"

Set to true to include manual cluster snapshots that are public and can be copied or restored by any AWS account, and otherwise false. The default is false.

" } }, "documentation":"

Represents the input to DescribeDBClusterSnapshots.
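As a rough sketch of these parameters in use, the call below lists manual snapshots of one cluster and also asks for snapshots shared from other accounts; the identifiers and region are hypothetical.

import botocore.session

client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

# List manual snapshots of one cluster, including snapshots shared by other accounts.
resp = client.describe_db_cluster_snapshots(
    DBClusterIdentifier="my-cluster",  # hypothetical
    SnapshotType="manual",
    IncludeShared=True,
)
for snap in resp["DBClusterSnapshots"]:
    print(snap["DBClusterSnapshotIdentifier"], snap["Status"])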

" @@ -2392,11 +2392,11 @@ "members":{ "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The user-provided DB cluster identifier. If this parameter is specified, information from only the specific DB cluster is returned. This parameter isn't case sensitive.

Constraints:

  • If provided, must match an existing DBClusterIdentifier.

" + "documentation":"

The user-provided cluster identifier. If this parameter is specified, information from only the specific cluster is returned. This parameter isn't case sensitive.

Constraints:

  • If provided, must match an existing DBClusterIdentifier.

" }, "Filters":{ "shape":"FilterList", - "documentation":"

A filter that specifies one or more DB clusters to describe.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes information about the DB clusters identified by these ARNs.

" + "documentation":"

A filter that specifies one or more clusters to describe.

Supported filters:

  • db-cluster-id - Accepts cluster identifiers and cluster Amazon Resource Names (ARNs). The results list only includes information about the clusters identified by these ARNs.
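A minimal sketch of the db-cluster-id filter through the generated client; the cluster identifier is a hypothetical example.

import botocore.session

client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

# Restrict the result set to specific clusters via the db-cluster-id filter.
resp = client.describe_db_clusters(
    Filters=[{"Name": "db-cluster-id", "Values": ["my-cluster"]}]  # hypothetical
)
print([c["DBClusterIdentifier"] for c in resp["DBClusters"]])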

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -2422,7 +2422,7 @@ }, "DBParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of a specific DB parameter group family to return details for.

Constraints:

  • If provided, must match an existing DBParameterGroupFamily.

" + "documentation":"

The name of a specific parameter group family to return details for.

Constraints:

  • If provided, must match an existing DBParameterGroupFamily.

" }, "Filters":{ "shape":"FilterList", @@ -2456,11 +2456,11 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

The user-provided instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case sensitive.

Constraints:

  • If provided, must match the identifier of an existing DBInstance.

" + "documentation":"

The user-provided instance identifier. If this parameter is specified, information from only the specific instance is returned. This parameter isn't case sensitive.

Constraints:

  • If provided, must match the identifier of an existing DBInstance.

" }, "Filters":{ "shape":"FilterList", - "documentation":"

A filter that specifies one or more DB instances to describe.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list includes only the information about the DB instances that are associated with the DB clusters that are identified by these ARNs.

  • db-instance-id - Accepts DB instance identifiers and DB instance ARNs. The results list includes only the information about the DB instances that are identified by these ARNs.

" + "documentation":"

A filter that specifies one or more instances to describe.

Supported filters:

  • db-cluster-id - Accepts cluster identifiers and cluster Amazon Resource Names (ARNs). The results list includes only the information about the instances that are associated with the clusters that are identified by these ARNs.

  • db-instance-id - Accepts instance identifiers and instance ARNs. The results list includes only the information about the instances that are identified by these ARNs.
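A sketch of the db-cluster-id filter applied to DescribeDBInstances, listing only the instances that belong to one cluster; the identifier is hypothetical.

import botocore.session

client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

# List only the instances that belong to the given cluster.
resp = client.describe_db_instances(
    Filters=[{"Name": "db-cluster-id", "Values": ["my-cluster"]}]  # hypothetical
)
for inst in resp["DBInstances"]:
    print(inst["DBInstanceIdentifier"], inst["DBInstanceStatus"])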

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -2478,7 +2478,7 @@ "members":{ "DBSubnetGroupName":{ "shape":"String", - "documentation":"

The name of the DB subnet group to return details for.

" + "documentation":"

The name of the subnet group to return details for.

" }, "Filters":{ "shape":"FilterList", @@ -2501,7 +2501,7 @@ "members":{ "DBParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the DB cluster parameter group family to return the engine parameter information for.

" + "documentation":"

The name of the cluster parameter group family to return the engine parameter information for.

" }, "Filters":{ "shape":"FilterList", @@ -2586,7 +2586,7 @@ "members":{ "Engine":{ "shape":"String", - "documentation":"

The name of the engine to retrieve DB instance options for.

" + "documentation":"

The name of the engine to retrieve instance options for.

" }, "EngineVersion":{ "shape":"String", @@ -2594,7 +2594,7 @@ }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The DB instance class filter value. Specify this parameter to show only the available offerings that match the specified DB instance class.

" + "documentation":"

The instance class filter value. Specify this parameter to show only the available offerings that match the specified instance class.

" }, "LicenseModel":{ "shape":"String", @@ -2628,7 +2628,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

A filter that specifies one or more resources to return pending maintenance actions for.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list includes only pending maintenance actions for the DB clusters identified by these ARNs.

  • db-instance-id - Accepts DB instance identifiers and DB instance ARNs. The results list includes only pending maintenance actions for the DB instances identified by these ARNs.

" + "documentation":"

A filter that specifies one or more resources to return pending maintenance actions for.

Supported filters:

  • db-cluster-id - Accepts cluster identifiers and cluster Amazon Resource Names (ARNs). The results list includes only pending maintenance actions for the clusters identified by these ARNs.

  • db-instance-id - Accepts instance identifiers and instance ARNs. The results list includes only pending maintenance actions for the instances identified by these ARNs.
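A sketch of these filters used to check pending maintenance for a single instance; the identifier is hypothetical, and the response keys follow the ResourcePendingMaintenanceActions shape.

import botocore.session

client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

# Check pending maintenance actions for a single instance.
resp = client.describe_pending_maintenance_actions(
    Filters=[{"Name": "db-instance-id", "Values": ["my-cluster-instance-1"]}]  # hypothetical
)
for resource in resp["PendingMaintenanceActions"]:
    print(resource["ResourceIdentifier"], resource["PendingMaintenanceActionDetails"])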

" }, "Marker":{ "shape":"String", @@ -2646,7 +2646,7 @@ "members":{ "Address":{ "shape":"String", - "documentation":"

Specifies the DNS address of the DB instance.

" + "documentation":"

Specifies the DNS address of the instance.

" }, "Port":{ "shape":"Integer", @@ -2657,14 +2657,14 @@ "documentation":"

Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

" } }, - "documentation":"

Network information for accessing a DB cluster or DB instance. Client programs must specify a valid endpoint to access these Amazon DocumentDB resources.

" + "documentation":"

Network information for accessing a cluster or instance. Client programs must specify a valid endpoint to access these Amazon DocumentDB resources.

" }, "EngineDefaults":{ "type":"structure", "members":{ "DBParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the DB cluster parameter group family to return the engine parameter information for.

" + "documentation":"

The name of the cluster parameter group family to return the engine parameter information for.

" }, "Marker":{ "shape":"String", @@ -2672,7 +2672,7 @@ }, "Parameters":{ "shape":"ParametersList", - "documentation":"

The parameters of a particular DB cluster parameter group family.

" + "documentation":"

The parameters of a particular cluster parameter group family.

" } }, "documentation":"

Contains the result of a successful invocation of the DescribeEngineDefaultClusterParameters operation.
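For illustration, a sketch that pulls the default cluster parameters for a parameter group family; the family name shown is only an example value.

import botocore.session

client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

# Fetch the engine defaults for a cluster parameter group family.
resp = client.describe_engine_default_cluster_parameters(
    DBParameterGroupFamily="docdb3.6"  # example family name
)
for param in resp["EngineDefaults"]["Parameters"]:
    print(param["ParameterName"], param.get("ParameterValue"))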

", @@ -2773,11 +2773,11 @@ "members":{ "DBClusterIdentifier":{ "shape":"String", - "documentation":"

A DB cluster identifier to force a failover for. This parameter is not case sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster.

" + "documentation":"

A cluster identifier to force a failover for. This parameter is not case sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster.

" }, "TargetDBInstanceIdentifier":{ "shape":"String", - "documentation":"

The name of the instance to promote to the primary instance.

You must specify the instance identifier for an Amazon DocumentDB replica in the DB cluster. For example, mydbcluster-replica1.

" + "documentation":"

The name of the instance to promote to the primary instance.

You must specify the instance identifier for an Amazon DocumentDB replica in the cluster. For example, mydbcluster-replica1.

" } }, "documentation":"

Represents the input to FailoverDBCluster.
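A minimal sketch of a forced failover to a chosen replica; the cluster identifier is hypothetical, and the replica name reuses the example given above.

import botocore.session

client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

# Promote a specific replica to primary by forcing a failover.
client.failover_db_cluster(
    DBClusterIdentifier="my-cluster",                   # hypothetical
    TargetDBInstanceIdentifier="mydbcluster-replica1",  # replica to promote
)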

" @@ -2824,7 +2824,7 @@ "type":"structure", "members":{ }, - "documentation":"

The request would cause you to exceed the allowed number of DB instances.

", + "documentation":"

The request would cause you to exceed the allowed number of instances.

", "error":{ "code":"InstanceQuotaExceeded", "httpStatusCode":400, @@ -2836,7 +2836,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB cluster doesn't have enough capacity for the current operation.

", + "documentation":"

The cluster doesn't have enough capacity for the current operation.

", "error":{ "code":"InsufficientDBClusterCapacityFault", "httpStatusCode":403, @@ -2848,7 +2848,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified DB instance class isn't available in the specified Availability Zone.

", + "documentation":"

The specified instance class isn't available in the specified Availability Zone.

", "error":{ "code":"InsufficientDBInstanceCapacity", "httpStatusCode":400, @@ -2874,7 +2874,7 @@ "type":"structure", "members":{ }, - "documentation":"

The provided value isn't a valid DB cluster snapshot state.

", + "documentation":"

The provided value isn't a valid cluster snapshot state.

", "error":{ "code":"InvalidDBClusterSnapshotStateFault", "httpStatusCode":400, @@ -2886,7 +2886,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB cluster isn't in a valid state.

", + "documentation":"

The cluster isn't in a valid state.

", "error":{ "code":"InvalidDBClusterStateFault", "httpStatusCode":400, @@ -2898,7 +2898,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified DB instance isn't in the available state.

", + "documentation":"

The specified instance isn't in the available state.

", "error":{ "code":"InvalidDBInstanceState", "httpStatusCode":400, @@ -2910,7 +2910,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB parameter group is in use, or it is in a state that is not valid. If you are trying to delete the parameter group, you can't delete it when the parameter group is in this state.

", + "documentation":"

The parameter group is in use, or it is in a state that is not valid. If you are trying to delete the parameter group, you can't delete it when the parameter group is in this state.

", "error":{ "code":"InvalidDBParameterGroupState", "httpStatusCode":400, @@ -2922,7 +2922,7 @@ "type":"structure", "members":{ }, - "documentation":"

The state of the DB security group doesn't allow deletion.

", + "documentation":"

The state of the security group doesn't allow deletion.

", "error":{ "code":"InvalidDBSecurityGroupState", "httpStatusCode":400, @@ -2934,7 +2934,7 @@ "type":"structure", "members":{ }, - "documentation":"

The state of the DB snapshot doesn't allow deletion.

", + "documentation":"

The state of the snapshot doesn't allow deletion.

", "error":{ "code":"InvalidDBSnapshotState", "httpStatusCode":400, @@ -2946,7 +2946,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB subnet group can't be deleted because it's in use.

", + "documentation":"

The subnet group can't be deleted because it's in use.

", "error":{ "code":"InvalidDBSubnetGroupStateFault", "httpStatusCode":400, @@ -2958,7 +2958,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB subnet isn't in the available state.

", + "documentation":"

The subnet isn't in the available state.

", "error":{ "code":"InvalidDBSubnetStateFault", "httpStatusCode":400, @@ -2994,7 +2994,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB subnet group doesn't cover all Availability Zones after it is created because of changes that were made.

", + "documentation":"

The subnet group doesn't cover all Availability Zones after it is created because of changes that were made.

", "error":{ "code":"InvalidVPCNetworkStateFault", "httpStatusCode":400, @@ -3043,15 +3043,15 @@ "members":{ "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The DB cluster identifier for the cluster that is being modified. This parameter is not case sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster.

" + "documentation":"

The cluster identifier for the cluster that is being modified. This parameter is not case sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster.

" }, "NewDBClusterIdentifier":{ "shape":"String", - "documentation":"

The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster2

" + "documentation":"

The new cluster identifier for the cluster when renaming a cluster. This value is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster2

" }, "ApplyImmediately":{ "shape":"Boolean", - "documentation":"

A value that specifies whether the changes in this request and any pending changes are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is set to false, changes to the DB cluster are applied during the next maintenance window.

The ApplyImmediately parameter affects only the NewDBClusterIdentifier and MasterUserPassword values. If you set this parameter value to false, the changes to the NewDBClusterIdentifier and MasterUserPassword values are applied during the next maintenance window. All other changes are applied immediately, regardless of the value of the ApplyImmediately parameter.

Default: false

" + "documentation":"

A value that specifies whether the changes in this request and any pending changes are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the cluster. If this parameter is set to false, changes to the cluster are applied during the next maintenance window.

The ApplyImmediately parameter affects only the NewDBClusterIdentifier and MasterUserPassword values. If you set this parameter value to false, the changes to the NewDBClusterIdentifier and MasterUserPassword values are applied during the next maintenance window. All other changes are applied immediately, regardless of the value of the ApplyImmediately parameter.

Default: false

" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", @@ -3059,15 +3059,15 @@ }, "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB cluster parameter group to use for the DB cluster.

" + "documentation":"

The name of the cluster parameter group to use for the cluster.

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

A list of virtual private cloud (VPC) security groups that the DB cluster will belong to.

" + "documentation":"

A list of virtual private cloud (VPC) security groups that the cluster will belong to.

" }, "Port":{ "shape":"IntegerOptional", - "documentation":"

The port number on which the DB cluster accepts connections.

Constraints: Must be a value from 1150 to 65535.

Default: The same port as the original DB cluster.

" + "documentation":"

The port number on which the cluster accepts connections.

Constraints: Must be a value from 1150 to 65535.

Default: The same port as the original cluster.

" }, "MasterUserPassword":{ "shape":"String", @@ -3083,7 +3083,7 @@ }, "CloudwatchLogsExportConfiguration":{ "shape":"CloudwatchLogsExportConfiguration", - "documentation":"

The configuration setting for the log types to be enabled for export to Amazon CloudWatch Logs for a specific DB instance or DB cluster. The EnableLogTypes and DisableLogTypes arrays determine which logs are exported (or not exported) to CloudWatch Logs.

" + "documentation":"

The configuration setting for the log types to be enabled for export to Amazon CloudWatch Logs for a specific instance or cluster. The EnableLogTypes and DisableLogTypes arrays determine which logs are exported (or not exported) to CloudWatch Logs.
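A sketch of this configuration passed through ModifyDBCluster; the audit log type and cluster identifier are example values.

import botocore.session

client = botocore.session.get_session().create_client("docdb", region_name="us-east-1")

# Enable one log type for export to CloudWatch Logs and disable none.
client.modify_db_cluster(
    DBClusterIdentifier="my-cluster",  # hypothetical
    CloudwatchLogsExportConfiguration={
        "EnableLogTypes": ["audit"],   # example log type
        "DisableLogTypes": [],
    },
)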

" }, "EngineVersion":{ "shape":"String", @@ -3105,11 +3105,11 @@ "members":{ "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB cluster parameter group to modify.

" + "documentation":"

The name of the cluster parameter group to modify.

" }, "Parameters":{ "shape":"ParametersList", - "documentation":"

A list of parameters in the DB cluster parameter group to modify.

" + "documentation":"

A list of parameters in the cluster parameter group to modify.

" } }, "documentation":"

Represents the input to ModifyDBClusterParameterGroup.

" @@ -3129,19 +3129,19 @@ "members":{ "DBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier for the DB cluster snapshot to modify the attributes for.

" + "documentation":"

The identifier for the cluster snapshot to modify the attributes for.

" }, "AttributeName":{ "shape":"String", - "documentation":"

The name of the DB cluster snapshot attribute to modify.

To manage authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this value to restore.

" + "documentation":"

The name of the cluster snapshot attribute to modify.

To manage authorization for other AWS accounts to copy or restore a manual cluster snapshot, set this value to restore.

" }, "ValuesToAdd":{ "shape":"AttributeValueList", - "documentation":"

A list of DB cluster snapshot attributes to add to the attribute specified by AttributeName.

To authorize other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account IDs. To make the manual DB cluster snapshot restorable by any AWS account, set it to all. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want to be available to all AWS accounts.

" + "documentation":"

A list of cluster snapshot attributes to add to the attribute specified by AttributeName.

To authorize other AWS accounts to copy or restore a manual cluster snapshot, set this list to include one or more AWS account IDs. To make the manual cluster snapshot restorable by any AWS account, set it to all. Do not add the all value for any manual cluster snapshots that contain private information that you don't want to be available to all AWS accounts.

" }, "ValuesToRemove":{ "shape":"AttributeValueList", - "documentation":"

A list of DB cluster snapshot attributes to remove from the attribute specified by AttributeName.

To remove authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account identifiers. To remove authorization for any AWS account to copy or restore the DB cluster snapshot, set it to all . If you specify all, an AWS account whose account ID is explicitly added to the restore attribute can still copy or restore a manual DB cluster snapshot.

" + "documentation":"

A list of cluster snapshot attributes to remove from the attribute specified by AttributeName.

To remove authorization for other AWS accounts to copy or restore a manual cluster snapshot, set this list to include one or more AWS account identifiers. To remove authorization for any AWS account to copy or restore the cluster snapshot, set it to all. If you specify all, an AWS account whose account ID is explicitly added to the restore attribute can still copy or restore a manual cluster snapshot.

" } }, "documentation":"

Represents the input to ModifyDBClusterSnapshotAttribute.
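A minimal boto3 sketch of this request, assuming a manual cluster snapshot and a second AWS account (both identifiers below are placeholders):

import boto3

docdb = boto3.client("docdb")

# Grant another account permission to copy or restore the manual cluster snapshot.
docdb.modify_db_cluster_snapshot_attribute(
    DBClusterSnapshotIdentifier="my-manual-snapshot",  # placeholder identifier
    AttributeName="restore",
    ValuesToAdd=["123456789012"],                      # placeholder account ID; "all" would share it with every account
)

# Revoke that permission later with ValuesToRemove.
docdb.modify_db_cluster_snapshot_attribute(
    DBClusterSnapshotIdentifier="my-manual-snapshot",
    AttributeName="restore",
    ValuesToRemove=["123456789012"],
)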

" @@ -3158,27 +3158,27 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

The DB instance identifier. This value is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing DBInstance.

" + "documentation":"

The instance identifier. This value is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing DBInstance.

" }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The new compute and memory capacity of the DB instance; for example, db.r5.large. Not all DB instance classes are available in all AWS Regions.

If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

Default: Uses existing setting.

" + "documentation":"

The new compute and memory capacity of the instance; for example, db.r5.large. Not all instance classes are available in all AWS Regions.

If you modify the instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

Default: Uses existing setting.

" }, "ApplyImmediately":{ "shape":"Boolean", - "documentation":"

Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next reboot.

Default: false

" + "documentation":"

Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the instance.

If this parameter is set to false, changes to the instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next reboot.

Default: false

" }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, changing this parameter causes a reboot of the DB instance. If you are moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure that pending changes are applied.

Default: Uses existing setting.

Format: ddd:hh24:mi-ddd:hh24:mi

Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Must be at least 30 minutes.

" + "documentation":"

The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, changing this parameter causes a reboot of the instance. If you are moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure that pending changes are applied.

Default: Uses existing setting.

Format: ddd:hh24:mi-ddd:hh24:mi

Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Must be at least 30 minutes.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

Indicates that minor version upgrades are applied automatically to the DB instance during the maintenance window. Changing this parameter doesn't result in an outage except in the following case, and the change is asynchronously applied as soon as possible. An outage results if this parameter is set to true during the maintenance window, and a newer minor version is available, and Amazon DocumentDB has enabled automatic patching for that engine version.

" + "documentation":"

Indicates that minor version upgrades are applied automatically to the instance during the maintenance window. Changing this parameter doesn't result in an outage except in the following case, and the change is asynchronously applied as soon as possible. An outage results if this parameter is set to true during the maintenance window, and a newer minor version is available, and Amazon DocumentDB has enabled automatic patching for that engine version.

" }, "NewDBInstanceIdentifier":{ "shape":"String", - "documentation":"

The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot occurs immediately if you set Apply Immediately to true. It occurs during the next maintenance window if you set Apply Immediately to false. This value is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: mydbinstance

" + "documentation":"

The new instance identifier for the instance when renaming an instance. When you change the instance identifier, an instance reboot occurs immediately if you set Apply Immediately to true. It occurs during the next maintenance window if you set Apply Immediately to false. This value is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: mydbinstance
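Tying the ModifyDBInstance fields above together, a hedged boto3 sketch (the instance identifier and instance class are placeholders):

import boto3

docdb = boto3.client("docdb")

# Request a new instance class and defer the change to the preferred maintenance window.
docdb.modify_db_instance(
    DBInstanceIdentifier="my-docdb-instance",  # placeholder; must match an existing instance
    DBInstanceClass="db.r5.large",             # placeholder class; availability varies by Region
    ApplyImmediately=False,                    # False (the default) waits for the maintenance window
)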

" }, "CACertificateIdentifier":{ "shape":"String", @@ -3206,15 +3206,15 @@ "members":{ "DBSubnetGroupName":{ "shape":"String", - "documentation":"

The name for the DB subnet group. This value is stored as a lowercase string. You can't modify the default subnet group.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

" + "documentation":"

The name for the subnet group. This value is stored as a lowercase string. You can't modify the default subnet group.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

" }, "DBSubnetGroupDescription":{ "shape":"String", - "documentation":"

The description for the DB subnet group.

" + "documentation":"

The description for the subnet group.

" }, "SubnetIds":{ "shape":"SubnetIdentifierList", - "documentation":"

The Amazon EC2 subnet IDs for the DB subnet group.

" + "documentation":"

The Amazon EC2 subnet IDs for the subnet group.

" } }, "documentation":"

Represents the input to ModifyDBSubnetGroup.
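A small boto3 sketch of this request; the subnet group name and subnet IDs are placeholders, and the default subnet group cannot be modified:

import boto3

docdb = boto3.client("docdb")

# Replace the subnets in an existing, non-default subnet group.
docdb.modify_db_subnet_group(
    DBSubnetGroupName="mysubnetgroup",                         # placeholder; must already exist
    DBSubnetGroupDescription="Subnets for the DocDB cluster",  # optional description
    SubnetIds=[
        "subnet-0123456789abcdef0",                            # placeholder subnet IDs
        "subnet-0fedcba9876543210",
    ],
)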

" @@ -3230,30 +3230,30 @@ "members":{ "Engine":{ "shape":"String", - "documentation":"

The engine type of a DB instance.

" + "documentation":"

The engine type of an instance.

" }, "EngineVersion":{ "shape":"String", - "documentation":"

The engine version of a DB instance.

" + "documentation":"

The engine version of an instance.

" }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The DB instance class for a DB instance.

" + "documentation":"

The instance class for an instance.

" }, "LicenseModel":{ "shape":"String", - "documentation":"

The license model for a DB instance.

" + "documentation":"

The license model for an instance.

" }, "AvailabilityZones":{ "shape":"AvailabilityZoneList", - "documentation":"

A list of Availability Zones for a DB instance.

" + "documentation":"

A list of Availability Zones for an instance.

" }, "Vpc":{ "shape":"Boolean", - "documentation":"

Indicates whether a DB instance is in a virtual private cloud (VPC).

" + "documentation":"

Indicates whether an instance is in a virtual private cloud (VPC).

" } }, - "documentation":"

The options that are available for a DB instance.

", + "documentation":"

The options that are available for an instance.

", "wrapper":true }, "OrderableDBInstanceOptionsList":{ @@ -3268,7 +3268,7 @@ "members":{ "OrderableDBInstanceOptions":{ "shape":"OrderableDBInstanceOptionsList", - "documentation":"

The options that are available for a particular orderable DB instance.

" + "documentation":"

The options that are available for a particular orderable instance.

" }, "Marker":{ "shape":"String", @@ -3407,19 +3407,19 @@ "members":{ "DBInstanceClass":{ "shape":"String", - "documentation":"

Contains the new DBInstanceClass for the DB instance that will be applied or is currently being applied.

" + "documentation":"

Contains the new DBInstanceClass for the instance that will be applied or is currently being applied.

" }, "AllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

Contains the new AllocatedStorage size for the DB instance that will be applied or is currently being applied.

" + "documentation":"

Contains the new AllocatedStorage size for the instance that will be applied or is currently being applied.

" }, "MasterUserPassword":{ "shape":"String", - "documentation":"

Contains the pending or currently in-progress change of the master credentials for the DB instance.

" + "documentation":"

Contains the pending or currently in-progress change of the master credentials for the instance.

" }, "Port":{ "shape":"IntegerOptional", - "documentation":"

Specifies the pending port for the DB instance.

" + "documentation":"

Specifies the pending port for the instance.

" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", @@ -3427,7 +3427,7 @@ }, "MultiAZ":{ "shape":"BooleanOptional", - "documentation":"

Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.

" + "documentation":"

Indicates that the Single-AZ instance is to change to a Multi-AZ deployment.

" }, "EngineVersion":{ "shape":"String", @@ -3435,19 +3435,19 @@ }, "LicenseModel":{ "shape":"String", - "documentation":"

The license model for the DB instance.

Valid values: license-included, bring-your-own-license, general-public-license

" + "documentation":"

The license model for the instance.

Valid values: license-included, bring-your-own-license, general-public-license

" }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

Specifies the new Provisioned IOPS value for the DB instance that will be applied or is currently being applied.

" + "documentation":"

Specifies the new Provisioned IOPS value for the instance that will be applied or is currently being applied.

" }, "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

Contains the new DBInstanceIdentifier for the DB instance that will be applied or is currently being applied.

" + "documentation":"

Contains the new DBInstanceIdentifier for the instance that will be applied or is currently being applied.

" }, "StorageType":{ "shape":"String", - "documentation":"

Specifies the storage type to be associated with the DB instance.

" + "documentation":"

Specifies the storage type to be associated with the instance.

" }, "CACertificateIdentifier":{ "shape":"String", @@ -3455,14 +3455,14 @@ }, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

The new DB subnet group for the DB instance.

" + "documentation":"

The new subnet group for the instance.

" }, "PendingCloudwatchLogsExports":{ "shape":"PendingCloudwatchLogsExports", "documentation":"

A list of the log types whose configuration is still pending. These log types are in the process of being activated or deactivated.

" } }, - "documentation":"

One or more modified settings for a DB instance. These modified settings have been requested, but haven't been applied yet.

" + "documentation":"

One or more modified settings for an instance. These modified settings have been requested, but haven't been applied yet.

" }, "RebootDBInstanceMessage":{ "type":"structure", @@ -3470,7 +3470,7 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing DBInstance.

" + "documentation":"

The instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing DBInstance.

" }, "ForceFailover":{ "shape":"BooleanOptional", @@ -3509,15 +3509,15 @@ "members":{ "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB cluster parameter group to reset.

" + "documentation":"

The name of the cluster parameter group to reset.

" }, "ResetAllParameters":{ "shape":"Boolean", - "documentation":"

A value that is set to true to reset all parameters in the DB cluster parameter group to their default values, and false otherwise. You can't use this parameter if there is a list of parameter names specified for the Parameters parameter.

" + "documentation":"

A value that is set to true to reset all parameters in the cluster parameter group to their default values, and false otherwise. You can't use this parameter if there is a list of parameter names specified for the Parameters parameter.

" }, "Parameters":{ "shape":"ParametersList", - "documentation":"

A list of parameter names in the DB cluster parameter group to reset to the default values. You can't use this parameter if the ResetAllParameters parameter is set to true.

" + "documentation":"

A list of parameter names in the cluster parameter group to reset to the default values. You can't use this parameter if the ResetAllParameters parameter is set to true.

" } }, "documentation":"

Represents the input to ResetDBClusterParameterGroup.
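To make the constraint concrete that ResetAllParameters and Parameters are mutually exclusive, a hedged boto3 sketch with a placeholder parameter group name:

import boto3

docdb = boto3.client("docdb")

# Either reset every parameter in the group to its default value...
docdb.reset_db_cluster_parameter_group(
    DBClusterParameterGroupName="my-docdb-params",  # placeholder name
    ResetAllParameters=True,
)

# ...or reset only named parameters (cannot be combined with ResetAllParameters=True).
docdb.reset_db_cluster_parameter_group(
    DBClusterParameterGroupName="my-docdb-params",
    ResetAllParameters=False,
    Parameters=[{"ParameterName": "audit_logs", "ApplyMethod": "pending-reboot"}],
)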

" @@ -3563,39 +3563,39 @@ }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The name of the DB cluster to create from the DB snapshot or DB cluster snapshot. This parameter isn't case sensitive.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-snapshot-id

" + "documentation":"

The name of the cluster to create from the snapshot or cluster snapshot. This parameter isn't case sensitive.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-snapshot-id

" }, "SnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier for the DB snapshot or DB cluster snapshot to restore from.

You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.

Constraints:

  • Must match the identifier of an existing snapshot.

" + "documentation":"

The identifier for the snapshot or cluster snapshot to restore from.

You can use either the name or the Amazon Resource Name (ARN) to specify a cluster snapshot. However, you can use only the ARN to specify a snapshot.

Constraints:

  • Must match the identifier of an existing snapshot.

" }, "Engine":{ "shape":"String", - "documentation":"

The database engine to use for the new DB cluster.

Default: The same as source.

Constraint: Must be compatible with the engine of the source.

" + "documentation":"

The database engine to use for the new cluster.

Default: The same as source.

Constraint: Must be compatible with the engine of the source.

" }, "EngineVersion":{ "shape":"String", - "documentation":"

The version of the database engine to use for the new DB cluster.

" + "documentation":"

The version of the database engine to use for the new cluster.

" }, "Port":{ "shape":"IntegerOptional", - "documentation":"

The port number on which the new DB cluster accepts connections.

Constraints: Must be a value from 1150 to 65535.

Default: The same port as the original DB cluster.

" + "documentation":"

The port number on which the new cluster accepts connections.

Constraints: Must be a value from 1150 to 65535.

Default: The same port as the original cluster.

" }, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

The name of the DB subnet group to use for the new DB cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

" + "documentation":"

The name of the subnet group to use for the new cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

A list of virtual private cloud (VPC) security groups that the new DB cluster will belong to.

" + "documentation":"

A list of virtual private cloud (VPC) security groups that the new cluster will belong to.

" }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to be assigned to the restored DB cluster.

" + "documentation":"

The tags to be assigned to the restored cluster.

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key identifier to use when restoring an encrypted DB cluster from a DB snapshot or DB cluster snapshot.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the AWS KMS encryption key used to encrypt the new DB cluster, then you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

  • If the DB snapshot or DB cluster snapshot in SnapshotIdentifier is encrypted, then the restored DB cluster is encrypted using the AWS KMS key that was used to encrypt the DB snapshot or the DB cluster snapshot.

  • If the DB snapshot or the DB cluster snapshot in SnapshotIdentifier is not encrypted, then the restored DB cluster is not encrypted.

" + "documentation":"

The AWS KMS key identifier to use when restoring an encrypted cluster from a DB snapshot or cluster snapshot.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are restoring a cluster with the same AWS account that owns the AWS KMS encryption key used to encrypt the new cluster, then you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

  • If the snapshot or cluster snapshot in SnapshotIdentifier is encrypted, then the restored cluster is encrypted using the AWS KMS key that was used to encrypt the snapshot or the cluster snapshot.

  • If the snapshot or the cluster snapshot in SnapshotIdentifier is not encrypted, then the restored cluster is not encrypted.
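Pulling the RestoreDBClusterFromSnapshot fields above into a hedged boto3 sketch (the cluster name, snapshot identifier, and commented-out key alias are placeholders):

import boto3

docdb = boto3.client("docdb")

# Restore a new cluster from a manual cluster snapshot; the engine must match the source.
docdb.restore_db_cluster_from_snapshot(
    DBClusterIdentifier="restored-cluster",   # placeholder name for the new cluster
    SnapshotIdentifier="my-manual-snapshot",  # placeholder snapshot name or ARN
    Engine="docdb",
    # KmsKeyId="alias/my-docdb-key",          # optional; omit to inherit the snapshot's encryption
)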

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -3623,39 +3623,39 @@ "members":{ "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The name of the new DB cluster to be created.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" + "documentation":"

The name of the new cluster to be created.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" }, "SourceDBClusterIdentifier":{ "shape":"String", - "documentation":"

The identifier of the source DB cluster from which to restore.

Constraints:

  • Must match the identifier of an existing DBCluster.

" + "documentation":"

The identifier of the source cluster from which to restore.

Constraints:

  • Must match the identifier of an existing DBCluster.

" }, "RestoreToTime":{ "shape":"TStamp", - "documentation":"

The date and time to restore the DB cluster to.

Valid values: A time in Universal Coordinated Time (UTC) format.

Constraints:

  • Must be before the latest restorable time for the DB instance.

  • Must be specified if the UseLatestRestorableTime parameter is not provided.

  • Cannot be specified if the UseLatestRestorableTime parameter is true.

  • Cannot be specified if the RestoreType parameter is copy-on-write.

Example: 2015-03-07T23:45:00Z

" + "documentation":"

The date and time to restore the cluster to.

Valid values: A time in Universal Coordinated Time (UTC) format.

Constraints:

  • Must be before the latest restorable time for the instance.

  • Must be specified if the UseLatestRestorableTime parameter is not provided.

  • Cannot be specified if the UseLatestRestorableTime parameter is true.

  • Cannot be specified if the RestoreType parameter is copy-on-write.

Example: 2015-03-07T23:45:00Z

" }, "UseLatestRestorableTime":{ "shape":"Boolean", - "documentation":"

A value that is set to true to restore the DB cluster to the latest restorable backup time, and false otherwise.

Default: false

Constraints: Cannot be specified if the RestoreToTime parameter is provided.

" + "documentation":"

A value that is set to true to restore the cluster to the latest restorable backup time, and false otherwise.

Default: false

Constraints: Cannot be specified if the RestoreToTime parameter is provided.

" }, "Port":{ "shape":"IntegerOptional", - "documentation":"

The port number on which the new DB cluster accepts connections.

Constraints: Must be a value from 1150 to 65535.

Default: The default port for the engine.

" + "documentation":"

The port number on which the new cluster accepts connections.

Constraints: Must be a value from 1150 to 65535.

Default: The default port for the engine.

" }, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

The DB subnet group name to use for the new DB cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

" + "documentation":"

The subnet group name to use for the new cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

A list of VPC security groups that the new DB cluster belongs to.

" + "documentation":"

A list of VPC security groups that the new cluster belongs to.

" }, "Tags":{ "shape":"TagList", - "documentation":"

The tags to be assigned to the restored DB cluster.

" + "documentation":"

The tags to be assigned to the restored cluster.

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the AWS KMS encryption key used to encrypt the new DB cluster, then you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

You can restore to a new DB cluster and encrypt the new DB cluster with an AWS KMS key that is different from the AWS KMS key used to encrypt the source DB cluster. The new DB cluster is encrypted with the AWS KMS key identified by the KmsKeyId parameter.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

  • If the DB cluster is encrypted, then the restored DB cluster is encrypted using the AWS KMS key that was used to encrypt the source DB cluster.

  • If the DB cluster is not encrypted, then the restored DB cluster is not encrypted.

If DBClusterIdentifier refers to a DB cluster that is not encrypted, then the restore request is rejected.

" + "documentation":"

The AWS KMS key identifier to use when restoring an encrypted cluster from an encrypted cluster.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are restoring a cluster with the same AWS account that owns the AWS KMS encryption key used to encrypt the new cluster, then you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

You can restore to a new cluster and encrypt the new cluster with an AWS KMS key that is different from the AWS KMS key used to encrypt the source cluster. The new cluster is encrypted with the AWS KMS key identified by the KmsKeyId parameter.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

  • If the cluster is encrypted, then the restored cluster is encrypted using the AWS KMS key that was used to encrypt the source cluster.

  • If the cluster is not encrypted, then the restored cluster is not encrypted.

If DBClusterIdentifier refers to a cluster that is not encrypted, then the restore request is rejected.

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -3690,7 +3690,7 @@ "type":"structure", "members":{ }, - "documentation":"

The request would cause you to exceed the allowed number of DB snapshots.

", + "documentation":"

The request would cause you to exceed the allowed number of snapshots.

", "error":{ "code":"SnapshotQuotaExceeded", "httpStatusCode":400, @@ -3745,7 +3745,7 @@ "type":"structure", "members":{ }, - "documentation":"

The request would cause you to exceed the allowed amount of storage available across all DB instances.

", + "documentation":"

The request would cause you to exceed the allowed amount of storage available across all instances.

", "error":{ "code":"StorageQuotaExceeded", "httpStatusCode":400, @@ -3788,7 +3788,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB subnet is already in use in the Availability Zone.

", + "documentation":"

The subnet is already in use in the Availability Zone.

", "error":{ "code":"SubnetAlreadyInUse", "httpStatusCode":400, @@ -3855,7 +3855,7 @@ }, "Description":{ "shape":"String", - "documentation":"

The version of the database engine that a DB instance can be upgraded to.

" + "documentation":"

The version of the database engine that an instance can be upgraded to.

" }, "AutoUpgrade":{ "shape":"Boolean", @@ -3866,7 +3866,7 @@ "documentation":"

A value that indicates whether a database engine is upgraded to a major version.

" } }, - "documentation":"

The version of the database engine that a DB instance can be upgraded to.

" + "documentation":"

The version of the database engine that an instance can be upgraded to.

" }, "ValidUpgradeTargetList":{ "type":"list", diff --git a/botocore/data/ds/2015-04-16/service-2.json b/botocore/data/ds/2015-04-16/service-2.json index 990ffdf9..e63eb765 100644 --- a/botocore/data/ds/2015-04-16/service-2.json +++ b/botocore/data/ds/2015-04-16/service-2.json @@ -2499,7 +2499,10 @@ }, "DisableLDAPSRequest":{ "type":"structure", - "required":["DirectoryId"], + "required":[ + "DirectoryId", + "Type" + ], "members":{ "DirectoryId":{ "shape":"DirectoryId", @@ -2644,7 +2647,10 @@ }, "EnableLDAPSRequest":{ "type":"structure", - "required":["DirectoryId"], + "required":[ + "DirectoryId", + "Type" + ], "members":{ "DirectoryId":{ "shape":"DirectoryId", diff --git a/botocore/data/ebs/2019-11-02/service-2.json b/botocore/data/ebs/2019-11-02/service-2.json index 750b6b65..c562c9fb 100644 --- a/botocore/data/ebs/2019-11-02/service-2.json +++ b/botocore/data/ebs/2019-11-02/service-2.json @@ -96,11 +96,11 @@ }, "FirstBlockToken":{ "shape":"BlockToken", - "documentation":"

The block token for the block index of the first snapshot ID specified in the list changed blocks operation. This value is absent if the first snapshot does not have the changed block that is on the second snapshot.

" + "documentation":"

The block token for the block index of the FirstSnapshotId specified in the ListChangedBlocks operation. This value is absent if the first snapshot does not have the changed block that is on the second snapshot.

" }, "SecondBlockToken":{ "shape":"BlockToken", - "documentation":"

The block token for the block index of the second snapshot ID specified in the list changed blocks operation.

" + "documentation":"

The block token for the block index of the SecondSnapshotId specified in the ListChangedBlocks operation.

" } }, "documentation":"

A block of data in an Amazon Elastic Block Store snapshot that is different from another snapshot of the same volume/snapshot lineage.

", @@ -140,13 +140,13 @@ }, "BlockIndex":{ "shape":"BlockIndex", - "documentation":"

The block index of the block from which to get data.

Obtain the block index by running the list changed blocks or list snapshot blocks operations.

", + "documentation":"

The block index of the block from which to get data.

Obtain the BlockIndex by running the ListChangedBlocks or ListSnapshotBlocks operations.

", "location":"uri", "locationName":"blockIndex" }, "BlockToken":{ "shape":"BlockToken", - "documentation":"

The block token of the block from which to get data.

Obtain the block token by running the list changed blocks or list snapshot blocks operations.

", + "documentation":"

The block token of the block from which to get data.

Obtain the BlockToken by running the ListChangedBlocks or ListSnapshotBlocks operations.

", "location":"querystring", "locationName":"blockToken" } @@ -167,7 +167,7 @@ }, "Checksum":{ "shape":"Checksum", - "documentation":"

The checksum generated for the block.

", + "documentation":"

The checksum generated for the block, which is Base64 encoded.

", "location":"header", "locationName":"x-amz-Checksum" }, @@ -186,13 +186,13 @@ "members":{ "FirstSnapshotId":{ "shape":"SnapshotId", - "documentation":"

The ID of the first snapshot to use for the comparison.

", + "documentation":"

The ID of the first snapshot to use for the comparison.

The FirstSnapshotId parameter must be specified with a SecondSnapshotId parameter; otherwise, an error occurs.

", "location":"querystring", "locationName":"firstSnapshotId" }, "SecondSnapshotId":{ "shape":"SnapshotId", - "documentation":"

The ID of the second snapshot to use for the comparison.

", + "documentation":"

The ID of the second snapshot to use for the comparison.

The SecondSnapshotId parameter must be specified with a FirstSnapshotId parameter; otherwise, an error occurs.

", "location":"uri", "locationName":"secondSnapshotId" }, @@ -225,7 +225,7 @@ }, "ExpiryTime":{ "shape":"TimeStamp", - "documentation":"

The time when the block token expires.

" + "documentation":"

The time when the BlockToken expires.

" }, "VolumeSize":{ "shape":"VolumeSize", @@ -280,7 +280,7 @@ }, "ExpiryTime":{ "shape":"TimeStamp", - "documentation":"

The time when the block token expires.

" + "documentation":"

The time when the BlockToken expires.

" }, "VolumeSize":{ "shape":"VolumeSize", @@ -347,5 +347,5 @@ }, "VolumeSize":{"type":"long"} }, - "documentation":"

You can use the Amazon Elastic Block Store (EBS) direct APIs to directly read the data on your EBS snapshots, and identify the difference between two snapshots. You can view the details of blocks in an EBS snapshot, compare the block difference between two snapshots, and directly access the data in a snapshot. If you’re an independent software vendor (ISV) who offers backup services for EBS, the EBS direct APIs makes it easier and more cost-effective to track incremental changes on your EBS volumes via EBS snapshots. This can be done without having to create new volumes from EBS snapshots, and then use EC2 instances to compare the differences.

This API reference provides detailed information about the actions, data types, parameters, and errors of the EBS direct APIs. For more information about the elements that make up the EBS direct APIs, and examples of how to use them effectively, see Accessing the Contents of an EBS Snapshot. For more information about how to use the EBS direct APIs, see the EBS direct APIs User Guide. To view the currently supported AWS Regions and endpoints for the EBS direct APIs, see AWS Service Endpoints in the AWS General Reference.

" + "documentation":"

You can use the Amazon Elastic Block Store (EBS) direct APIs to directly read the data on your EBS snapshots, and identify the difference between two snapshots. You can view the details of blocks in an EBS snapshot, compare the block difference between two snapshots, and directly access the data in a snapshot. If you’re an independent software vendor (ISV) who offers backup services for EBS, the EBS direct APIs make it easier and more cost-effective to track incremental changes on your EBS volumes via EBS snapshots. This can be done without having to create new volumes from EBS snapshots.

This API reference provides detailed information about the actions, data types, parameters, and errors of the EBS direct APIs. For more information about the elements that make up the EBS direct APIs, and examples of how to use them effectively, see Accessing the Contents of an EBS Snapshot in the Amazon Elastic Compute Cloud User Guide. For more information about the supported AWS Regions, endpoints, and service quotas for the EBS direct APIs, see Amazon Elastic Block Store Endpoints and Quotas in the AWS General Reference.
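As a rough end-to-end illustration of the workflow the overview above describes (both snapshot IDs are placeholders), list the blocks that differ between two snapshots of the same lineage and read one of them:

import boto3

ebs = boto3.client("ebs")

# Compare two snapshots of the same volume lineage.
changed = ebs.list_changed_blocks(
    FirstSnapshotId="snap-0123456789abcdef0",   # placeholder snapshot IDs
    SecondSnapshotId="snap-0fedcba9876543210",
)

for block in changed["ChangedBlocks"]:
    # Read each changed block from the second snapshot using its block token.
    resp = ebs.get_snapshot_block(
        SnapshotId="snap-0fedcba9876543210",
        BlockIndex=block["BlockIndex"],
        BlockToken=block["SecondBlockToken"],
    )
    data = resp["BlockData"].read()             # streaming body with the raw block data
    print(block["BlockIndex"], len(data), resp["Checksum"])  # Checksum is Base64 encoded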

" } diff --git a/botocore/data/ec2/2016-11-15/paginators-1.json b/botocore/data/ec2/2016-11-15/paginators-1.json index 5d744feb..9368ad56 100644 --- a/botocore/data/ec2/2016-11-15/paginators-1.json +++ b/botocore/data/ec2/2016-11-15/paginators-1.json @@ -427,6 +427,18 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "FastSnapshotRestores" + }, + "DescribeIpv6Pools": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Ipv6Pools" + }, + "GetAssociatedIpv6PoolCidrs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Ipv6CidrAssociations" } } } diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 4da9502e..78612fb0 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -70,7 +70,7 @@ }, "input":{"shape":"AdvertiseByoipCidrRequest"}, "output":{"shape":"AdvertiseByoipCidrResult"}, - "documentation":"

Advertises an IPv4 address range that is provisioned for use with your AWS resources through bring your own IP addresses (BYOIP).

You can perform this operation at most once every 10 seconds, even if you specify different address ranges each time.

We recommend that you stop advertising the BYOIP CIDR from other locations when you advertise it from AWS. To minimize down time, you can configure your AWS resources to use an address from a BYOIP CIDR before it is advertised, and then simultaneously stop advertising it from the current location and start advertising it through AWS.

It can take a few minutes before traffic to the specified addresses starts routing to AWS because of BGP propagation delays.

To stop advertising the BYOIP CIDR, use WithdrawByoipCidr.

" + "documentation":"

Advertises an IPv4 or IPv6 address range that is provisioned for use with your AWS resources through bring your own IP addresses (BYOIP).

You can perform this operation at most once every 10 seconds, even if you specify different address ranges each time.

We recommend that you stop advertising the BYOIP CIDR from other locations when you advertise it from AWS. To minimize down time, you can configure your AWS resources to use an address from a BYOIP CIDR before it is advertised, and then simultaneously stop advertising it from the current location and start advertising it through AWS.

It can take a few minutes before traffic to the specified addresses starts routing to AWS because of BGP propagation delays.

To stop advertising the BYOIP CIDR, use WithdrawByoipCidr.

" }, "AllocateAddress":{ "name":"AllocateAddress", @@ -130,7 +130,7 @@ }, "input":{"shape":"AssociateAddressRequest"}, "output":{"shape":"AssociateAddressResult"}, - "documentation":"

Associates an Elastic IP address with an instance or a network interface. Before you can use an Elastic IP address, you must allocate it to your account.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

[EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

[VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation. You cannot associate an Elastic IP address with an instance or network interface that has an existing Elastic IP address.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

" + "documentation":"

Associates an Elastic IP address with an instance or a network interface. Before you can use an Elastic IP address, you must allocate it to your account.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

[EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

[VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation. You cannot associate an Elastic IP address with an instance or network interface that has an existing Elastic IP address.

You cannot associate an Elastic IP address with an interface in a different network border group.

This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

" }, "AssociateClientVpnTargetNetwork":{ "name":"AssociateClientVpnTargetNetwork", @@ -209,7 +209,7 @@ }, "input":{"shape":"AssociateVpcCidrBlockRequest"}, "output":{"shape":"AssociateVpcCidrBlockResult"}, - "documentation":"

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, or you can associate an Amazon-provided IPv6 CIDR block. The IPv6 CIDR block size is fixed at /56.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see VPC and Subnet Sizing in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP). The IPv6 CIDR block size is fixed at /56.

For more information about associating CIDR blocks with your VPC and applicable restrictions, see VPC and Subnet Sizing in the Amazon Virtual Private Cloud User Guide.

" }, "AttachClassicLinkVpc":{ "name":"AttachClassicLinkVpc", @@ -871,7 +871,7 @@ }, "input":{"shape":"CreateVpcRequest"}, "output":{"shape":"CreateVpcResult"}, - "documentation":"

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC.

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). For more information about how large to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

You can optionally request an IPv6 CIDR block for the VPC. You can request an Amazon-provided IPv6 CIDR block from Amazon's pool of IPv6 addresses, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP).

By default, each instance you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide (AmazonProvidedDNS). For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide.
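A minimal boto3 sketch of this call, using only the long-standing request fields (the IPv4 CIDR is a placeholder; attaching an IPv6 CIDR from a BYOIP pool uses additional request fields not shown here):

import boto3

ec2 = boto3.client("ec2")

# Create a VPC with an IPv4 CIDR and an Amazon-provided /56 IPv6 CIDR block.
vpc = ec2.create_vpc(
    CidrBlock="10.0.0.0/16",           # placeholder IPv4 range
    AmazonProvidedIpv6CidrBlock=True,  # Amazon picks the IPv6 range; you cannot choose it
    InstanceTenancy="default",         # cannot be changed after creation
)
print(vpc["Vpc"]["VpcId"])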

" }, "CreateVpcEndpoint":{ "name":"CreateVpcEndpoint", @@ -881,7 +881,7 @@ }, "input":{"shape":"CreateVpcEndpointRequest"}, "output":{"shape":"CreateVpcEndpointResult"}, - "documentation":"

Creates a VPC endpoint for a specified service. An endpoint enables you to create a private connection between your VPC and the service. The service may be provided by AWS, an AWS Marketplace partner, or another AWS account. For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

A gateway endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

An interface endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.

Use DescribeVpcEndpointServices to get a list of supported services.

" + "documentation":"

Creates a VPC endpoint for a specified service. An endpoint enables you to create a private connection between your VPC and the service. The service may be provided by AWS, an AWS Marketplace Partner, or another AWS account. For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

A gateway endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint, which will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

An interface endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.

Use DescribeVpcEndpointServices to get a list of supported services.

" }, "CreateVpcEndpointConnectionNotification":{ "name":"CreateVpcEndpointConnectionNotification", @@ -901,7 +901,7 @@ }, "input":{"shape":"CreateVpcEndpointServiceConfigurationRequest"}, "output":{"shape":"CreateVpcEndpointServiceConfigurationResult"}, - "documentation":"

Creates a VPC endpoint service configuration to which service consumers (AWS accounts, IAM users, and IAM roles) can connect. Service consumers can create an interface VPC endpoint to connect to your service.

To create an endpoint service configuration, you must first create a Network Load Balancer for your service. For more information, see VPC Endpoint Services in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a VPC endpoint service configuration to which service consumers (AWS accounts, IAM users, and IAM roles) can connect. Service consumers can create an interface VPC endpoint to connect to your service.

To create an endpoint service configuration, you must first create a Network Load Balancer for your service. For more information, see VPC Endpoint Services in the Amazon Virtual Private Cloud User Guide.

If you set the private DNS name, you must prove that you own the private DNS domain name. For more information, see VPC Endpoint Service Private DNS Name Verification in the Amazon Virtual Private Cloud User Guide.

" }, "CreateVpcPeeringConnection":{ "name":"CreateVpcPeeringConnection", @@ -1439,7 +1439,7 @@ }, "input":{"shape":"DescribeAccountAttributesRequest"}, "output":{"shape":"DescribeAccountAttributesResult"}, - "documentation":"

Describes attributes of your AWS account. The following are the supported account attributes:

  • supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.

  • default-vpc: The ID of the default VPC for your account, or none.

  • max-instances: The maximum number of On-Demand Instances that you can run.

  • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

  • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.

  • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.

" + "documentation":"

Describes attributes of your AWS account. The following are the supported account attributes:

  • supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.

  • default-vpc: The ID of the default VPC for your account, or none.

  • max-instances: This attribute is no longer supported. The returned value does not reflect your actual vCPU limit for running On-Demand Instances. For more information, see On-Demand Instance Limits in the Amazon Elastic Compute Cloud User Guide.

  • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

  • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.

  • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.

" }, "DescribeAddresses":{ "name":"DescribeAddresses", @@ -1489,7 +1489,7 @@ }, "input":{"shape":"DescribeByoipCidrsRequest"}, "output":{"shape":"DescribeByoipCidrsResult"}, - "documentation":"

Describes the IP address ranges that were specified in calls to ProvisionByoipCidr.

To describe the address pools that were created when you provisioned the address ranges, use DescribePublicIpv4Pools.

" + "documentation":"

Describes the IP address ranges that were specified in calls to ProvisionByoipCidr.

To describe the address pools that were created when you provisioned the address ranges, use DescribePublicIpv4Pools or DescribeIpv6Pools.

" }, "DescribeCapacityReservations":{ "name":"DescribeCapacityReservations", @@ -1679,7 +1679,7 @@ }, "input":{"shape":"DescribeFleetsRequest"}, "output":{"shape":"DescribeFleetsResult"}, - "documentation":"

Describes the specified EC2 Fleets or all your EC2 Fleets.

" + "documentation":"

Describes the specified EC2 Fleets or all of your EC2 Fleets.

" }, "DescribeFlowLogs":{ "name":"DescribeFlowLogs", @@ -1881,6 +1881,16 @@ "output":{"shape":"DescribeInternetGatewaysResult"}, "documentation":"

Describes one or more of your internet gateways.

" }, + "DescribeIpv6Pools":{ + "name":"DescribeIpv6Pools", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIpv6PoolsRequest"}, + "output":{"shape":"DescribeIpv6PoolsResult"}, + "documentation":"

Describes your IPv6 address pools.

" + }, "DescribeKeyPairs":{ "name":"DescribeKeyPairs", "http":{ @@ -2833,6 +2843,16 @@ "output":{"shape":"ExportTransitGatewayRoutesResult"}, "documentation":"

Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range.

" }, + "GetAssociatedIpv6PoolCidrs":{ + "name":"GetAssociatedIpv6PoolCidrs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAssociatedIpv6PoolCidrsRequest"}, + "output":{"shape":"GetAssociatedIpv6PoolCidrsResult"}, + "documentation":"

Gets information about the IPv6 CIDR block associations for a specified IPv6 address pool.
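The DescribeIpv6Pools and GetAssociatedIpv6PoolCidrs operations, together with the paginators added in this release, can be driven from boto3 as sketched below (assumes at least one BYOIP IPv6 pool exists in the account):

import boto3

ec2 = boto3.client("ec2")

# Page through the account's IPv6 address pools, then through each pool's CIDR associations.
for page in ec2.get_paginator("describe_ipv6_pools").paginate():
    for pool in page["Ipv6Pools"]:
        print(pool["PoolId"])
        cidr_pages = ec2.get_paginator("get_associated_ipv6_pool_cidrs").paginate(
            PoolId=pool["PoolId"]
        )
        for cidr_page in cidr_pages:
            for assoc in cidr_page["Ipv6CidrAssociations"]:
                print("  ", assoc["Ipv6Cidr"], assoc.get("AssociatedResource"))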

" + }, "GetCapacityReservationUsage":{ "name":"GetCapacityReservationUsage", "http":{ @@ -3081,7 +3101,7 @@ }, "input":{"shape":"ModifyEbsDefaultKmsKeyIdRequest"}, "output":{"shape":"ModifyEbsDefaultKmsKeyIdResult"}, - "documentation":"

Changes the default customer master key (CMK) for EBS encryption by default for your account in this Region.

AWS creates a unique AWS managed CMK in each Region for use with encryption by default. If you change the default CMK to a customer managed CMK, it is used instead of the AWS managed CMK. To reset the default CMK to the AWS managed CMK for EBS, use ResetEbsDefaultKmsKeyId.

If you delete or disable the customer managed CMK that you specified for use with encryption by default, your instances will fail to launch.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Changes the default customer master key (CMK) for EBS encryption by default for your account in this Region.

AWS creates a unique AWS managed CMK in each Region for use with encryption by default. If you change the default CMK to a symmetric customer managed CMK, it is used instead of the AWS managed CMK. To reset the default CMK to the AWS managed CMK for EBS, use ResetEbsDefaultKmsKeyId. Amazon EBS does not support asymmetric CMKs.

If you delete or disable the customer managed CMK that you specified for use with encryption by default, your instances will fail to launch.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifyFleet":{ "name":"ModifyFleet", @@ -3352,7 +3372,7 @@ }, "input":{"shape":"ModifyVpcEndpointServiceConfigurationRequest"}, "output":{"shape":"ModifyVpcEndpointServiceConfigurationResult"}, - "documentation":"

Modifies the attributes of your VPC endpoint service configuration. You can change the Network Load Balancers for your service, and you can specify whether acceptance is required for requests to connect to your endpoint service through an interface VPC endpoint.

" + "documentation":"

Modifies the attributes of your VPC endpoint service configuration. You can change the Network Load Balancers for your service, and you can specify whether acceptance is required for requests to connect to your endpoint service through an interface VPC endpoint.

If you set or modify the private DNS name, you must prove that you own the private DNS domain name. For more information, see VPC Endpoint Service Private DNS Name Verification in the Amazon Virtual Private Cloud User Guide.

" }, "ModifyVpcEndpointServicePermissions":{ "name":"ModifyVpcEndpointServicePermissions", @@ -3442,7 +3462,7 @@ }, "input":{"shape":"ProvisionByoipCidrRequest"}, "output":{"shape":"ProvisionByoipCidrResult"}, - "documentation":"

Provisions an address range for use with your AWS resources through bring your own IP addresses (BYOIP) and creates a corresponding address pool. After the address range is provisioned, it is ready to be advertised using AdvertiseByoipCidr.

AWS verifies that you own the address range and are authorized to advertise it. You must ensure that the address range is registered to you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

Provisioning an address range is an asynchronous operation, so the call returns immediately, but the address range is not ready to use until its status changes from pending-provision to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. To allocate an Elastic IP address from your address pool, use AllocateAddress with either the specific address from the address pool or the ID of the address pool.

" + "documentation":"

Provisions an IPv4 or IPv6 address range for use with your AWS resources through bring your own IP addresses (BYOIP) and creates a corresponding address pool. After the address range is provisioned, it is ready to be advertised using AdvertiseByoipCidr.

AWS verifies that you own the address range and are authorized to advertise it. You must ensure that the address range is registered to you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

Provisioning an address range is an asynchronous operation, so the call returns immediately, but the address range is not ready to use until its status changes from pending-provision to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. To allocate an Elastic IP address from your IPv4 address pool, use AllocateAddress with either the specific address from the address pool or the ID of the address pool.
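A hedged sketch of that provision, monitor, and advertise sequence in boto3; the CIDR and the ROA authorization message and signature are placeholders that must come from your own address range registration:

import boto3

ec2 = boto3.client("ec2")

# 1. Provision the address range (placeholder CIDR and authorization context).
ec2.provision_byoip_cidr(
    Cidr="203.0.113.0/24",
    CidrAuthorizationContext={"Message": "<signed message>", "Signature": "<signature>"},
)

# 2. Monitor the range until its state changes from pending-provision to provisioned.
print(ec2.describe_byoip_cidrs(MaxResults=10)["ByoipCidrs"])

# 3. Once provisioned, start advertising the range from AWS.
ec2.advertise_byoip_cidr(Cidr="203.0.113.0/24")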

" }, "PurchaseHostReservation":{ "name":"PurchaseHostReservation", @@ -3491,7 +3511,7 @@ }, "input":{"shape":"RegisterImageRequest"}, "output":{"shape":"RegisterImageResult"}, - "documentation":"

Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using the block device mapping. For more information, see Launching a Linux Instance from a Backup in the Amazon Elastic Compute Cloud User Guide.

You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to verify the subscription status for package updates. To create a new AMI for operating systems that require a billing product code, do the following:

  1. Launch an instance from an existing AMI with that billing product code.

  2. Customize the instance.

  3. Create a new AMI from the instance using CreateImage to preserve the billing product code association.

If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an AMI with a billing product code, make sure that the Reserved Instance has the matching billing product code. If you purchase a Reserved Instance without the matching billing product code, the Reserved Instance will not be applied to the On-Demand Instance.

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

" + "documentation":"

Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using the block device mapping. For more information, see Launching a Linux Instance from a Backup in the Amazon Elastic Compute Cloud User Guide.

You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to verify the subscription status for package updates. To create a new AMI for operating systems that require a billing product code, instead of registering the AMI, do the following to preserve the billing product code association:

  1. Launch an instance from an existing AMI with that billing product code.

  2. Customize the instance.

  3. Create an AMI from the instance using CreateImage.

If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an AMI with a billing product code, make sure that the Reserved Instance has the matching billing product code. If you purchase a Reserved Instance without the matching billing product code, the Reserved Instance will not be applied to the On-Demand Instance.

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.
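
A sketch of registering an EBS-backed Linux AMI from a root-volume snapshot via the block device mapping (the snapshot ID and image name are placeholders):

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    ec2.register_image(
        Name='my-restored-image',   # placeholder name
        Architecture='x86_64',
        VirtualizationType='hvm',
        RootDeviceName='/dev/xvda',
        BlockDeviceMappings=[
            {'DeviceName': '/dev/xvda', 'Ebs': {'SnapshotId': 'snap-0123456789abcdef0'}},
        ],
        EnaSupport=True,
    )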

" }, "RegisterTransitGatewayMulticastGroupMembers":{ "name":"RegisterTransitGatewayMulticastGroupMembers", @@ -3822,6 +3842,16 @@ "output":{"shape":"StartInstancesResult"}, "documentation":"

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.
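
For example, a stopped EBS-backed instance can be started again with a single call (the instance ID is a placeholder):

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    # StartInstances is asynchronous; the response reports the pending state transition.
    resp = ec2.start_instances(InstanceIds=['i-0123456789abcdef0'])
    for change in resp['StartingInstances']:
        print(change['InstanceId'], change['CurrentState']['Name'])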

" }, + "StartVpcEndpointServicePrivateDnsVerification":{ + "name":"StartVpcEndpointServicePrivateDnsVerification", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartVpcEndpointServicePrivateDnsVerificationRequest"}, + "output":{"shape":"StartVpcEndpointServicePrivateDnsVerificationResult"}, + "documentation":"

Initiates the verification process to prove that the service provider owns the private DNS name domain for the endpoint service.

The service provider must successfully perform the verification before the consumer can use the name to access the service.

Before the service provider runs this command, they must add a record to the DNS server. For more information, see Adding a TXT Record to Your Domain's DNS Server in the Amazon VPC User Guide.
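
A sketch of starting the verification from botocore once the TXT record exists (the service ID is a placeholder):

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    # The TXT record described above must already be present in the domain's DNS.
    ec2.start_vpc_endpoint_service_private_dns_verification(
        ServiceId='vpce-svc-0123456789abcdef0'
    )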

" + }, "StopInstances":{ "name":"StopInstances", "http":{ @@ -3909,7 +3939,7 @@ }, "input":{"shape":"WithdrawByoipCidrRequest"}, "output":{"shape":"WithdrawByoipCidrResult"}, - "documentation":"

Stops advertising an IPv4 address range that is provisioned as an address pool.

You can perform this operation at most once every 10 seconds, even if you specify different address ranges each time.

It can take a few minutes before traffic to the specified addresses stops routing to AWS because of BGP propagation delays.

" + "documentation":"

Stops advertising an address range that is provisioned as an address pool.

You can perform this operation at most once every 10 seconds, even if you specify different address ranges each time.

It can take a few minutes before traffic to the specified addresses stops routing to AWS because of BGP propagation delays.
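
A one-call sketch, assuming the same placeholder range used when the pool was provisioned:

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    # The CIDR must exactly match the provisioned range.
    ec2.withdraw_byoip_cidr(Cidr='203.0.113.0/24')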

" } }, "shapes":{ @@ -4006,7 +4036,7 @@ }, "ServiceId":{ "shape":"ServiceId", - "documentation":"

The ID of the endpoint service.

" + "documentation":"

The ID of the VPC endpoint service.

" }, "VpcEndpointIds":{ "shape":"ValueStringList", @@ -4231,7 +4261,7 @@ "members":{ "Cidr":{ "shape":"String", - "documentation":"

The IPv4 address range, in CIDR notation. This must be the exact range that you provisioned. You can't advertise only a portion of the provisioned range.

" + "documentation":"

The address range, in CIDR notation. This must be the exact range that you provisioned. You can't advertise only a portion of the provisioned range.

" }, "DryRun":{ "shape":"Boolean", @@ -4693,7 +4723,7 @@ ], "members":{ "DhcpOptionsId":{ - "shape":"DhcpOptionsId", + "shape":"DefaultingDhcpOptionsId", "documentation":"

The ID of the DHCP options set, or default to associate no DHCP options with the VPC.

" }, "VpcId":{ @@ -4787,7 +4817,7 @@ "locationName":"ipv6CidrBlock" }, "SubnetId":{ - "shape":"String", + "shape":"SubnetId", "documentation":"

The ID of your subnet.

", "locationName":"subnetId" } @@ -4888,6 +4918,14 @@ "documentation":"

The ID of the VPC.

", "locationName":"vpcId" }, + "Ipv6Pool":{ + "shape":"String", + "documentation":"

The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block.

" + }, + "Ipv6CidrBlock":{ + "shape":"String", + "documentation":"

An IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool in the request.

To let Amazon choose the IPv6 CIDR block for you, omit this parameter.
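
A hedged sketch of associating a BYOIP IPv6 block with an existing VPC (the pool ID, VPC ID, and CIDR are placeholders):

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    ec2.associate_vpc_cidr_block(
        VpcId='vpc-0123456789abcdef0',
        Ipv6Pool='ipv6pool-ec2-EXAMPLE',
        Ipv6CidrBlock='2001:db8:1234::/56',   # omit to let Amazon choose from the pool
    )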

" + }, "Ipv6CidrBlockNetworkBorderGroup":{ "shape":"String", "documentation":"

The name of the location from which we advertise the IPv6 CIDR block. Use this parameter to limit the CIDR block to this location.

You must set AmazonProvidedIpv6CidrBlock to true to use this parameter.

You can have one IPv6 CIDR block association per network border group.

" @@ -5141,7 +5179,6 @@ }, "documentation":"

Contains the output of AttachVpnGateway.

" }, - "AttachmentId":{"type":"string"}, "AttachmentStatus":{ "type":"string", "enum":[ @@ -5565,6 +5602,7 @@ } }, "Boolean":{"type":"boolean"}, + "BundleId":{"type":"string"}, "BundleIdStringList":{ "type":"list", "member":{ @@ -5693,7 +5731,7 @@ "members":{ "Cidr":{ "shape":"String", - "documentation":"

The public IPv4 address range, in CIDR notation.

", + "documentation":"

The address range, in CIDR notation.

", "locationName":"cidr" }, "Description":{ @@ -5730,7 +5768,8 @@ "failed-provision", "pending-deprovision", "pending-provision", - "provisioned" + "provisioned", + "provisioned-not-publicly-advertisable" ] }, "CancelBatchErrorCode":{ @@ -5747,7 +5786,7 @@ "required":["BundleId"], "members":{ "BundleId":{ - "shape":"String", + "shape":"BundleId", "documentation":"

The ID of the bundle task.

" }, "DryRun":{ @@ -6174,6 +6213,27 @@ "Linux with SQL Server Enterprise" ] }, + "CapacityReservationOptions":{ + "type":"structure", + "members":{ + "UsageStrategy":{ + "shape":"FleetCapacityReservationUsageStrategy", + "documentation":"

Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.

If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price or prioritized) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price or prioritized).

If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.

", + "locationName":"usageStrategy" + } + }, + "documentation":"

Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.

This strategy can only be used if the EC2 Fleet is of type instant.

For more information about Capacity Reservations, see On-Demand Capacity Reservations in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity Reservations in an EC2 Fleet, see EC2 Fleet Example Configurations in the Amazon Elastic Compute Cloud User Guide.

" + }, + "CapacityReservationOptionsRequest":{ + "type":"structure", + "members":{ + "UsageStrategy":{ + "shape":"FleetCapacityReservationUsageStrategy", + "documentation":"

Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.

If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price or prioritized) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price or prioritized).

If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.

" + } + }, + "documentation":"

Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.

This strategy can only be used if the EC2 Fleet is of type instant.

For more information about Capacity Reservations, see On-Demand Capacity Reservations in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity Reservations in an EC2 Fleet, see EC2 Fleet Example Configurations in the Amazon Elastic Compute Cloud User Guide.
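
A hedged CreateFleet sketch showing where this option sits; it is assumed to nest under OnDemandOptions in the request, and the launch template ID and target capacity are placeholders:

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    ec2.create_fleet(
        Type='instant',   # this usage strategy requires a fleet of type instant
        LaunchTemplateConfigs=[{
            'LaunchTemplateSpecification': {
                'LaunchTemplateId': 'lt-0123456789abcdef0',
                'Version': '$Default',
            },
        }],
        TargetCapacitySpecification={
            'TotalTargetCapacity': 2,
            'DefaultTargetCapacityType': 'on-demand',
        },
        OnDemandOptions={
            'CapacityReservationOptions': {'UsageStrategy': 'use-capacity-reservations-first'},
        },
    )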

" + }, "CapacityReservationPreference":{ "type":"string", "enum":[ @@ -6688,6 +6748,11 @@ "documentation":"

The transport protocol used by the Client VPN endpoint.

", "locationName":"transportProtocol" }, + "VpnPort":{ + "shape":"Integer", + "documentation":"

The port number for the Client VPN endpoint.

", + "locationName":"vpnPort" + }, "AssociatedTargetNetworks":{ "shape":"AssociatedTargetNetworkSet", "documentation":"

Information about the associated target networks. A target network is a subnet in a VPC.

", @@ -7162,8 +7227,8 @@ "locationName":"encrypted" }, "KmsKeyId":{ - "shape":"String", - "documentation":"

An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the snapshot is being copied to.

", + "shape":"KmsKeyId", + "documentation":"

An identifier for the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the snapshot is being copied to.

Amazon EBS does not support asymmetric CMKs.
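
A sketch of an encrypted cross-Region copy using an alias-form CMK identifier (the image ID, Regions, and alias are placeholders):

    import botocore.session

    # The client Region is the destination Region; the CMK must exist there.
    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    ec2.copy_image(
        Name='encrypted-copy',
        SourceImageId='ami-0123456789abcdef0',
        SourceRegion='us-west-2',
        Encrypted=True,
        KmsKeyId='alias/ExampleAlias',
    )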

", "locationName":"kmsKeyId" }, "Name":{ @@ -7415,6 +7480,10 @@ "shape":"TransportProtocol", "documentation":"

The transport protocol to be used by the VPN session.

Default value: udp

" }, + "VpnPort":{ + "shape":"Integer", + "documentation":"

The port number to assign to the Client VPN endpoint for TCP and UDP traffic.

Valid Values: 443 | 1194

Default Value: 443
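
A hedged sketch of creating a Client VPN endpoint on port 1194 (the client CIDR and certificate ARNs are placeholders):

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    ec2.create_client_vpn_endpoint(
        ClientCidrBlock='10.100.0.0/16',
        ServerCertificateArn='arn:aws:acm:us-east-1:111122223333:certificate/EXAMPLE',
        AuthenticationOptions=[{
            'Type': 'certificate-authentication',
            'MutualAuthentication': {
                'ClientRootCertificateChainArn': 'arn:aws:acm:us-east-1:111122223333:certificate/EXAMPLE',
            },
        }],
        ConnectionLogOptions={'Enabled': False},
        TransportProtocol='udp',
        VpnPort=1194,   # defaults to 443 if omitted
    )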

" + }, "Description":{ "shape":"String", "documentation":"

A brief description of the Client VPN endpoint.

" @@ -7476,7 +7545,7 @@ "documentation":"

The IPv4 address range, in CIDR notation, of the route destination. For example:

  • To add a route for Internet access, enter 0.0.0.0/0

  • To add a route for a peered VPC, enter the peered VPC's IPv4 CIDR range

  • To add a route for an on-premises network, enter the AWS Site-to-Site VPN connection's IPv4 CIDR range

Route address ranges cannot overlap with the CIDR range specified for client allocation.

" }, "TargetVpcSubnetId":{ - "shape":"String", + "shape":"SubnetId", "documentation":"

The ID of the subnet through which you want to route traffic. The specified subnet must be an existing target network of the Client VPN endpoint.

" }, "Description":{ @@ -7658,7 +7727,7 @@ "members":{ "LaunchTemplateAndOverrides":{ "shape":"LaunchTemplateAndOverridesResponse", - "documentation":"

The launch templates and overrides that were used for launching the instances. Any parameters that you specify in the Overrides override the same parameters in the launch template.

", + "documentation":"

The launch templates and overrides that were used for launching the instances. The values that you specify in the Overrides replace the values in the launch template.

", "locationName":"launchTemplateAndOverrides" }, "Lifecycle":{ @@ -7673,7 +7742,7 @@ }, "ErrorMessage":{ "shape":"String", - "documentation":"

The error message that describes why the instance could not be launched. For more information about error messages, see ee Error Codes.

", + "documentation":"

The error message that describes why the instance could not be launched. For more information about error messages, see Error Codes.

", "locationName":"errorMessage" } }, @@ -7691,7 +7760,7 @@ "members":{ "LaunchTemplateAndOverrides":{ "shape":"LaunchTemplateAndOverridesResponse", - "documentation":"

The launch templates and overrides that were used for launching the instances. Any parameters that you specify in the Overrides override the same parameters in the launch template.

", + "documentation":"

The launch templates and overrides that were used for launching the instances. The values that you specify in the Overrides replace the values in the launch template.

", "locationName":"launchTemplateAndOverrides" }, "Lifecycle":{ @@ -7711,7 +7780,7 @@ }, "Platform":{ "shape":"PlatformValues", - "documentation":"

The value is Windows for Windows instances; otherwise blank.

", + "documentation":"

The value is Windows for Windows instances. Otherwise, the value is blank.

", "locationName":"platform" } }, @@ -7737,7 +7806,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

" }, "SpotOptions":{ "shape":"SpotOptionsRequest", @@ -7854,6 +7923,10 @@ "LogFormat":{ "shape":"String", "documentation":"

The fields to include in the flow log record, in the order in which they should appear. For a list of available fields, see Flow Log Records. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must specify at least one field.

Specify the fields using the ${field-id} format, separated by spaces. For the AWS CLI, use single quotation marks (' ') to surround the parameter value.

Only applicable to flow logs that are published to an Amazon S3 bucket.

" + }, + "MaxAggregationInterval":{ + "shape":"Integer", + "documentation":"

The maximum interval of time during which a flow of packets is captured and aggregated into a flow log record. You can specify 60 seconds (1 minute) or 600 seconds (10 minutes).

For network interfaces attached to Nitro-based instances, the aggregation interval is always 60 seconds, regardless of the value that you specify.

Default: 600
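
A sketch publishing VPC flow logs to Amazon S3 with the one-minute aggregation interval (the VPC ID and bucket ARN are placeholders):

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    ec2.create_flow_logs(
        ResourceIds=['vpc-0123456789abcdef0'],
        ResourceType='VPC',
        TrafficType='ALL',
        LogDestinationType='s3',
        LogDestination='arn:aws:s3:::example-flow-log-bucket',
        MaxAggregationInterval=60,   # 60 or 600 seconds
    )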

" } } }, @@ -7950,7 +8023,7 @@ "locationName":"dryRun" }, "InstanceId":{ - "shape":"String", + "shape":"InstanceId", "documentation":"

The ID of the instance.

", "locationName":"instanceId" }, @@ -8760,7 +8833,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the Outpost.

" }, "VpcId":{ - "shape":"String", + "shape":"VpcId", "documentation":"

The ID of the VPC.

" }, "DryRun":{ @@ -9368,7 +9441,7 @@ }, "VpcEndpointId":{ "shape":"VpcEndpointId", - "documentation":"

The ID of the endpoint.

" + "documentation":"

The ID of the endpoint.

" }, "ConnectionNotificationArn":{ "shape":"String", @@ -9380,7 +9453,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" } } }, @@ -9394,7 +9467,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "locationName":"clientToken" } } @@ -9443,11 +9516,16 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" }, "PrivateDnsEnabled":{ "shape":"Boolean", - "documentation":"

(Interface endpoint) Indicate whether to associate a private hosted zone with the specified VPC. The private hosted zone contains a record set for the default public DNS name for the service for the Region (for example, kinesis.us-east-1.amazonaws.com) which resolves to the private IP addresses of the endpoint network interfaces in the VPC. This enables you to make requests to the default public DNS name for the service instead of the public DNS names that are automatically generated by the VPC endpoint service.

To use a private hosted zone, you must set the following VPC attributes to true: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to set the VPC attributes.

Default: true

" + "documentation":"

(Interface endpoint) Indicates whether to associate a private hosted zone with the specified VPC. The private hosted zone contains a record set for the default public DNS name for the service for the Region (for example, kinesis.us-east-1.amazonaws.com), which resolves to the private IP addresses of the endpoint network interfaces in the VPC. This enables you to make requests to the default public DNS name for the service instead of the public DNS names that are automatically generated by the VPC endpoint service.

To use a private hosted zone, you must set the following VPC attributes to true: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to set the VPC attributes.

Default: true

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to associate with the endpoint.

", + "locationName":"TagSpecification" } }, "documentation":"

Contains the parameters for CreateVpcEndpoint.
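
An interface-endpoint sketch using private DNS and the newly supported tags-on-creation (the service name, VPC, subnet, and security group IDs are placeholders, and the vpc-endpoint ResourceType value is an assumption):

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    ec2.create_vpc_endpoint(
        VpcEndpointType='Interface',
        VpcId='vpc-0123456789abcdef0',
        ServiceName='com.amazonaws.us-east-1.kinesis-streams',   # placeholder service name
        SubnetIds=['subnet-0123456789abcdef0'],
        SecurityGroupIds=['sg-0123456789abcdef0'],
        PrivateDnsEnabled=True,
        TagSpecifications=[{
            'ResourceType': 'vpc-endpoint',   # assumed taggable-on-creation resource type
            'Tags': [{'Key': 'Owner', 'Value': 'TeamA'}],
        }],
    )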

" @@ -9462,7 +9540,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "locationName":"clientToken" } }, @@ -9478,7 +9556,11 @@ }, "AcceptanceRequired":{ "shape":"Boolean", - "documentation":"

Indicate whether requests from service consumers to create an endpoint to your service must be accepted. To accept a request, use AcceptVpcEndpointConnections.

" + "documentation":"

Indicates whether requests from service consumers to create an endpoint to your service must be accepted. To accept a request, use AcceptVpcEndpointConnections.

" + }, + "PrivateDnsName":{ + "shape":"String", + "documentation":"

The private DNS name to assign to the VPC endpoint service.

" }, "NetworkLoadBalancerArns":{ "shape":"ValueStringList", @@ -9487,7 +9569,12 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to associate with the service.
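
A hedged sketch pairing the new PrivateDnsName with a Network Load Balancer-backed service (the ARN and domain are placeholders); the domain then has to be verified with StartVpcEndpointServicePrivateDnsVerification:

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    ec2.create_vpc_endpoint_service_configuration(
        NetworkLoadBalancerArns=[
            'arn:aws:elasticloadbalancing:us-east-1:111122223333:loadbalancer/net/example/0123456789abcdef',
        ],
        AcceptanceRequired=True,
        PrivateDnsName='service.example.com',   # placeholder domain you control
    )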

", + "locationName":"TagSpecification" } } }, @@ -9501,7 +9588,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "locationName":"clientToken" } } @@ -9558,6 +9645,14 @@ "documentation":"

Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. You cannot specify the range of IP addresses, or the size of the CIDR block.

", "locationName":"amazonProvidedIpv6CidrBlock" }, + "Ipv6Pool":{ + "shape":"String", + "documentation":"

The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block.

" + }, + "Ipv6CidrBlock":{ + "shape":"String", + "documentation":"

The IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool in the request.

To let Amazon choose the IPv6 CIDR block for you, omit this parameter.
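
A sketch of creating a VPC with a BYOIP IPv6 block (the pool ID and CIDRs are placeholders):

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    ec2.create_vpc(
        CidrBlock='10.0.0.0/16',
        Ipv6Pool='ipv6pool-ec2-EXAMPLE',
        Ipv6CidrBlock='2001:db8:1234::/56',   # omit to let Amazon choose from the pool
    )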

" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -9791,6 +9886,7 @@ }, "DateTime":{"type":"timestamp"}, "DedicatedHostFlag":{"type":"boolean"}, + "DedicatedHostId":{"type":"string"}, "DefaultRouteTableAssociationValue":{ "type":"string", "enum":[ @@ -9812,6 +9908,7 @@ "on-demand" ] }, + "DefaultingDhcpOptionsId":{"type":"string"}, "DeleteClientVpnEndpointRequest":{ "type":"structure", "required":["ClientVpnEndpointId"], @@ -9848,7 +9945,7 @@ "documentation":"

The ID of the Client VPN endpoint from which the route is to be deleted.

" }, "TargetVpcSubnetId":{ - "shape":"String", + "shape":"SubnetId", "documentation":"

The ID of the target subnet used by the route.

" }, "DestinationCidrBlock":{ @@ -10109,7 +10206,7 @@ "required":["KeyName"], "members":{ "KeyName":{ - "shape":"String", + "shape":"KeyPairName", "documentation":"

The name of the key pair.

" }, "DryRun":{ @@ -10583,7 +10680,7 @@ "required":["SubnetId"], "members":{ "SubnetId":{ - "shape":"String", + "shape":"SubnetId", "documentation":"

The ID of the subnet.

" }, "DryRun":{ @@ -11050,7 +11147,7 @@ "members":{ "Cidr":{ "shape":"String", - "documentation":"

The public IPv4 address range, in CIDR notation. The prefix must be the same prefix that you specified when you provisioned the address range.

" + "documentation":"

The address range, in CIDR notation. The prefix must be the same prefix that you specified when you provisioned the address range.

" }, "DryRun":{ "shape":"Boolean", @@ -11073,7 +11170,7 @@ "required":["ImageId"], "members":{ "ImageId":{ - "shape":"String", + "shape":"ImageId", "documentation":"

The ID of the AMI.

" }, "DryRun":{ @@ -11176,7 +11273,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • allocation-id - [EC2-VPC] The allocation ID for the address.

  • association-id - [EC2-VPC] The association ID for the address.

  • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

  • instance-id - The ID of the instance the address is associated with, if any.

  • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

  • network-interface-owner-id - The AWS account ID of the owner.

  • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

  • public-ip - The Elastic IP address.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • allocation-id - [EC2-VPC] The allocation ID for the address.

  • association-id - [EC2-VPC] The association ID for the address.

  • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

  • instance-id - The ID of the instance the address is associated with, if any.

  • network-border-group - The location from where the IP address is advertised.

  • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

  • network-interface-owner-id - The AWS account ID of the owner.

  • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

  • public-ip - The Elastic IP address.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", "locationName":"Filter" }, "PublicIps":{ @@ -11457,7 +11554,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • description - The description of the authorization rule.

  • destination-cidr - The CIDR of the network to which the authorization rule applies.

  • group-id - The ID of the Active Directory group to which the authorization rule grants access.

", "locationName":"Filter" }, "MaxResults":{ @@ -11496,7 +11593,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • connection-id - The ID of the connection.

  • username - For Active Directory client authentication, the user name of the client who established the client connection.

", "locationName":"Filter" }, "NextToken":{ @@ -11551,7 +11648,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • endpoint-id - The ID of the Client VPN endpoint.

  • transport-protocol - The transport protocol (tcp | udp).

", "locationName":"Filter" }, "DryRun":{ @@ -11590,7 +11687,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • destination-cidr - The CIDR of the route destination.

  • origin - How the route was associated with the Client VPN endpoint (associate | add-route).

  • target-subnet - The ID of the subnet through which traffic is routed.

", "locationName":"Filter" }, "MaxResults":{ @@ -11649,7 +11746,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • association-id - The ID of the association.

  • target-network-id - The ID of the subnet specified as the target network.

  • vpc-id - The ID of the VPC in which the target network is located.

", "locationName":"Filter" }, "DryRun":{ @@ -11851,6 +11948,11 @@ "NextToken":{ "shape":"String", "documentation":"

The token for the next page of results.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", + "locationName":"Filter" } } }, @@ -11975,6 +12077,11 @@ "shape":"ExportTaskIdStringList", "documentation":"

The export task IDs.

", "locationName":"exportTaskId" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

The filters for the export tasks.

", + "locationName":"Filter" } } }, @@ -12103,7 +12210,7 @@ "members":{ "LaunchTemplateAndOverrides":{ "shape":"LaunchTemplateAndOverridesResponse", - "documentation":"

The launch templates and overrides that were used for launching the instances. Any parameters that you specify in the Overrides override the same parameters in the launch template.

", + "documentation":"

The launch templates and overrides that were used for launching the instances. The values that you specify in the Overrides replace the values in the launch template.

", "locationName":"launchTemplateAndOverrides" }, "Lifecycle":{ @@ -12118,7 +12225,7 @@ }, "ErrorMessage":{ "shape":"String", - "documentation":"

The error message that describes why the instance could not be launched. For more information about error messages, see ee Error Codes.

", + "documentation":"

The error message that describes why the instance could not be launched. For more information about error messages, see Error Codes.

", "locationName":"errorMessage" } }, @@ -12246,7 +12353,7 @@ "members":{ "LaunchTemplateAndOverrides":{ "shape":"LaunchTemplateAndOverridesResponse", - "documentation":"

The launch templates and overrides that were used for launching the instances. Any parameters that you specify in the Overrides override the same parameters in the launch template.

", + "documentation":"

The launch templates and overrides that were used for launching the instances. The values that you specify in the Overrides replace the values in the launch template.

", "locationName":"launchTemplateAndOverrides" }, "Lifecycle":{ @@ -12266,7 +12373,7 @@ }, "Platform":{ "shape":"PlatformValues", - "documentation":"

The value is Windows for Windows instances; otherwise blank.

", + "documentation":"

The value is Windows for Windows instances. Otherwise, the value is blank.

", "locationName":"platform" } }, @@ -12977,7 +13084,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • auto-recovery-supported - Indicates whether auto recovery is supported. (true | false)

  • bare-metal - Indicates whether it is a bare metal instance type. (true | false)

  • burstable-performance-supported - Indicates whether it is a burstable performance instance type. (true | false)

  • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family. (true | false)

  • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized. (true | false)

  • ebs-info.encryption-support - Indicates whether EBS encryption is supported. (true | false)

  • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier. (true | false)

  • hibernation-supported - Indicates whether On-Demand hibernation is supported. (true | false)

  • hypervisor - The hypervisor used. (nitro | xen)

  • instance-storage-info.disk.count - The number of local disks.

  • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

  • instance-storage-info.disk.type - The storage technology for the local instance storage disks. (hdd | ssd)

  • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

  • instance-storage-supported - Indicates whether the instance type has local instance storage. (true | false)

  • memory-info.size-in-mib - The memory size.

  • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required. (required | supported | unsupported)

  • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

  • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

  • network-info.ipv6-supported - Indicates whether the instance type supports IPv6. (true | false)

  • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

  • network-info.network-performance - Describes the network performance.

  • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

  • vcpu-info.default-cores - The default number of cores for the instance type.

  • vcpu-info.default-threads-per-core - The default number of threads per cores for the instance type.

  • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • auto-recovery-supported - Indicates whether auto recovery is supported. (true | false)

  • bare-metal - Indicates whether it is a bare metal instance type. (true | false)

  • burstable-performance-supported - Indicates whether it is a burstable performance instance type. (true | false)

  • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family. (true | false)

  • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized. (true | false)

  • ebs-info.encryption-support - Indicates whether EBS encryption is supported. (true | false)

  • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier. (true | false)

  • hibernation-supported - Indicates whether On-Demand hibernation is supported. (true | false)

  • hypervisor - The hypervisor used. (nitro | xen)

  • instance-storage-info.disk.count - The number of local disks.

  • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

  • instance-storage-info.disk.type - The storage technology for the local instance storage disks. (hdd | ssd)

  • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

  • instance-storage-supported - Indicates whether the instance type has local instance storage. (true | false)

  • memory-info.size-in-mib - The memory size.

  • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required. (required | supported | unsupported)

  • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

  • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

  • network-info.ipv6-supported - Indicates whether the instance type supports IPv6. (true | false)

  • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

  • network-info.network-performance - Describes the network performance.

  • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

  • vcpu-info.default-cores - The default number of cores for the instance type.

  • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

  • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.
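
For example, several of these filters can be combined; a minimal sketch listing current-generation, non-bare-metal instance types:

    import botocore.session

    ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

    resp = ec2.describe_instance_types(Filters=[
        {'Name': 'current-generation', 'Values': ['true']},
        {'Name': 'bare-metal', 'Values': ['false']},
    ])
    for it in resp['InstanceTypes']:
        print(it['InstanceType'])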

", "locationName":"Filter" }, "MaxResults":{ @@ -13010,7 +13117,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

  • architecture - The instance architecture (i386 | x86_64 | arm64).

  • availability-zone - The Availability Zone of the instance.

  • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

  • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

  • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

  • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

  • block-device-mapping.volume-id - The volume ID of the EBS volume.

  • client-token - The idempotency token you provided when you launched the instance.

  • dns-name - The public DNS name of the instance.

  • group-id - The ID of the security group for the instance. EC2-Classic only.

  • group-name - The name of the security group for the instance. EC2-Classic only.

  • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

  • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

  • hypervisor - The hypervisor type of the instance (ovm | xen).

  • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. image-id - The ID of the image used to launch the instance.

  • instance-id - The ID of the instance.

  • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

  • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

  • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

  • instance-type - The type of instance (for example, t2.micro).

  • instance.group-id - The ID of the security group for the instance.

  • instance.group-name - The name of the security group for the instance.

  • ip-address - The public IPv4 address of the instance.

  • kernel-id - The kernel ID.

  • key-name - The name of the key pair used when the instance was launched.

  • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

  • launch-time - The time when the instance was launched.

  • metadata-options.http-tokens - The metadata request authorization state (optional | required)

  • metadata-options.http-put-response-hop-limit - The http metadata request put response hop limit (integer, possible values 1 to 64)

  • metadata-options.http-endpoint - Enable or disable metadata access on http endpoint (enabled | disabled)

  • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

  • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

  • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

  • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

  • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

  • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • network-interface.attachment.attachment-id - The ID of the interface attachment.

  • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

  • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • network-interface.attachment.device-index - The device index to which the network interface is attached.

  • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

  • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

  • network-interface.availability-zone - The Availability Zone for the network interface.

  • network-interface.description - The description of the network interface.

  • network-interface.group-id - The ID of a security group associated with the network interface.

  • network-interface.group-name - The name of a security group associated with the network interface.

  • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

  • network-interface.mac-address - The MAC address of the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.owner-id - The ID of the owner of the network interface.

  • network-interface.private-dns-name - The private DNS name of the network interface.

  • network-interface.requester-id - The requester ID for the network interface.

  • network-interface.requester-managed - Indicates whether the network interface is being managed by AWS.

  • network-interface.status - The status of the network interface (available) | in-use).

  • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • network-interface.subnet-id - The ID of the subnet for the network interface.

  • network-interface.vpc-id - The ID of the VPC for the network interface.

  • owner-id - The AWS account ID of the instance owner.

  • placement-group-name - The name of the placement group for the instance.

  • placement-partition-number - The partition in which the instance is located.

  • platform - The platform. To list only Windows instances, use windows.

  • private-dns-name - The private IPv4 DNS name of the instance.

  • private-ip-address - The private IPv4 address of the instance.

  • product-code - The product code associated with the AMI used to launch the instance.

  • product-code.type - The type of product code (devpay | marketplace).

  • ramdisk-id - The RAM disk ID.

  • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

  • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

  • root-device-name - The device name of the root device volume (for example, /dev/sda1).

  • root-device-type - The type of the root device volume (ebs | instance-store).

  • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

  • spot-instance-request-id - The ID of the Spot Instance request.

  • state-reason-code - The reason code for the state change.

  • state-reason-message - A message that describes the state change.

  • subnet-id - The ID of the subnet for the instance.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

  • tenancy - The tenancy of an instance (dedicated | default | host).

  • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

  • vpc-id - The ID of the VPC that the instance is running in.

", + "documentation":"

The filters.

  • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

  • architecture - The instance architecture (i386 | x86_64 | arm64).

  • availability-zone - The Availability Zone of the instance.

  • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

  • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

  • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

  • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

  • block-device-mapping.volume-id - The volume ID of the EBS volume.

  • client-token - The idempotency token you provided when you launched the instance.

  • dns-name - The public DNS name of the instance.

  • group-id - The ID of the security group for the instance. EC2-Classic only.

  • group-name - The name of the security group for the instance. EC2-Classic only.

  • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

  • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

  • hypervisor - The hypervisor type of the instance (ovm | xen).

  • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

  • image-id - The ID of the image used to launch the instance.

  • instance-id - The ID of the instance.

  • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

  • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

  • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

  • instance-type - The type of instance (for example, t2.micro).

  • instance.group-id - The ID of the security group for the instance.

  • instance.group-name - The name of the security group for the instance.

  • ip-address - The public IPv4 address of the instance.

  • kernel-id - The kernel ID.

  • key-name - The name of the key pair used when the instance was launched.

  • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

  • launch-time - The time when the instance was launched.

  • metadata-options.http-tokens - The metadata request authorization state (optional | required)

  • metadata-options.http-put-response-hop-limit - The http metadata request put response hop limit (integer, possible values 1 to 64)

  • metadata-options.http-endpoint - Enable or disable metadata access on http endpoint (enabled | disabled)

  • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

  • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

  • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

  • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

  • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

  • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • network-interface.attachment.attachment-id - The ID of the interface attachment.

  • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

  • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • network-interface.attachment.device-index - The device index to which the network interface is attached.

  • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

  • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

  • network-interface.availability-zone - The Availability Zone for the network interface.

  • network-interface.description - The description of the network interface.

  • network-interface.group-id - The ID of a security group associated with the network interface.

  • network-interface.group-name - The name of a security group associated with the network interface.

  • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

  • network-interface.mac-address - The MAC address of the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.owner-id - The ID of the owner of the network interface.

  • network-interface.private-dns-name - The private DNS name of the network interface.

  • network-interface.requester-id - The requester ID for the network interface.

  • network-interface.requester-managed - Indicates whether the network interface is being managed by AWS.

  • network-interface.status - The status of the network interface (available | in-use).

  • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • network-interface.subnet-id - The ID of the subnet for the network interface.

  • network-interface.vpc-id - The ID of the VPC for the network interface.

  • owner-id - The AWS account ID of the instance owner.

  • placement-group-name - The name of the placement group for the instance.

  • placement-partition-number - The partition in which the instance is located.

  • platform - The platform. To list only Windows instances, use windows.

  • private-dns-name - The private IPv4 DNS name of the instance.

  • private-ip-address - The private IPv4 address of the instance.

  • product-code - The product code associated with the AMI used to launch the instance.

  • product-code.type - The type of product code (devpay | marketplace).

  • ramdisk-id - The RAM disk ID.

  • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

  • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

  • root-device-name - The device name of the root device volume (for example, /dev/sda1).

  • root-device-type - The type of the root device volume (ebs | instance-store).

  • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

  • spot-instance-request-id - The ID of the Spot Instance request.

  • state-reason-code - The reason code for the state change.

  • state-reason-message - A message that describes the state change.

  • subnet-id - The ID of the subnet for the instance.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

  • tenancy - The tenancy of an instance (dedicated | default | host).

  • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

  • vpc-id - The ID of the VPC that the instance is running in.
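The filter names above map directly onto the Filters parameter of the DescribeInstances call. A minimal botocore sketch, assuming a us-east-1 client with valid credentials and placeholder tag and VPC values:

```python
# Minimal sketch: DescribeInstances with two of the filters listed above.
# Region, tag value, and VPC ID are placeholder assumptions.
import botocore.session

session = botocore.session.get_session()
ec2 = session.create_client("ec2", region_name="us-east-1")

paginator = ec2.get_paginator("describe_instances")
for page in paginator.paginate(
    Filters=[
        {"Name": "tag:Owner", "Values": ["TeamA"]},
        {"Name": "vpc-id", "Values": ["vpc-0123456789abcdef0"]},
    ]
):
    for reservation in page["Reservations"]:
        for instance in reservation["Instances"]:
            print(instance["InstanceId"], instance["State"]["Name"])
```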

", "locationName":"Filter" }, "InstanceIds":{ @@ -13098,6 +13205,48 @@ } } }, + "DescribeIpv6PoolsRequest":{ + "type":"structure", + "members":{ + "PoolIds":{ + "shape":"ValueStringList", + "documentation":"

The IDs of the IPv6 address pools.

", + "locationName":"PoolId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + }, + "MaxResults":{ + "shape":"Ipv6PoolMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", + "locationName":"Filter" + } + } + }, + "DescribeIpv6PoolsResult":{ + "type":"structure", + "members":{ + "Ipv6Pools":{ + "shape":"Ipv6PoolSet", + "documentation":"

Information about the IPv6 address pools.

", + "locationName":"ipv6PoolSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.
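For the new DescribeIpv6Pools operation defined here, a hedged usage sketch; the region, the tag filter value, and the generated method name are assumed from the shape names above:

```python
# Sketch: list IPv6 address pools, filtering on a placeholder tag key.
import botocore.session

ec2 = botocore.session.get_session().create_client("ec2", region_name="us-east-1")

resp = ec2.describe_ipv6_pools(
    Filters=[{"Name": "tag-key", "Values": ["Owner"]}],
    MaxResults=10,
)
for pool in resp["Ipv6Pools"]:
    cidrs = [block["Cidr"] for block in pool.get("PoolCidrBlocks", [])]
    print(pool["PoolId"], pool.get("Description", ""), cidrs)
```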

", + "locationName":"nextToken" + } + } + }, "DescribeKeyPairsRequest":{ "type":"structure", "members":{ @@ -13111,6 +13260,11 @@ "documentation":"

The key pair names.

Default: Describes all your key pairs.

", "locationName":"KeyName" }, + "KeyPairIds":{ + "shape":"KeyPairIdStringList", + "documentation":"

The IDs of the key pairs.
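With the new KeyPairIds member, DescribeKeyPairs can be called by ID as well as by name. A short sketch; the key pair ID is a placeholder:

```python
# Sketch: look up a key pair by its ID rather than its name.
import botocore.session

ec2 = botocore.session.get_session().create_client("ec2", region_name="us-east-1")

resp = ec2.describe_key_pairs(KeyPairIds=["key-0abcdef1234567890"])
for kp in resp["KeyPairs"]:
    print(kp["KeyPairId"], kp["KeyName"], kp["KeyFingerprint"])
```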

", + "locationName":"KeyPairId" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -13757,7 +13911,7 @@ }, "MaxResults":{ "shape":"DescribeNetworkInterfacesMaxResults", - "documentation":"

The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

" + "documentation":"

The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results. You cannot specify this parameter and the network interface IDs parameter in the same request.

" } }, "documentation":"

Contains the parameters for DescribeNetworkInterfaces.

" @@ -13795,6 +13949,11 @@ "shape":"PlacementGroupStringList", "documentation":"

The names of the placement groups.

Default: Describes all your placement groups, or only those otherwise specified.

", "locationName":"groupName" + }, + "GroupIds":{ + "shape":"PlacementGroupIdStringList", + "documentation":"

The IDs of the placement groups.
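Placement groups likewise gain an ID-based lookup via GroupIds; a minimal sketch with a placeholder group ID:

```python
# Sketch: describe a placement group by ID (the pg-... value is a placeholder).
import botocore.session

ec2 = botocore.session.get_session().create_client("ec2", region_name="us-east-1")

resp = ec2.describe_placement_groups(GroupIds=["pg-0123456789abcdef0"])
for group in resp["PlacementGroups"]:
    print(group["GroupId"], group["GroupName"], group["State"])
```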

", + "locationName":"GroupId" } } }, @@ -15604,7 +15763,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • connection-notification-arn - The ARN of SNS topic for the notification.

  • connection-notification-id - The ID of the notification.

  • connection-notification-state - The state of the notification (Enabled | Disabled).

  • connection-notification-type - The type of notification (Topic).

  • service-id - The ID of the endpoint service.

  • vpc-endpoint-id - The ID of the VPC endpoint.

", + "documentation":"

One or more filters.

  • connection-notification-arn - The ARN of the SNS topic for the notification.

  • connection-notification-id - The ID of the notification.

  • connection-notification-state - The state of the notification (Enabled | Disabled).

  • connection-notification-type - The type of notification (Topic).

  • service-id - The ID of the endpoint service.

  • vpc-endpoint-id - The ID of the VPC endpoint.

", "locationName":"Filter" }, "MaxResults":{ @@ -15646,7 +15805,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned.

" + "documentation":"

The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1,000; if MaxResults is given a value larger than 1,000, only 1,000 results are returned.

" }, "NextToken":{ "shape":"String", @@ -15688,7 +15847,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned.

" + "documentation":"

The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1,000; if MaxResults is given a value larger than 1,000, only 1,000 results are returned.

" }, "NextToken":{ "shape":"String", @@ -15730,7 +15889,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned.

" + "documentation":"

The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1,000; if MaxResults is given a value larger than 1,000, only 1,000 results are returned.

" }, "NextToken":{ "shape":"String", @@ -15767,12 +15926,12 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • service-name: The name of the service.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", + "documentation":"

One or more filters.

  • service-name - The name of the service.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", "locationName":"Filter" }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

Constraint: If the value is greater than 1000, we return only 1000 items.

" + "documentation":"

The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

Constraint: If the value is greater than 1,000, we return only 1,000 items.

" }, "NextToken":{ "shape":"String", @@ -15816,12 +15975,12 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • service-name: The name of the service.

  • vpc-id: The ID of the VPC in which the endpoint resides.

  • vpc-endpoint-id: The ID of the endpoint.

  • vpc-endpoint-state - The state of the endpoint (pendingAcceptance | pending | available | deleting | deleted | rejected | failed).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", + "documentation":"

One or more filters.

  • service-name - The name of the service.

  • vpc-id - The ID of the VPC in which the endpoint resides.

  • vpc-endpoint-id - The ID of the endpoint.

  • vpc-endpoint-state - The state of the endpoint (pendingAcceptance | pending | available | deleting | deleted | rejected | failed).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", "locationName":"Filter" }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

Constraint: If the value is greater than 1000, we return only 1000 items.

" + "documentation":"

The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

Constraint: If the value is greater than 1,000, we return only 1,000 items.

" }, "NextToken":{ "shape":"String", @@ -15904,7 +16063,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • cidr - The primary IPv4 CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, /28).

  • cidr-block-association.cidr-block - An IPv4 CIDR block associated with the VPC.

  • cidr-block-association.association-id - The association ID for an IPv4 CIDR block associated with the VPC.

  • cidr-block-association.state - The state of an IPv4 CIDR block associated with the VPC.

  • dhcp-options-id - The ID of a set of DHCP options.

  • ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated with the VPC.

  • ipv6-cidr-block-association.association-id - The association ID for an IPv6 CIDR block associated with the VPC.

  • ipv6-cidr-block-association.state - The state of an IPv6 CIDR block associated with the VPC.

  • isDefault - Indicates whether the VPC is the default VPC.

  • owner-id - The ID of the AWS account that owns the VPC.

  • state - The state of the VPC (pending | available).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC.

", + "documentation":"

One or more filters.

  • cidr - The primary IPv4 CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, /28).

  • cidr-block-association.cidr-block - An IPv4 CIDR block associated with the VPC.

  • cidr-block-association.association-id - The association ID for an IPv4 CIDR block associated with the VPC.

  • cidr-block-association.state - The state of an IPv4 CIDR block associated with the VPC.

  • dhcp-options-id - The ID of a set of DHCP options.

  • ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated with the VPC.

  • ipv6-cidr-block-association.ipv6-pool - The ID of the IPv6 address pool from which the IPv6 CIDR block is allocated.

  • ipv6-cidr-block-association.association-id - The association ID for an IPv6 CIDR block associated with the VPC.

  • ipv6-cidr-block-association.state - The state of an IPv6 CIDR block associated with the VPC.

  • isDefault - Indicates whether the VPC is the default VPC.

  • owner-id - The ID of the AWS account that owns the VPC.

  • state - The state of the VPC (pending | available).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC.
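These VPC filters, including the newly documented ipv6-cidr-block-association.ipv6-pool filter, are passed the same way as the instance filters earlier; the pool ID below is a placeholder:

```python
# Sketch: DescribeVpcs filtered on state and on the IPv6 address pool that
# backs the VPC's IPv6 CIDR block.
import botocore.session

ec2 = botocore.session.get_session().create_client("ec2", region_name="us-east-1")

resp = ec2.describe_vpcs(
    Filters=[
        {"Name": "state", "Values": ["available"]},
        {"Name": "ipv6-cidr-block-association.ipv6-pool", "Values": ["ipv6pool-ec2-012345"]},
    ]
)
for vpc in resp["Vpcs"]:
    print(vpc["VpcId"], vpc["CidrBlock"], vpc["IsDefault"])
```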

", "locationName":"Filter" }, "VpcIds":{ @@ -16069,7 +16228,7 @@ "required":["AttachmentId"], "members":{ "AttachmentId":{ - "shape":"AttachmentId", + "shape":"NetworkInterfaceAttachmentId", "documentation":"

The ID of the attachment.

", "locationName":"attachmentId" }, @@ -16520,7 +16679,7 @@ "type":"structure", "members":{ "AssociationId":{ - "shape":"String", + "shape":"ElasticIpAssociationId", "documentation":"

[EC2-VPC] The association ID. Required for EC2-VPC.

" }, "PublicIp":{ @@ -16611,7 +16770,7 @@ "required":["AssociationId"], "members":{ "AssociationId":{ - "shape":"String", + "shape":"SubnetCidrAssociationId", "documentation":"

The association ID for the CIDR block.

", "locationName":"associationId" } @@ -16829,7 +16988,7 @@ "members":{ "SizeInGB":{ "shape":"DiskSize", - "documentation":"

The size of the disk in GiB.

", + "documentation":"

The size of the disk in GB.

", "locationName":"sizeInGB" }, "Count":{ @@ -16883,6 +17042,14 @@ "locationName":"item" } }, + "DnsNameState":{ + "type":"string", + "enum":[ + "pendingVerification", + "verified", + "failed" + ] + }, "DnsServersOptionsModifyStructure":{ "type":"structure", "members":{ @@ -16937,17 +17104,17 @@ }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

The volume type. If you set the type to io1, you must also specify the IOPS that the volume supports.

Default: gp2

", + "documentation":"

The volume type. If you set the type to io1, you must also specify the Iops parameter. If you set the type to gp2, st1, sc1, or standard, you must omit the Iops parameter.

Default: gp2

", "locationName":"volumeType" }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted.

This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

" + }, "Encrypted":{ "shape":"Boolean", "documentation":"

Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

In no case can you remove encryption from an encrypted volume.

Encrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types.

", "locationName":"encrypted" - }, - "KmsKeyId":{ - "shape":"String", - "documentation":"

Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted.

This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

" } }, "documentation":"

Describes a block device for an EBS volume.
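In practice this EbsBlockDevice shape appears as the Ebs element of a BlockDeviceMapping passed to RunInstances, one of the calls the KmsKeyId note above allows. A hedged sketch; the AMI ID, device name, and key alias are placeholders, and DryRun is set so the request only checks permissions:

```python
# Sketch: RunInstances with an encrypted gp2 root volume keyed to a customer
# managed CMK. With DryRun=True the service only validates permissions and
# reports DryRunOperation instead of launching anything.
import botocore.session
from botocore.exceptions import ClientError

ec2 = botocore.session.get_session().create_client("ec2", region_name="us-east-1")

try:
    ec2.run_instances(
        ImageId="ami-0123456789abcdef0",
        InstanceType="t3.micro",
        MinCount=1,
        MaxCount=1,
        BlockDeviceMappings=[
            {
                "DeviceName": "/dev/sda1",
                "Ebs": {
                    "VolumeSize": 20,
                    "VolumeType": "gp2",  # gp2/st1/sc1/standard must omit Iops
                    "Encrypted": True,
                    "KmsKeyId": "alias/ExampleAlias",
                },
            }
        ],
        DryRun=True,
    )
except ClientError as err:
    print(err.response["Error"]["Code"])  # DryRunOperation if permitted
```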

" @@ -17010,7 +17177,7 @@ "locationName":"deleteOnTermination" }, "VolumeId":{ - "shape":"String", + "shape":"VolumeId", "documentation":"

The ID of the EBS volume.

", "locationName":"volumeId" } @@ -17037,6 +17204,11 @@ "shape":"EgressOnlyInternetGatewayId", "documentation":"

The ID of the egress-only internet gateway.

", "locationName":"egressOnlyInternetGatewayId" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags assigned to the egress-only internet gateway.

", + "locationName":"tagSet" } }, "documentation":"

Describes an egress-only internet gateway.

" @@ -17120,7 +17292,7 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

The type of Elastic Graphics accelerator.

" + "documentation":"

The type of Elastic Graphics accelerator. For more information about the values to specify for Type, see Elastic Graphics Basics, specifically the Elastic Graphics accelerator column, in the Amazon Elastic Compute Cloud User Guide for Windows Instances.

" } }, "documentation":"

A specification for an Elastic Graphics accelerator.

" @@ -17200,6 +17372,11 @@ "shape":"String", "documentation":"

The ID of the instance to which the Elastic Graphics accelerator is attached.

", "locationName":"instanceId" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags assigned to the Elastic Graphics accelerator.

", + "locationName":"tagSet" } }, "documentation":"

Describes an Elastic Graphics accelerator.

" @@ -17214,7 +17391,7 @@ }, "Count":{ "shape":"ElasticInferenceAcceleratorCount", - "documentation":"

The number of elastic inference accelerators of given type to be attached to the instance. Only positive values allowed. If not specified defaults to 1.

" + "documentation":"

The number of elastic inference accelerators to attach to the instance.

Default: 1

" } }, "documentation":"

Describes an elastic inference accelerator.

" @@ -17263,6 +17440,7 @@ "locationName":"item" } }, + "ElasticIpAssociationId":{"type":"string"}, "EnaSupport":{ "type":"string", "enum":[ @@ -17873,6 +18051,11 @@ "shape":"String", "documentation":"

The status message related to the export task.

", "locationName":"statusMessage" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the export task.

", + "locationName":"tagSet" } }, "documentation":"

Describes an instance export task.

" @@ -18084,6 +18267,10 @@ "fulfilled" ] }, + "FleetCapacityReservationUsageStrategy":{ + "type":"string", + "enum":["use-capacity-reservations-first"] + }, "FleetData":{ "type":"structure", "members":{ @@ -18109,7 +18296,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraints: Maximum 64 ASCII characters

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraints: Maximum 64 ASCII characters

", "locationName":"clientToken" }, "ExcessCapacityTerminationPolicy":{ @@ -18144,7 +18331,7 @@ }, "Type":{ "shape":"FleetType", - "documentation":"

The type of request. Indicates whether the EC2 Fleet only requests the target capacity, or also attempts to maintain it. If you request a certain target capacity, EC2 Fleet only places the required requests; it does not attempt to replenish instances if capacity is diminished, and does not submit requests in alternative capacity pools if capacity is unavailable. To maintain a certain target capacity, EC2 Fleet places the required requests to meet this target capacity. It also automatically replenishes any interrupted Spot Instances. Default: maintain.

", + "documentation":"

The type of request. Indicates whether the EC2 Fleet only requests the target capacity, or also attempts to maintain it. If you request a certain target capacity, EC2 Fleet only places the required requests; it does not attempt to replenish instances if capacity is diminished, and it does not submit requests in alternative capacity pools if capacity is unavailable. To maintain a certain target capacity, EC2 Fleet places the required requests to meet this target capacity. It also automatically replenishes any interrupted Spot Instances. Default: maintain.

", "locationName":"type" }, "ValidFrom":{ @@ -18481,6 +18668,11 @@ "shape":"String", "documentation":"

The format of the flow log record.

", "locationName":"logFormat" + }, + "MaxAggregationInterval":{ + "shape":"Integer", + "documentation":"

The maximum interval of time, in seconds, during which a flow of packets is captured and aggregated into a flow log record.

For network interfaces attached to Nitro-based instances, the aggregation interval is always 60 seconds (1 minute), regardless of the specified value.

", + "locationName":"maxAggregationInterval" } }, "documentation":"

Describes a flow log.

" @@ -18730,6 +18922,43 @@ "type":"string", "enum":["ipsec.1"] }, + "GetAssociatedIpv6PoolCidrsRequest":{ + "type":"structure", + "required":["PoolId"], + "members":{ + "PoolId":{ + "shape":"String", + "documentation":"

The ID of the IPv6 address pool.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + }, + "MaxResults":{ + "shape":"Ipv6PoolMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "GetAssociatedIpv6PoolCidrsResult":{ + "type":"structure", + "members":{ + "Ipv6CidrAssociations":{ + "shape":"Ipv6CidrAssociationSet", + "documentation":"

Information about the IPv6 CIDR block associations.

", + "locationName":"ipv6CidrAssociationSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.
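A companion sketch for the new GetAssociatedIpv6PoolCidrs operation; the pool ID is a placeholder:

```python
# Sketch: list the CIDR block associations for one IPv6 address pool.
import botocore.session

ec2 = botocore.session.get_session().create_client("ec2", region_name="us-east-1")

resp = ec2.get_associated_ipv6_pool_cidrs(PoolId="ipv6pool-ec2-012345")
for assoc in resp["Ipv6CidrAssociations"]:
    print(assoc["Ipv6Cidr"], "->", assoc["AssociatedResource"])
```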

", + "locationName":"nextToken" + } + } + }, "GetCapacityReservationUsageRequest":{ "type":"structure", "required":["CapacityReservationId"], @@ -19602,7 +19831,6 @@ }, "documentation":"

Describes the properties of the Dedicated Host.

" }, - "HostId":{"type":"string"}, "HostInstance":{ "type":"structure", "members":{ @@ -20041,6 +20269,16 @@ "documentation":"

This value is set to windows for Windows AMIs; otherwise, it is blank.

", "locationName":"platform" }, + "PlatformDetails":{ + "shape":"String", + "documentation":"

The platform details associated with the billing code of the AMI. For more information, see AMI Billing Information in the Amazon Elastic Compute Cloud User Guide.

", + "locationName":"platformDetails" + }, + "UsageOperation":{ + "shape":"String", + "documentation":"

The operation of the Amazon EC2 instance and the billing code associated with the AMI. usageOperation corresponds to the lineitem/Operation column on your AWS Cost and Usage Report. For more information, see AMI Billing Information in the Amazon Elastic Compute Cloud User Guide.

", + "locationName":"usageOperation" + }, "ProductCodes":{ "shape":"ProductCodeList", "documentation":"

Any product codes associated with the AMI.

", @@ -20299,7 +20537,7 @@ "locationName":"licenseConfigurationArn" } }, - "documentation":"

The response information of license configurations.

" + "documentation":"

The response information for license configurations.

" }, "ImportImageLicenseSpecificationListRequest":{ "type":"list", @@ -20352,8 +20590,8 @@ "documentation":"

The target hypervisor platform.

Valid values: xen

" }, "KmsKeyId":{ - "shape":"String", - "documentation":"

An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted AMI. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

The CMK identifier may be provided in any of the following formats:

  • Key ID

  • Key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

  • ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the AMI is being copied to.

" + "shape":"KmsKeyId", + "documentation":"

An identifier for the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted AMI. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

The CMK identifier may be provided in any of the following formats:

  • Key ID

  • Key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

  • ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the AMI is being copied to.

Amazon EBS does not support asymmetric CMKs.
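The surrounding members (Hypervisor, LicenseType) suggest this KmsKeyId belongs to the ImportImage request; treating that as an assumption, a sketch using the alias-ARN form from the list above, with placeholder bucket, disk, and account values:

```python
# Sketch (assumes the operation is ImportImage): create an encrypted AMI from
# a VMDK stored in S3, passing the CMK as an alias ARN.
import botocore.session

ec2 = botocore.session.get_session().create_client("ec2", region_name="us-east-1")

ec2.import_image(
    Description="imported server image",
    DiskContainers=[
        {
            "Format": "VMDK",
            "UserBucket": {"S3Bucket": "my-import-bucket", "S3Key": "disks/server.vmdk"},
        }
    ],
    Encrypted=True,
    KmsKeyId="arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias",
)
```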

" }, "LicenseType":{ "shape":"String", @@ -20408,7 +20646,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to create the encrypted AMI.

", + "documentation":"

The identifier for the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to create the encrypted AMI.

", "locationName":"kmsKeyId" }, "LicenseType":{ @@ -20516,9 +20754,14 @@ "documentation":"

A descriptive status message for the import image task.

", "locationName":"statusMessage" }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the import image task.

", + "locationName":"tagSet" + }, "LicenseSpecifications":{ "shape":"ImportImageLicenseSpecificationListResponse", - "documentation":"

The ARNs of the license configurations associated to the import image task.

", + "documentation":"

The ARNs of the license configurations that are associated with the import image task.

", "locationName":"licenseSpecifications" } }, @@ -20720,7 +20963,7 @@ "locationName":"dryRun" }, "KeyName":{ - "shape":"String", + "shape":"KeyPairName", "documentation":"

A unique name for the key pair.

", "locationName":"keyName" }, @@ -20775,7 +21018,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted snapshot. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

The CMK identifier may be provided in any of the following formats:

  • Key ID

  • Key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

  • ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the snapshot is being copied to.

" + "documentation":"

An identifier for the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted snapshot. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

The CMK identifier may be provided in any of the following formats:

  • Key ID

  • Key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

  • ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of the CMK owner, the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the snapshot is being copied to.

Amazon EBS does not support asymmetric CMKs.

" }, "RoleName":{ "shape":"String", @@ -20820,6 +21063,11 @@ "shape":"SnapshotTaskDetail", "documentation":"

Describes an import snapshot task.

", "locationName":"snapshotTaskDetail" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the import snapshot task.

", + "locationName":"tagSet" } }, "documentation":"

Describes an import snapshot task.

" @@ -22135,7 +22383,7 @@ "members":{ "TotalSizeInGB":{ "shape":"DiskSize", - "documentation":"

The total size of the disks, in GiB.

", + "documentation":"

The total size of the disks, in GB.

", "locationName":"totalSizeInGB" }, "Disks":{ @@ -22452,10 +22700,10 @@ "documentation":"

Indicates whether the instance type is offered for spot or On-Demand.

", "locationName":"supportedUsageClasses" }, - "SupportedRootDevices":{ + "SupportedRootDeviceTypes":{ "shape":"RootDeviceTypeList", - "documentation":"

Indicates the supported root devices.

", - "locationName":"supportedRootDevices" + "documentation":"

Indicates the supported root device types.

", + "locationName":"supportedRootDeviceTypes" }, "BareMetal":{ "shape":"BareMetalFlag", @@ -22730,7 +22978,7 @@ }, "Description":{ "shape":"String", - "documentation":"

A description for the security group rule that references this IPv4 address range.

Constraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*

", + "documentation":"

A description for the security group rule that references this IPv4 address range.

Constraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*

", "locationName":"description" } }, @@ -22758,6 +23006,29 @@ "locationName":"item" } }, + "Ipv6CidrAssociation":{ + "type":"structure", + "members":{ + "Ipv6Cidr":{ + "shape":"String", + "documentation":"

The IPv6 CIDR block.

", + "locationName":"ipv6Cidr" + }, + "AssociatedResource":{ + "shape":"String", + "documentation":"

The resource that's associated with the IPv6 CIDR block.

", + "locationName":"associatedResource" + } + }, + "documentation":"

Describes an IPv6 CIDR block association.

" + }, + "Ipv6CidrAssociationSet":{ + "type":"list", + "member":{ + "shape":"Ipv6CidrAssociation", + "locationName":"item" + } + }, "Ipv6CidrBlock":{ "type":"structure", "members":{ @@ -22777,6 +23048,44 @@ } }, "Ipv6Flag":{"type":"boolean"}, + "Ipv6Pool":{ + "type":"structure", + "members":{ + "PoolId":{ + "shape":"String", + "documentation":"

The ID of the address pool.

", + "locationName":"poolId" + }, + "Description":{ + "shape":"String", + "documentation":"

The description for the address pool.

", + "locationName":"description" + }, + "PoolCidrBlocks":{ + "shape":"PoolCidrBlocksSet", + "documentation":"

The CIDR blocks for the address pool.

", + "locationName":"poolCidrBlockSet" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags for the address pool.

", + "locationName":"tagSet" + } + }, + "documentation":"

Describes an IPv6 address pool.

" + }, + "Ipv6PoolMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "Ipv6PoolSet":{ + "type":"list", + "member":{ + "shape":"Ipv6Pool", + "locationName":"item" + } + }, "Ipv6Range":{ "type":"structure", "members":{ @@ -22787,7 +23096,7 @@ }, "Description":{ "shape":"String", - "documentation":"

A description for the security group rule that references this IPv6 address range.

Constraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*

", + "documentation":"

A description for the security group rule that references this IPv6 address range.

Constraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*

", "locationName":"description" } }, @@ -22807,6 +23116,7 @@ "disable" ] }, + "KernelId":{"type":"string"}, "KeyNameStringList":{ "type":"list", "member":{ @@ -22831,13 +23141,30 @@ "shape":"String", "documentation":"

The name of the key pair.

", "locationName":"keyName" + }, + "KeyPairId":{ + "shape":"String", + "documentation":"

The ID of the key pair.

", + "locationName":"keyPairId" } }, "documentation":"

Describes a key pair.

" }, + "KeyPairIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"KeyPairId" + } + }, "KeyPairInfo":{ "type":"structure", "members":{ + "KeyPairId":{ + "shape":"String", + "documentation":"

The ID of the key pair.

", + "locationName":"keyPairId" + }, "KeyFingerprint":{ "shape":"String", "documentation":"

If you used CreateKeyPair to create the key pair, this is the SHA-1 digest of the DER encoded private key. If you used ImportKeyPair to provide AWS the public key, this is the MD5 public key fingerprint as specified in section 4 of RFC4716.

", @@ -22847,6 +23174,11 @@ "shape":"String", "documentation":"

The name of the key pair.

", "locationName":"keyName" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags applied to the key pair.

", + "locationName":"tagSet" } }, "documentation":"

Describes a key pair.

" @@ -22860,6 +23192,22 @@ }, "KeyPairName":{"type":"string"}, "KmsKeyId":{"type":"string"}, + "LastError":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "documentation":"

The error message for the VPC endpoint error.

", + "locationName":"message" + }, + "Code":{ + "shape":"String", + "documentation":"

The error code for the VPC endpoint error.

", + "locationName":"code" + } + }, + "documentation":"

The last error that occurred for a VPC endpoint.

" + }, "LaunchPermission":{ "type":"structure", "members":{ @@ -23244,7 +23592,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The ARN of the AWS Key Management Service (AWS KMS) CMK used for encryption.

" + "documentation":"

The ARN of the symmetric AWS Key Management Service (AWS KMS) CMK used for encryption.

" }, "SnapshotId":{ "shape":"String", @@ -23271,7 +23619,7 @@ }, "Count":{ "shape":"LaunchTemplateElasticInferenceAcceleratorCount", - "documentation":"

The number of elastic inference accelerators of given type to be attached to the instance. Only positive values allowed. If not specified defaults to 1.

" + "documentation":"

The number of elastic inference accelerators to attach to the instance.

Default: 1

" } }, "documentation":"

Describes an elastic inference accelerator.

" @@ -23297,7 +23645,7 @@ }, "Count":{ "shape":"Integer", - "documentation":"

The number of elastic inference accelerators of given type to be attached to the instance. Only positive values allowed. If not specified defaults to 1.

", + "documentation":"

The number of elastic inference accelerators to attach to the instance.

Default: 1

", "locationName":"count" } }, @@ -23342,6 +23690,13 @@ }, "documentation":"

Indicates whether the instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites.

" }, + "LaunchTemplateHttpTokensState":{ + "type":"string", + "enum":[ + "optional", + "required" + ] + }, "LaunchTemplateIamInstanceProfileSpecification":{ "type":"structure", "members":{ @@ -23403,6 +23758,64 @@ }, "documentation":"

The market (purchasing) option for the instances.

" }, + "LaunchTemplateInstanceMetadataEndpointState":{ + "type":"string", + "enum":[ + "disabled", + "enabled" + ] + }, + "LaunchTemplateInstanceMetadataOptions":{ + "type":"structure", + "members":{ + "State":{ + "shape":"LaunchTemplateInstanceMetadataOptionsState", + "documentation":"

The state of the metadata option changes.

pending - The metadata options are being updated and the instance is not ready to process metadata traffic with the new selection.

applied - The metadata options have been successfully applied on the instance.

", + "locationName":"state" + }, + "HttpTokens":{ + "shape":"LaunchTemplateHttpTokensState", + "documentation":"

The state of token usage for your instance metadata requests. If the parameter is not specified in the request, the default state is optional.

If the state is optional, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned.

If the state is required, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available.

", + "locationName":"httpTokens" + }, + "HttpPutResponseHopLimit":{ + "shape":"Integer", + "documentation":"

The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel.

Default: 1

Possible values: Integers from 1 to 64

", + "locationName":"httpPutResponseHopLimit" + }, + "HttpEndpoint":{ + "shape":"LaunchTemplateInstanceMetadataEndpointState", + "documentation":"

This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled.

If you specify a value of disabled, you will not be able to access your instance metadata.

", + "locationName":"httpEndpoint" + } + }, + "documentation":"

The metadata options for the instance. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.

" + }, + "LaunchTemplateInstanceMetadataOptionsRequest":{ + "type":"structure", + "members":{ + "HttpTokens":{ + "shape":"LaunchTemplateHttpTokensState", + "documentation":"

The state of token usage for your instance metadata requests. If the parameter is not specified in the request, the default state is optional.

If the state is optional, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned.

If the state is required, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available.

" + }, + "HttpPutResponseHopLimit":{ + "shape":"Integer", + "documentation":"

The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel.

Default: 1

Possible values: Integers from 1 to 64

" + }, + "HttpEndpoint":{ + "shape":"LaunchTemplateInstanceMetadataEndpointState", + "documentation":"

This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled.

If you specify a value of disabled, you will not be able to access your instance metadata.

" + } + }, + "documentation":"

The metadata options for the instance. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.
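A launch-template sketch exercising these metadata options; the template name is a placeholder, and the MetadataOptions key inside LaunchTemplateData is inferred from the request shape name:

```python
# Sketch: a launch template whose instances require signed (version 2.0)
# tokens for instance metadata requests, per HttpTokens above.
import botocore.session

ec2 = botocore.session.get_session().create_client("ec2", region_name="us-east-1")

ec2.create_launch_template(
    LaunchTemplateName="require-token-metadata",
    LaunchTemplateData={
        "InstanceType": "t3.micro",
        "MetadataOptions": {
            "HttpTokens": "required",      # reject unsigned metadata requests
            "HttpEndpoint": "enabled",
            "HttpPutResponseHopLimit": 2,  # allow one extra network hop
        },
    },
)
```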

" + }, + "LaunchTemplateInstanceMetadataOptionsState":{ + "type":"string", + "enum":[ + "pending", + "applied" + ] + }, "LaunchTemplateInstanceNetworkInterfaceSpecification":{ "type":"structure", "members":{ @@ -23673,8 +24086,13 @@ }, "HostResourceGroupArn":{ "shape":"String", - "documentation":"

The ARN of the host resource group in which to launch the instances.

", + "documentation":"

The ARN of the host resource group in which to launch the instances.

", "locationName":"hostResourceGroupArn" + }, + "PartitionNumber":{ + "shape":"Integer", + "documentation":"

The number of the partition the instance should launch in. Valid only if the placement group strategy is set to partition.

", + "locationName":"partitionNumber" } }, "documentation":"

Describes the placement of an instance.

" @@ -23709,6 +24127,10 @@ "HostResourceGroupArn":{ "shape":"String", "documentation":"

The ARN of the host resource group in which to launch the instances. If you specify a host resource group ARN, omit the Tenancy parameter or set it to host.

" + }, + "PartitionNumber":{ + "shape":"Integer", + "documentation":"

The number of the partition the instance should launch in. Valid only if the placement group strategy is set to partition.

" } }, "documentation":"

Describes the placement of an instance.

" @@ -24063,6 +24485,11 @@ "shape":"String", "documentation":"

The state of the local gateway.

", "locationName":"state" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags assigned to the local gateway.

", + "locationName":"tagSet" } }, "documentation":"

Describes a local gateway.

" @@ -24149,6 +24576,11 @@ "shape":"String", "documentation":"

The state of the local gateway route table.

", "locationName":"state" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags assigned to the local gateway route table.

", + "locationName":"tagSet" } }, "documentation":"

Describes a local gateway route table.

" @@ -24194,6 +24626,11 @@ "shape":"String", "documentation":"

The state of the association.

", "locationName":"state" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags assigned to the association.

", + "locationName":"tagSet" } }, "documentation":"

Describes an association between a local gateway route table and a virtual interface group.

" @@ -24239,6 +24676,11 @@ "shape":"String", "documentation":"

The state of the association.

", "locationName":"state" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags assigned to the association.

", + "locationName":"tagSet" } }, "documentation":"

Describes an association between a local gateway route table and a VPC.

" @@ -24308,6 +24750,11 @@ "shape":"Integer", "documentation":"

The peer BGP ASN.

", "locationName":"peerBgpAsn" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags assigned to the virtual interface.

", + "locationName":"tagSet" } }, "documentation":"

Describes a local gateway virtual interface.

" @@ -24329,6 +24776,11 @@ "shape":"String", "documentation":"

The ID of the local gateway.

", "locationName":"localGatewayId" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags assigned to the virtual interface group.

", + "locationName":"tagSet" } }, "documentation":"

Describes a local gateway virtual interface group.

" @@ -24462,6 +24914,10 @@ "shape":"DnsServersOptionsModifyStructure", "documentation":"

Information about the DNS servers to be used by Client VPN connections. A Client VPN endpoint can have up to two DNS servers.

" }, + "VpnPort":{ + "shape":"Integer", + "documentation":"

The port number to assign to the Client VPN endpoint for TCP and UDP traffic.

Valid Values: 443 | 1194

Default Value: 443

" + }, "Description":{ "shape":"String", "documentation":"

A brief description of the Client VPN endpoint.

" @@ -24523,7 +24979,7 @@ "members":{ "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

" + "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

Amazon EBS does not support asymmetric CMKs.

" }, "DryRun":{ "shape":"Boolean", @@ -24733,7 +25189,7 @@ "documentation":"

A new description for the AMI.

" }, "ImageId":{ - "shape":"String", + "shape":"ImageId", "documentation":"

The ID of the AMI.

" }, "LaunchPermission":{ @@ -25011,7 +25467,7 @@ "documentation":"

The name of the placement group in which to place the instance. For spread placement groups, the instance must have a tenancy of default. For cluster and partition placement groups, the instance must have a tenancy of default or dedicated.

To remove an instance from a placement group, specify an empty string (\"\").

" }, "HostId":{ - "shape":"HostId", + "shape":"DedicatedHostId", "documentation":"

The ID of the Dedicated Host with which to associate the instance.

", "locationName":"hostId" }, @@ -25241,7 +25697,7 @@ "documentation":"

Specify true to indicate that ENIs attached to instances created in the specified subnet should be assigned a public IPv4 address.

" }, "SubnetId":{ - "shape":"String", + "shape":"SubnetId", "documentation":"

The ID of the subnet.

", "locationName":"subnetId" } @@ -25417,7 +25873,7 @@ }, "Options":{ "shape":"ModifyTransitGatewayVpcAttachmentRequestOptions", - "documentation":"

The new VPC attachment options.

" + "documentation":"

The new VPC attachment options.

You cannot modify the IPv6 options.

" }, "DryRun":{ "shape":"Boolean", @@ -25607,7 +26063,7 @@ }, "PrivateDnsEnabled":{ "shape":"Boolean", - "documentation":"

(Interface endpoint) Indicate whether a private hosted zone is associated with the VPC.

" + "documentation":"

(Interface endpoint) Indicates whether a private hosted zone is associated with the VPC.

" } }, "documentation":"

Contains the parameters for ModifyVpcEndpoint.

" @@ -25634,9 +26090,17 @@ "shape":"ServiceId", "documentation":"

The ID of the service.

" }, + "PrivateDnsName":{ + "shape":"String", + "documentation":"

The private DNS name to assign to the endpoint service.

" + }, + "RemovePrivateDnsName":{ + "shape":"Boolean", + "documentation":"

Removes the private DNS name of the endpoint service.
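A sketch of assigning, and later removing, a private DNS name on an endpoint service; the service ID and domain are placeholders. Per the PrivateDnsNameConfiguration shape further down, the service then reports a TXT record (name, type, value) to create for verification.

```python
# Sketch: set, then (commented out) remove, the private DNS name of an
# endpoint service.
import botocore.session

ec2 = botocore.session.get_session().create_client("ec2", region_name="us-east-1")

ec2.modify_vpc_endpoint_service_configuration(
    ServiceId="vpce-svc-0123456789abcdef0",
    PrivateDnsName="service.example.com",
)
# Later, to drop the name:
# ec2.modify_vpc_endpoint_service_configuration(
#     ServiceId="vpce-svc-0123456789abcdef0", RemovePrivateDnsName=True)
```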

" + }, "AcceptanceRequired":{ "shape":"Boolean", - "documentation":"

Indicate whether requests to create an endpoint to your service must be accepted.

" + "documentation":"

Indicates whether requests to create an endpoint to your service must be accepted.

" }, "AddNetworkLoadBalancerArns":{ "shape":"ValueStringList", @@ -26525,6 +26989,7 @@ }, "documentation":"

Describes an attachment change.

" }, + "NetworkInterfaceAttachmentId":{"type":"string"}, "NetworkInterfaceAttribute":{ "type":"string", "enum":[ @@ -26764,14 +27229,19 @@ "documentation":"

The order of the launch template overrides to use in fulfilling On-Demand capacity. If you specify lowest-price, EC2 Fleet uses price to determine the order, launching the lowest price first. If you specify prioritized, EC2 Fleet uses the priority that you assigned to each launch template override, launching the highest priority first. If you do not specify a value, EC2 Fleet defaults to lowest-price.

", "locationName":"allocationStrategy" }, + "CapacityReservationOptions":{ + "shape":"CapacityReservationOptions", + "documentation":"

The strategy for using unused Capacity Reservations for fulfilling On-Demand capacity. Supported only for fleets of type instant.

", + "locationName":"capacityReservationOptions" + }, "SingleInstanceType":{ "shape":"Boolean", - "documentation":"

Indicates that the fleet uses a single instance type to launch all On-Demand Instances in the fleet.

", + "documentation":"

Indicates that the fleet uses a single instance type to launch all On-Demand Instances in the fleet. Supported only for fleets of type instant.

", "locationName":"singleInstanceType" }, "SingleAvailabilityZone":{ "shape":"Boolean", - "documentation":"

Indicates that the fleet launches all On-Demand Instances into a single Availability Zone.

", + "documentation":"

Indicates that the fleet launches all On-Demand Instances into a single Availability Zone. Supported only for fleets of type instant.

", "locationName":"singleAvailabilityZone" }, "MinTargetCapacity":{ @@ -26794,13 +27264,17 @@ "shape":"FleetOnDemandAllocationStrategy", "documentation":"

The order of the launch template overrides to use in fulfilling On-Demand capacity. If you specify lowest-price, EC2 Fleet uses price to determine the order, launching the lowest price first. If you specify prioritized, EC2 Fleet uses the priority that you assigned to each launch template override, launching the highest priority first. If you do not specify a value, EC2 Fleet defaults to lowest-price.

" }, + "CapacityReservationOptions":{ + "shape":"CapacityReservationOptionsRequest", + "documentation":"

The strategy for using unused Capacity Reservations for fulfilling On-Demand capacity. Supported only for fleets of type instant.

" + }, "SingleInstanceType":{ "shape":"Boolean", - "documentation":"

Indicates that the fleet uses a single instance type to launch all On-Demand Instances in the fleet.

" + "documentation":"

Indicates that the fleet uses a single instance type to launch all On-Demand Instances in the fleet. Supported only for fleets of type instant.

" }, "SingleAvailabilityZone":{ "shape":"Boolean", - "documentation":"

Indicates that the fleet launches all On-Demand Instances into a single Availability Zone.

" + "documentation":"

Indicates that the fleet launches all On-Demand Instances into a single Availability Zone. Supported only for fleets of type instant.
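A CreateFleet sketch for the new Capacity Reservation options; it assumes an instant-type fleet, a pre-existing launch template named my-template, and that the member inside CapacityReservationOptionsRequest is UsageStrategy (only its enum value use-capacity-reservations-first appears in this diff):

```python
# Sketch: an "instant" EC2 Fleet that consumes unused Capacity Reservations first.
import botocore.session

ec2 = botocore.session.get_session().create_client("ec2", region_name="us-east-1")

ec2.create_fleet(
    Type="instant",
    LaunchTemplateConfigs=[
        {
            "LaunchTemplateSpecification": {
                "LaunchTemplateName": "my-template",  # assumed to exist
                "Version": "$Latest",
            }
        }
    ],
    TargetCapacitySpecification={
        "TotalTargetCapacity": 2,
        "DefaultTargetCapacityType": "on-demand",
    },
    OnDemandOptions={
        "CapacityReservationOptions": {
            "UsageStrategy": "use-capacity-reservations-first",  # assumed member name
        },
        "SingleAvailabilityZone": True,  # also restricted to type "instant"
    },
)
```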

" }, "MinTargetCapacity":{ "shape":"Integer", @@ -27215,10 +27689,27 @@ "shape":"Integer", "documentation":"

The number of partitions. Valid only if strategy is set to partition.

", "locationName":"partitionCount" + }, + "GroupId":{ + "shape":"String", + "documentation":"

The ID of the placement group.

", + "locationName":"groupId" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags applied to the placement group.

", + "locationName":"tagSet" } }, "documentation":"

Describes a placement group.

" }, + "PlacementGroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"GroupId" + } + }, "PlacementGroupInfo":{ "type":"structure", "members":{ @@ -27271,7 +27762,7 @@ "members":{ "GroupName":{ "shape":"String", - "documentation":"

The name of the placement group the instance is in.

", + "documentation":"

The name of the placement group that the instance is in.

", "locationName":"groupName" } }, @@ -27289,6 +27780,24 @@ "type":"string", "enum":["Windows"] }, + "PoolCidrBlock":{ + "type":"structure", + "members":{ + "Cidr":{ + "shape":"String", + "documentation":"

The CIDR block.

", + "locationName":"poolCidrBlock" + } + }, + "documentation":"

Describes a CIDR block for an address pool.

" + }, + "PoolCidrBlocksSet":{ + "type":"list", + "member":{ + "shape":"PoolCidrBlock", + "locationName":"item" + } + }, "PoolMaxResults":{ "type":"integer", "max":10, @@ -27486,6 +27995,32 @@ "Role" ] }, + "PrivateDnsNameConfiguration":{ + "type":"structure", + "members":{ + "State":{ + "shape":"DnsNameState", + "documentation":"

The verification state of the VPC endpoint service.

Consumers of the endpoint service can use the private name only when the state is verified.

", + "locationName":"state" + }, + "Type":{ + "shape":"String", + "documentation":"

The endpoint service verification type, for example TXT.

", + "locationName":"type" + }, + "Value":{ + "shape":"String", + "documentation":"

The value the service provider adds to the private DNS name domain record before verification.

", + "locationName":"value" + }, + "Name":{ + "shape":"String", + "documentation":"

The name of the record subdomain the service provider needs to create. The service provider adds the value text to the name.

", + "locationName":"name" + } + }, + "documentation":"

Information about the private DNS name for the service endpoint. For more information about these parameters, see VPC Endpoint Service Private DNS Name Verification in the Amazon Virtual Private Cloud User Guide.

" + }, "PrivateIpAddressConfigSet":{ "type":"list", "member":{ @@ -27605,12 +28140,16 @@ "members":{ "Cidr":{ "shape":"String", - "documentation":"

The public IPv4 address range, in CIDR notation. The most specific prefix that you can specify is /24. The address range cannot overlap with another address range that you've brought to this or another Region.

" + "documentation":"

The public IPv4 or IPv6 address range, in CIDR notation. The most specific IPv4 prefix that you can specify is /24. The most specific IPv6 prefix you can specify is /56. The address range cannot overlap with another address range that you've brought to this or another Region.

" }, "CidrAuthorizationContext":{ "shape":"CidrAuthorizationContext", "documentation":"

A signed document that proves that you are authorized to bring the specified IP address range to Amazon using BYOIP.

" }, + "PubliclyAdvertisable":{ + "shape":"Boolean", + "documentation":"

(IPv6 only) Indicates whether the address range will be publicly advertised to the internet.

Default: true
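A sketch of provisioning an IPv6 BYOIP range without public advertisement, using the new PubliclyAdvertisable flag; the CIDR and authorization context below are placeholders.

    import boto3

    ec2 = boto3.client("ec2")

    response = ec2.provision_byoip_cidr(
        Cidr="2001:db8:1234::/56",       # documentation prefix, for illustration only
        PubliclyAdvertisable=False,      # IPv6 only; defaults to True
        CidrAuthorizationContext={
            "Message": "<signed authorization message>",
            "Signature": "<signature>",
        },
        Description="example IPv6 BYOIP range",
    )
    print(response["ByoipCidr"]["State"])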

" + }, "Description":{ "shape":"String", "documentation":"

A description for the address range and the address pool.

" @@ -27626,7 +28165,7 @@ "members":{ "ByoipCidr":{ "shape":"ByoipCidr", - "documentation":"

Information about the address pool.

", + "documentation":"

Information about the address range.

", "locationName":"byoipCidr" } } @@ -27674,7 +28213,7 @@ "members":{ "PoolId":{ "shape":"String", - "documentation":"

The ID of the IPv4 address pool.

", + "documentation":"

The ID of the address pool.

", "locationName":"poolId" }, "Description":{ @@ -27698,7 +28237,7 @@ "locationName":"totalAvailableAddressCount" } }, - "documentation":"

Describes an address pool.

" + "documentation":"

Describes an IPv4 address pool.

" }, "PublicIpv4PoolRange":{ "type":"structure", @@ -27969,6 +28508,7 @@ "Windows (Amazon VPC)" ] }, + "RamdiskId":{"type":"string"}, "ReasonCodesList":{ "type":"list", "member":{ @@ -28088,7 +28628,7 @@ "locationName":"enaSupport" }, "KernelId":{ - "shape":"String", + "shape":"KernelId", "documentation":"

The ID of the kernel.

", "locationName":"kernelId" }, @@ -28103,7 +28643,7 @@ "locationName":"BillingProduct" }, "RamdiskId":{ - "shape":"String", + "shape":"RamdiskId", "documentation":"

The ID of the RAM disk.

", "locationName":"ramdiskId" }, @@ -28308,7 +28848,7 @@ "type":"structure", "members":{ "AllocationId":{ - "shape":"String", + "shape":"AllocationId", "documentation":"

[EC2-VPC] The allocation ID. Required for EC2-VPC.

" }, "PublicIp":{ @@ -28697,7 +29237,7 @@ "RequestHostIdSet":{ "type":"list", "member":{ - "shape":"HostId", + "shape":"DedicatedHostId", "locationName":"item" } }, @@ -28724,7 +29264,7 @@ }, "BlockDeviceMappings":{ "shape":"LaunchTemplateBlockDeviceMappingRequestList", - "documentation":"

The block device mapping.

Supplying both a snapshot ID and an encryption value as arguments for block-device mapping results in an error. This is because only blank volumes can be encrypted on start, and these are not created from a snapshot. If a snapshot is the basis for the volume, it contains data by definition and its encryption status cannot be changed using this action.

", + "documentation":"

The block device mapping.

", "locationName":"BlockDeviceMapping" }, "NetworkInterfaces":{ @@ -28817,6 +29357,10 @@ "HibernationOptions":{ "shape":"LaunchTemplateHibernationOptionsRequest", "documentation":"

Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + }, + "MetadataOptions":{ + "shape":"LaunchTemplateInstanceMetadataOptionsRequest", + "documentation":"

The metadata options for the instance. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.

" } }, "documentation":"

The information to include in the launch template.

" @@ -29612,7 +30156,7 @@ "documentation":"

The attribute to reset (currently you can only reset the launch permission attribute).

" }, "ImageId":{ - "shape":"String", + "shape":"ImageId", "documentation":"

The ID of the AMI.

" }, "DryRun":{ @@ -29716,10 +30260,12 @@ "image", "instance", "internet-gateway", + "key-pair", "launch-template", "natgateway", "network-acl", "network-interface", + "placement-group", "reserved-instances", "route-table", "security-group", @@ -29898,6 +30444,11 @@ "shape":"LaunchTemplateHibernationOptions", "documentation":"

Indicates whether an instance is configured for hibernation. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"hibernationOptions" + }, + "MetadataOptions":{ + "shape":"LaunchTemplateInstanceMetadataOptions", + "documentation":"

The metadata options for the instance. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.

", + "locationName":"metadataOptions" } }, "documentation":"

The information for a launch template.

" @@ -31343,7 +31894,7 @@ }, "AvailabilityZones":{ "shape":"ValueStringList", - "documentation":"

In the Availability Zones in which the service is available.

", + "documentation":"

The Availability Zones in which the service is available.

", "locationName":"availabilityZoneSet" }, "AcceptanceRequired":{ @@ -31353,7 +31904,7 @@ }, "ManagesVpcEndpoints":{ "shape":"Boolean", - "documentation":"

Indicates whether the service manages it's VPC endpoints. Management of the service VPC endpoints using the VPC endpoint API is restricted.

", + "documentation":"

Indicates whether the service manages its VPC endpoints. Management of the service VPC endpoints using the VPC endpoint API is restricted.

", "locationName":"managesVpcEndpoints" }, "NetworkLoadBalancerArns":{ @@ -31371,6 +31922,11 @@ "documentation":"

The private DNS name for the service.

", "locationName":"privateDnsName" }, + "PrivateDnsNameConfiguration":{ + "shape":"PrivateDnsNameConfiguration", + "documentation":"

Information about the endpoint service private DNS name configuration.

", + "locationName":"privateDnsNameConfiguration" + }, "Tags":{ "shape":"TagList", "documentation":"

Any tags assigned to the service.

", @@ -31436,13 +31992,18 @@ }, "ManagesVpcEndpoints":{ "shape":"Boolean", - "documentation":"

Indicates whether the service manages it's VPC endpoints. Management of the service VPC endpoints using the VPC endpoint API is restricted.

", + "documentation":"

Indicates whether the service manages its VPC endpoints. Management of the service VPC endpoints using the VPC endpoint API is restricted.

", "locationName":"managesVpcEndpoints" }, "Tags":{ "shape":"TagList", "documentation":"

Any tags assigned to the service.

", "locationName":"tagSet" + }, + "PrivateDnsNameVerificationState":{ + "shape":"DnsNameState", + "documentation":"

The verification state of the VPC endpoint service.

Consumers of the endpoint service cannot use the private name when the state is not verified.

", + "locationName":"privateDnsNameVerificationState" } }, "documentation":"

Describes a VPC endpoint service.

" @@ -32380,7 +32941,7 @@ "members":{ "AllocationStrategy":{ "shape":"SpotAllocationStrategy", - "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowest-price, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacity-optimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

", + "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowest-price, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all of the Spot Instance pools that you specify.

If the allocation strategy is capacity-optimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

", "locationName":"allocationStrategy" }, "InstanceInterruptionBehavior":{ @@ -32395,12 +32956,12 @@ }, "SingleInstanceType":{ "shape":"Boolean", - "documentation":"

Indicates that the fleet uses a single instance type to launch all Spot Instances in the fleet.

", + "documentation":"

Indicates that the fleet uses a single instance type to launch all Spot Instances in the fleet. Supported only for fleets of type instant.

", "locationName":"singleInstanceType" }, "SingleAvailabilityZone":{ "shape":"Boolean", - "documentation":"

Indicates that the fleet launches all Spot Instances into a single Availability Zone.

", + "documentation":"

Indicates that the fleet launches all Spot Instances into a single Availability Zone. Supported only for fleets of type instant.

", "locationName":"singleAvailabilityZone" }, "MinTargetCapacity":{ @@ -32421,7 +32982,7 @@ "members":{ "AllocationStrategy":{ "shape":"SpotAllocationStrategy", - "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowest-price, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacity-optimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

" + "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowest-price, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all of the Spot Instance pools that you specify.

If the allocation strategy is capacity-optimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

" }, "InstanceInterruptionBehavior":{ "shape":"SpotInstanceInterruptionBehavior", @@ -32433,11 +32994,11 @@ }, "SingleInstanceType":{ "shape":"Boolean", - "documentation":"

Indicates that the fleet uses a single instance type to launch all Spot Instances in the fleet.

" + "documentation":"

Indicates that the fleet uses a single instance type to launch all Spot Instances in the fleet. Supported only for fleets of type instant.

" }, "SingleAvailabilityZone":{ "shape":"Boolean", - "documentation":"

Indicates that the fleet launches all Spot Instances into a single Availability Zone.

" + "documentation":"

Indicates that the fleet launches all Spot Instances into a single Availability Zone. Supported only for fleets of type instant.

" }, "MinTargetCapacity":{ "shape":"Integer", @@ -32626,6 +33187,30 @@ } } }, + "StartVpcEndpointServicePrivateDnsVerificationRequest":{ + "type":"structure", + "required":["ServiceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "ServiceId":{ + "shape":"ServiceId", + "documentation":"

The ID of the endpoint service.

" + } + } + }, + "StartVpcEndpointServicePrivateDnsVerificationResult":{ + "type":"structure", + "members":{ + "ReturnValue":{ + "shape":"Boolean", + "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.
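A sketch of the verification flow these shapes describe: read the PrivateDnsNameConfiguration for the service, create the DNS record it names, then trigger verification. The service ID is a placeholder, and the snake_case method name is the one botocore derives from StartVpcEndpointServicePrivateDnsVerification.

    import boto3

    ec2 = boto3.client("ec2")
    service_id = "vpce-svc-0123456789abcdef0"  # hypothetical endpoint service

    config = ec2.describe_vpc_endpoint_service_configurations(
        ServiceIds=[service_id]
    )["ServiceConfigurations"][0]

    dns = config.get("PrivateDnsNameConfiguration", {})
    # Create a record of the given Type (for example TXT) named "<Name>.<private DNS domain>"
    # with the given Value, then start verification.
    print(dns.get("Type"), dns.get("Name"), dns.get("Value"), dns.get("State"))

    result = ec2.start_vpc_endpoint_service_private_dns_verification(ServiceId=service_id)
    print(result["ReturnValue"])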

", + "locationName":"return" + } + } + }, "State":{ "type":"string", "enum":[ @@ -32840,6 +33425,7 @@ "locationName":"item" } }, + "SubnetCidrAssociationId":{"type":"string"}, "SubnetCidrBlockState":{ "type":"structure", "members":{ @@ -33024,7 +33610,7 @@ "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host | fleet | fpga-image | instance | launch-template | snapshot | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment | transit-gateway-route-table | volume.

To tag a resource after it has been created, see CreateTags.

", + "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host | fleet | fpga-image | instance | key-pair | launch-template | placement-group | snapshot | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment | transit-gateway-route-table | volume.

To tag a resource after it has been created, see CreateTags.

", "locationName":"resourceType" }, "Tags":{ @@ -33066,7 +33652,7 @@ "locationName":"defaultTargetCapacityType" } }, - "documentation":"

The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice, or both to ensure your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in and

" + "documentation":"

The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice, or both to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in and

" }, "TargetCapacitySpecificationRequest":{ "type":"structure", @@ -33089,7 +33675,7 @@ "documentation":"

The default TotalTargetCapacity, which is either Spot or On-Demand.

" } }, - "documentation":"

The number of units to request. You can choose to set the target capacity as the number of instances. Or you can set the target capacity to a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in and .

" + "documentation":"

The number of units to request. You can choose to set the target capacity as the number of instances. Or you can set the target capacity to a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in and .

" }, "TargetConfiguration":{ "type":"structure", @@ -35875,7 +36461,7 @@ }, "Groups":{ "shape":"GroupIdentifierSet", - "documentation":"

(Interface endpoint) Information about the security groups associated with the network interface.

", + "documentation":"

(Interface endpoint) Information about the security groups that are associated with the network interface.

", "locationName":"groupSet" }, "PrivateDnsEnabled":{ @@ -35900,7 +36486,7 @@ }, "CreationTimestamp":{ "shape":"MillisecondDateTime", - "documentation":"

The date and time the VPC endpoint was created.

", + "documentation":"

The date and time that the VPC endpoint was created.

", "locationName":"creationTimestamp" }, "Tags":{ @@ -35912,6 +36498,11 @@ "shape":"String", "documentation":"

The ID of the AWS account that owns the VPC endpoint.

", "locationName":"ownerId" + }, + "LastError":{ + "shape":"LastError", + "documentation":"

The last error that occurred for the VPC endpoint.

", + "locationName":"lastError" } }, "documentation":"

Describes a VPC endpoint.

" @@ -35941,7 +36532,7 @@ }, "CreationTimestamp":{ "shape":"MillisecondDateTime", - "documentation":"

The date and time the VPC endpoint was created.

", + "documentation":"

The date and time that the VPC endpoint was created.

", "locationName":"creationTimestamp" }, "DnsEntries":{ @@ -36005,6 +36596,11 @@ "documentation":"

Information about the state of the CIDR block.

", "locationName":"ipv6CidrBlockState" }, + "Ipv6Pool":{ + "shape":"String", + "documentation":"

The ID of the IPv6 address pool from which the IPv6 CIDR block is allocated.

", + "locationName":"ipv6Pool" + }, "NetworkBorderGroup":{ "shape":"String", "documentation":"

The name of the location from which we advertise the IPV6 CIDR block.

", @@ -36486,7 +37082,7 @@ "members":{ "Cidr":{ "shape":"String", - "documentation":"

The public IPv4 address range, in CIDR notation.

" + "documentation":"

The address range, in CIDR notation.

" }, "DryRun":{ "shape":"Boolean", diff --git a/botocore/data/ecr/2015-09-21/service-2.json b/botocore/data/ecr/2015-09-21/service-2.json index 09271105..182d8f86 100644 --- a/botocore/data/ecr/2015-09-21/service-2.json +++ b/botocore/data/ecr/2015-09-21/service-2.json @@ -27,7 +27,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ServerException"} ], - "documentation":"

Check the availability of multiple image layers in a specified registry and repository.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Checks the availability of one or more image layers in a repository.

When an image is pushed to a repository, each image layer is checked to verify whether it has been uploaded before. If it has, then the image layer is skipped.

When an image is pulled from a repository, each image layer is checked once to verify it is available to be pulled.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, "BatchDeleteImage":{ "name":"BatchDeleteImage", @@ -42,7 +42,7 @@ {"shape":"InvalidParameterException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Deletes a list of specified images within a specified repository. Images are specified with either imageTag or imageDigest.

You can remove a tag from an image by specifying the image's tag in your request. When you remove the last tag from an image, the image is deleted from your repository.

You can completely delete an image (and all of its tags) by specifying the image's digest in your request.

" + "documentation":"

Deletes a list of specified images within a repository. Images are specified with either an imageTag or imageDigest.

You can remove a tag from an image by specifying the image's tag in your request. When you remove the last tag from an image, the image is deleted from your repository.

You can completely delete an image (and all of its tags) by specifying the image's digest in your request.

" }, "BatchGetImage":{ "name":"BatchGetImage", @@ -57,7 +57,7 @@ {"shape":"InvalidParameterException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Gets detailed information for specified images within a specified repository. Images are specified with either imageTag or imageDigest.

" + "documentation":"

Gets detailed information for an image. Images are specified with either an imageTag or imageDigest.

When an image is pulled, the BatchGetImage API is called once to retrieve the image manifest.

" }, "CompleteLayerUpload":{ "name":"CompleteLayerUpload", @@ -77,7 +77,7 @@ {"shape":"LayerAlreadyExistsException"}, {"shape":"EmptyUploadException"} ], - "documentation":"

Informs Amazon ECR that the image layer upload has completed for a specified registry, repository name, and upload ID. You can optionally provide a sha256 digest of the image layer for data validation purposes.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Informs Amazon ECR that the image layer upload has completed for a specified registry, repository name, and upload ID. You can optionally provide a sha256 digest of the image layer for data validation purposes.

When an image is pushed, the CompleteLayerUpload API is called once per each new image layer to verify that the upload has completed.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, "CreateRepository":{ "name":"CreateRepository", @@ -95,7 +95,7 @@ {"shape":"RepositoryAlreadyExistsException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates an Amazon Elastic Container Registry (Amazon ECR) repository, where users can push and pull Docker images. For more information, see Amazon ECR Repositories in the Amazon Elastic Container Registry User Guide.

" + "documentation":"

Creates a repository. For more information, see Amazon ECR Repositories in the Amazon Elastic Container Registry User Guide.

" }, "DeleteLifecyclePolicy":{ "name":"DeleteLifecyclePolicy", @@ -111,7 +111,7 @@ {"shape":"RepositoryNotFoundException"}, {"shape":"LifecyclePolicyNotFoundException"} ], - "documentation":"

Deletes the specified lifecycle policy.

" + "documentation":"

Deletes the lifecycle policy associated with the specified repository.

" }, "DeleteRepository":{ "name":"DeleteRepository", @@ -127,7 +127,7 @@ {"shape":"RepositoryNotFoundException"}, {"shape":"RepositoryNotEmptyException"} ], - "documentation":"

Deletes an existing image repository. If a repository contains images, you must use the force option to delete it.

" + "documentation":"

Deletes a repository. If the repository contains images, you must either delete all images in the repository or use the force option to delete the repository.

" }, "DeleteRepositoryPolicy":{ "name":"DeleteRepositoryPolicy", @@ -143,7 +143,7 @@ {"shape":"RepositoryNotFoundException"}, {"shape":"RepositoryPolicyNotFoundException"} ], - "documentation":"

Deletes the repository policy from a specified repository.

" + "documentation":"

Deletes the repository policy associated with the specified repository.

" }, "DescribeImageScanFindings":{ "name":"DescribeImageScanFindings", @@ -160,7 +160,7 @@ {"shape":"ImageNotFoundException"}, {"shape":"ScanNotFoundException"} ], - "documentation":"

Describes the image scan findings for the specified image.

" + "documentation":"

Returns the scan findings for the specified image.
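Scan findings are only readable once the scan finishes; a sketch that blocks on the ImageScanComplete waiter defined in the new ecr waiters-2.json later in this changeset (botocore exposes it as "image_scan_complete"; the repository and tag are illustrative):

    import boto3

    ecr = boto3.client("ecr")
    repo, image = "my-repo", {"imageTag": "latest"}  # hypothetical

    ecr.start_image_scan(repositoryName=repo, imageId=image)
    ecr.get_waiter("image_scan_complete").wait(repositoryName=repo, imageId=image)

    findings = ecr.describe_image_scan_findings(repositoryName=repo, imageId=image)
    print(findings["imageScanStatus"]["status"])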

" }, "DescribeImages":{ "name":"DescribeImages", @@ -176,7 +176,7 @@ {"shape":"RepositoryNotFoundException"}, {"shape":"ImageNotFoundException"} ], - "documentation":"

Returns metadata about the images in a repository, including image size, image tags, and creation date.

Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

" + "documentation":"

Returns metadata about the images in a repository.

Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

" }, "DescribeRepositories":{ "name":"DescribeRepositories", @@ -205,7 +205,7 @@ {"shape":"ServerException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Retrieves a token that is valid for a specified registry for 12 hours. This command allows you to use the docker CLI to push and pull images with Amazon ECR. If you do not specify a registry, the default registry is assumed.

The authorizationToken returned for each registry specified is a base64 encoded string that can be decoded and used in a docker login command to authenticate to a registry. The AWS CLI offers an aws ecr get-login command that simplifies the login process.

" + "documentation":"

Retrieves an authorization token. An authorization token represents your IAM authentication credentials and can be used to access any Amazon ECR registry that your IAM principal has access to. The authorization token is valid for 12 hours.

The authorizationToken returned is a base64 encoded string that can be decoded and used in a docker login command to authenticate to a registry. The AWS CLI offers a get-login-password command that simplifies the login process. For more information, see Registry Authentication in the Amazon Elastic Container Registry User Guide.
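A sketch of decoding the token into the username/password pair that docker login expects, which is essentially what the CLI helper does:

    import base64
    import boto3

    ecr = boto3.client("ecr")
    auth = ecr.get_authorization_token()["authorizationData"][0]

    username, password = base64.b64decode(auth["authorizationToken"]).decode().split(":", 1)
    registry = auth["proxyEndpoint"]
    # Feed `password` to `docker login --username <username> --password-stdin <registry>`
    # on stdin; avoid logging it.
    print(username, registry)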

" }, "GetDownloadUrlForLayer":{ "name":"GetDownloadUrlForLayer", @@ -222,7 +222,7 @@ {"shape":"LayerInaccessibleException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can only get URLs for image layers that are referenced in an image.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can only get URLs for image layers that are referenced in an image.

When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, "GetLifecyclePolicy":{ "name":"GetLifecyclePolicy", @@ -238,7 +238,7 @@ {"shape":"RepositoryNotFoundException"}, {"shape":"LifecyclePolicyNotFoundException"} ], - "documentation":"

Retrieves the specified lifecycle policy.

" + "documentation":"

Retrieves the lifecycle policy for the specified repository.

" }, "GetLifecyclePolicyPreview":{ "name":"GetLifecyclePolicyPreview", @@ -254,7 +254,7 @@ {"shape":"RepositoryNotFoundException"}, {"shape":"LifecyclePolicyPreviewNotFoundException"} ], - "documentation":"

Retrieves the results of the specified lifecycle policy preview request.

" + "documentation":"

Retrieves the results of the lifecycle policy preview request for the specified repository.

" }, "GetRepositoryPolicy":{ "name":"GetRepositoryPolicy", @@ -270,7 +270,7 @@ {"shape":"RepositoryNotFoundException"}, {"shape":"RepositoryPolicyNotFoundException"} ], - "documentation":"

Retrieves the repository policy for a specified repository.

" + "documentation":"

Retrieves the repository policy for the specified repository.

" }, "InitiateLayerUpload":{ "name":"InitiateLayerUpload", @@ -285,7 +285,7 @@ {"shape":"InvalidParameterException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Notify Amazon ECR that you intend to upload an image layer.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Notifies Amazon ECR that you intend to upload an image layer.

When an image is pushed, the InitiateLayerUpload API is called once per image layer that has not already been uploaded. Whether an image layer has been uploaded before is determined by the BatchCheckLayerAvailability API action.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, "ListImages":{ "name":"ListImages", @@ -300,7 +300,7 @@ {"shape":"InvalidParameterException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Lists all the image IDs for a given repository.

You can filter images based on whether or not they are tagged by setting the tagStatus parameter to TAGGED or UNTAGGED. For example, you can filter your results to return only UNTAGGED images and then pipe that result to a BatchDeleteImage operation to delete them. Or, you can filter your results to return only TAGGED images to list all of the tags in your repository.

" + "documentation":"

Lists all the image IDs for the specified repository.

You can filter images based on whether or not they are tagged by using the tagStatus filter and specifying either TAGGED, UNTAGGED or ANY. For example, you can filter your results to return only UNTAGGED images and then pipe that result to a BatchDeleteImage operation to delete them. Or, you can filter your results to return only TAGGED images to list all of the tags in your repository.
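The untagged-image cleanup pattern described above, as a short sketch (the repository name is illustrative; large repositories would need to follow nextToken):

    import boto3

    ecr = boto3.client("ecr")
    repo = "my-repo"  # hypothetical

    untagged = ecr.list_images(
        repositoryName=repo, filter={"tagStatus": "UNTAGGED"}
    )["imageIds"]

    if untagged:
        ecr.batch_delete_image(repositoryName=repo, imageIds=untagged)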

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -334,7 +334,7 @@ {"shape":"LimitExceededException"}, {"shape":"ImageTagAlreadyExistsException"} ], - "documentation":"

Creates or updates the image manifest and tags associated with an image.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Creates or updates the image manifest and tags associated with an image.

When an image is pushed and all new image layers have been uploaded, the PutImage API is called once to create or update the image manifest and tags associated with the image.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, "PutImageScanningConfiguration":{ "name":"PutImageScanningConfiguration", @@ -349,7 +349,7 @@ {"shape":"InvalidParameterException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Updates the image scanning configuration for a repository.

" + "documentation":"

Updates the image scanning configuration for the specified repository.

" }, "PutImageTagMutability":{ "name":"PutImageTagMutability", @@ -364,7 +364,7 @@ {"shape":"InvalidParameterException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Updates the image tag mutability settings for a repository. When a repository is configured with tag immutability, all image tags within the repository will be prevented them from being overwritten. For more information, see Image Tag Mutability in the Amazon Elastic Container Registry User Guide.

" + "documentation":"

Updates the image tag mutability settings for the specified repository. For more information, see Image Tag Mutability in the Amazon Elastic Container Registry User Guide.

" }, "PutLifecyclePolicy":{ "name":"PutLifecyclePolicy", @@ -379,7 +379,7 @@ {"shape":"InvalidParameterException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Creates or updates a lifecycle policy. For information about lifecycle policy syntax, see Lifecycle Policy Template.

" + "documentation":"

Creates or updates the lifecycle policy for the specified repository. For more information, see Lifecycle Policy Template.

" }, "SetRepositoryPolicy":{ "name":"SetRepositoryPolicy", @@ -394,7 +394,7 @@ {"shape":"InvalidParameterException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Applies a repository policy on a specified repository to control access permissions. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

" + "documentation":"

Applies a repository policy to the specified repository to control access permissions. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

" }, "StartImageScan":{ "name":"StartImageScan", @@ -427,7 +427,7 @@ {"shape":"LifecyclePolicyNotFoundException"}, {"shape":"LifecyclePolicyPreviewInProgressException"} ], - "documentation":"

Starts a preview of the specified lifecycle policy. This allows you to see the results before creating the lifecycle policy.

" + "documentation":"

Starts a preview of a lifecycle policy for the specified repository. This allows you to see the results before associating the lifecycle policy with the repository.
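A sketch that starts a preview for the policy already associated with a repository and waits on the LifecyclePolicyPreviewComplete waiter added elsewhere in this changeset before reading the results (the repository name is illustrative):

    import boto3

    ecr = boto3.client("ecr")
    repo = "my-repo"  # hypothetical; must already have a lifecycle policy

    ecr.start_lifecycle_policy_preview(repositoryName=repo)
    ecr.get_waiter("lifecycle_policy_preview_complete").wait(repositoryName=repo)

    preview = ecr.get_lifecycle_policy_preview(repositoryName=repo)
    print(preview["status"], len(preview.get("previewResults", [])))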

" }, "TagResource":{ "name":"TagResource", @@ -479,7 +479,7 @@ {"shape":"UploadNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Uploads an image layer part to Amazon ECR.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Uploads an image layer part to Amazon ECR.

When an image is pushed, each new image layer is uploaded in parts. The maximum size of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once per each new image layer part.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.
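For illustration only (the docker CLI normally drives these calls), a sketch of the push sequence the proxy documentation above describes: InitiateLayerUpload once per new layer, UploadLayerPart per chunk of at most 20971520 bytes, CompleteLayerUpload per layer, then a single PutImage. The layer bytes and manifest below are placeholders.

    import hashlib
    import boto3

    ecr = boto3.client("ecr")
    repo = "my-repo"                              # hypothetical
    layer_blob = b"example layer contents"        # placeholder layer bytes
    layer_digest = "sha256:" + hashlib.sha256(layer_blob).hexdigest()
    manifest = "{...}"                            # placeholder image manifest JSON

    upload = ecr.initiate_layer_upload(repositoryName=repo)
    ecr.upload_layer_part(
        repositoryName=repo,
        uploadId=upload["uploadId"],
        partFirstByte=0,
        partLastByte=len(layer_blob) - 1,
        layerPartBlob=layer_blob,
    )
    ecr.complete_layer_upload(
        repositoryName=repo,
        uploadId=upload["uploadId"],
        layerDigests=[layer_digest],
    )
    ecr.put_image(repositoryName=repo, imageManifest=manifest, imageTag="latest")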

" } }, "shapes":{ @@ -1014,7 +1014,7 @@ "members":{ "registryIds":{ "shape":"GetAuthorizationTokenRegistryIdList", - "documentation":"

A list of AWS account IDs that are associated with the registries for which to get authorization tokens. If you do not specify a registry, the default registry is assumed.

" + "documentation":"

A list of AWS account IDs that are associated with the registries for which to get AuthorizationData objects. If you do not specify a registry, the default registry is assumed.

" } } }, diff --git a/botocore/data/ecr/2015-09-21/waiters-2.json b/botocore/data/ecr/2015-09-21/waiters-2.json new file mode 100644 index 00000000..9ef9608f --- /dev/null +++ b/botocore/data/ecr/2015-09-21/waiters-2.json @@ -0,0 +1,45 @@ +{ + "version": 2, + "waiters": { + "ImageScanComplete": { + "description": "Wait until an image scan is complete and findings can be accessed", + "operation": "DescribeImageScanFindings", + "delay": 5, + "maxAttempts": 60, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "imageScanStatus.status", + "expected": "COMPLETE" + }, + { + "state": "failure", + "matcher": "path", + "argument": "imageScanStatus.status", + "expected": "FAILED" + } + ] + }, + "LifecyclePolicyPreviewComplete": { + "description": "Wait until a lifecycle policy preview request is complete and results can be accessed", + "operation": "GetLifecyclePolicyPreview", + "delay": 5, + "maxAttempts": 20, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "status", + "expected": "COMPLETE" + }, + { + "state": "failure", + "matcher": "path", + "argument": "status", + "expected": "FAILED" + } + ] + } + } +} \ No newline at end of file diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index 86fcedbc..44764a5d 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -134,7 +134,7 @@ {"shape":"ClusterContainsTasksException"}, {"shape":"UpdateInProgressException"} ], - "documentation":"

Deletes the specified cluster. You must deregister all container instances from this cluster before you may delete it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.

" + "documentation":"

Deletes the specified cluster. The cluster will transition to the INACTIVE state. Clusters with an INACTIVE status may remain discoverable in your account for a period of time. However, this behavior is subject to change in the future, so you should not rely on INACTIVE clusters persisting.

You must deregister all container instances from this cluster before you may delete it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.
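A sketch of what the INACTIVE behavior looks like from boto3; the cluster name is illustrative, and a deleted cluster may keep showing up as INACTIVE for some time.

    import boto3

    ecs = boto3.client("ecs")
    cluster_name = "demo-cluster"  # hypothetical

    ecs.delete_cluster(cluster=cluster_name)
    clusters = ecs.describe_clusters(clusters=[cluster_name])["clusters"]
    if clusters:
        print(clusters[0]["status"])  # typically "INACTIVE" right after deletion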

" }, "DeleteService":{ "name":"DeleteService", @@ -940,7 +940,7 @@ }, "managedTerminationProtection":{ "shape":"ManagedTerminationProtection", - "documentation":"

The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection.

When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions enabled as well. For more information, see Instance Protection in the AWS Auto Scaling User Guide.

When managed termination protection is disabled, your Amazon EC2 instances are not protected from termination when the Auto Scaling group scales in.

" + "documentation":"

The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection.

When using managed termination protection, managed scaling must also be used; otherwise, managed termination protection does not work.

When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions enabled as well. For more information, see Instance Protection in the AWS Auto Scaling User Guide.

When managed termination protection is disabled, your Amazon EC2 instances are not protected from termination when the Auto Scaling group scales in.
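A sketch of a capacity provider that enables managed scaling together with managed termination protection, per the note above that the former is required for the latter; the Auto Scaling group ARN is a placeholder.

    import boto3

    ecs = boto3.client("ecs")

    ecs.create_capacity_provider(
        name="demo-capacity-provider",
        autoScalingGroupProvider={
            "autoScalingGroupArn": (
                "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:"
                "11111111-2222-3333-4444-555555555555:autoScalingGroupName/demo-asg"
            ),
            "managedScaling": {"status": "ENABLED", "targetCapacity": 100},
            "managedTerminationProtection": "ENABLED",
        },
    )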

" } }, "documentation":"

The details of the Auto Scaling group for the capacity provider.

" @@ -1076,7 +1076,7 @@ }, "status":{ "shape":"String", - "documentation":"

The status of the cluster. The valid values are ACTIVE or INACTIVE. ACTIVE indicates that you can register container instances with the cluster and the associated instances can accept tasks.

" + "documentation":"

The status of the cluster. The following are the possible states that will be returned.

ACTIVE

The cluster is ready to accept tasks and if applicable you can register container instances with the cluster.

PROVISIONING

The cluster has capacity providers associated with it and the resources needed for the capacity provider are being created.

DEPROVISIONING

The cluster has capacity providers associated with it and the resources needed for the capacity provider are being deleted.

FAILED

The cluster has capacity providers associated with it and the resources needed for the capacity provider have failed to create.

INACTIVE

The cluster has been deleted. Clusters with an INACTIVE status may remain discoverable in your account for a period of time. However, this behavior is subject to change in the future, so you should not rely on INACTIVE clusters persisting.

" }, "registeredContainerInstancesCount":{ "shape":"Integer", @@ -1306,7 +1306,7 @@ }, "cpu":{ "shape":"Integer", - "documentation":"

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2. However, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

  • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.

  • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition.

" + "documentation":"

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2. However, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

  • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.

  • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition.

" }, "memory":{ "shape":"BoxedInteger", @@ -1406,7 +1406,7 @@ }, "dockerSecurityOptions":{ "shape":"StringList", - "documentation":"

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

With Windows containers, this parameter can be used to reference a credential spec file when configuring a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers in the Amazon Elastic Container Service Developer Guide.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "interactive":{ "shape":"BoxedBoolean", @@ -1471,7 +1471,7 @@ "documentation":"

The dependency condition of the container. The following are the available conditions and their behavior:

  • START - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.

  • COMPLETE - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit.

  • SUCCESS - This condition is the same as COMPLETE, but it also requires that the container exits with a zero status.

  • HEALTHY - This condition validates that the dependent container passes its Docker health check before permitting other containers to start. This requires that the dependent container has health checks configured. This condition is confirmed only at task startup.

" } }, - "documentation":"

The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.

Your Amazon ECS container instances require at least version 1.26.0 of the container agent to enable container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

If you are using tasks that use the Fargate launch type, container dependency parameters are not supported.

" + "documentation":"

The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.

Your Amazon ECS container instances require at least version 1.26.0 of the container agent to enable container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

For tasks using the Fargate launch type, this parameter requires that the task or service uses platform version 1.3.0 or later.

" }, "ContainerInstance":{ "type":"structure", @@ -1861,6 +1861,10 @@ "clientToken":{ "shape":"String", "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper- or lowercase combination of these as a prefix for either keys or values, as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" } } }, @@ -2302,6 +2306,10 @@ "taskSets":{ "shape":"StringList", "documentation":"

The ID or full Amazon Resource Name (ARN) of task sets to describe.

" + }, + "include":{ + "shape":"TaskSetFieldList", + "documentation":"

Specifies whether to see the resource tags for the task set. If TAGS is specified, the tags are included in the response. If this field is omitted, tags are not included in the response.

" } } }, @@ -2450,6 +2458,21 @@ "documentation":"

This parameter is specified when you are using Docker volumes. Docker volumes are only supported when you are using the EC2 launch type. Windows containers only support the use of the local driver. To use bind mounts, specify a host instead.

" }, "Double":{"type":"double"}, + "EFSVolumeConfiguration":{ + "type":"structure", + "required":["fileSystemId"], + "members":{ + "fileSystemId":{ + "shape":"String", + "documentation":"

The Amazon EFS file system ID to use.

" + }, + "rootDirectory":{ + "shape":"String", + "documentation":"

The directory within the Amazon EFS file system to mount as the root directory inside the host.

" + } + }, + "documentation":"

This parameter is specified when you are using Amazon Elastic File System (Amazon EFS) file storage. Amazon EFS file systems are only supported when you are using the EC2 launch type.

EFSVolumeConfiguration remains in preview and is a Beta Service as defined by and subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms apply to your participation in this preview of EFSVolumeConfiguration.
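For illustration, a minimal boto3 sketch of registering a task definition whose volume uses this (preview) EFSVolumeConfiguration; the file system ID, paths, and image are placeholder assumptions.

```python
import boto3

ecs = boto3.client("ecs")

# Sketch: back the "shared-data" volume with an EFS file system directory.
response = ecs.register_task_definition(
    family="efs-backed-task",
    requiresCompatibilities=["EC2"],
    volumes=[
        {
            "name": "shared-data",
            "efsVolumeConfiguration": {
                "fileSystemId": "fs-12345678",    # placeholder EFS file system ID
                "rootDirectory": "/exports/app",  # mounted as the volume's root
            },
        }
    ],
    containerDefinitions=[
        {
            "name": "app",
            "image": "example/app:latest",        # placeholder image
            "essential": True,
            "memory": 512,
            "mountPoints": [{"sourceVolume": "shared-data", "containerPath": "/data"}],
        }
    ],
)
print(response["taskDefinition"]["revision"])
```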

" + }, "EnvironmentVariables":{ "type":"list", "member":{"shape":"KeyValuePair"} @@ -3070,7 +3093,7 @@ "members":{ "logDriver":{ "shape":"LogDriver", - "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

For tasks using the Fargate launch type, the supported log drivers are awslogs and splunk.

For tasks using the EC2 launch type, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

For more information about using the awslogs log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" + "documentation":"

The log driver to use for the container. The valid values listed earlier are log drivers that the Amazon ECS container agent can communicate with by default.

For tasks using the Fargate launch type, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks using the EC2 launch type, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Custom Log Routing in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we do not currently provide support for running modified copies of this software.

" }, "options":{ "shape":"LogConfigurationOptionsMap", @@ -3081,7 +3104,7 @@ "documentation":"

The secrets to pass to the log configuration. For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

Log configuration options to send to a custom log driver for the container.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses; however, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

The following should be noted when specifying a log configuration for your containers:

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the valid values below). Additional log drivers may be available in future releases of the Amazon ECS container agent.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks using the EC2 launch type, the Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks using the Fargate launch type, because you do not have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task, for example Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.
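As a hedged sketch of the log configuration described above, the following boto3 call registers a Fargate task definition that uses the awslogs driver; the log group, region, execution role ARN, and image are placeholder assumptions that must exist in your account.

```python
import boto3

ecs = boto3.client("ecs")

# Sketch: send container stdout/stderr to CloudWatch Logs via the awslogs driver.
response = ecs.register_task_definition(
    family="awslogs-example",
    requiresCompatibilities=["FARGATE"],
    networkMode="awsvpc",
    cpu="256",
    memory="512",
    executionRoleArn="arn:aws:iam::123456789012:role/ecsTaskExecutionRole",  # placeholder role
    containerDefinitions=[
        {
            "name": "app",
            "image": "example/app:latest",  # placeholder image
            "essential": True,
            "logConfiguration": {
                "logDriver": "awslogs",
                "options": {
                    "awslogs-group": "/ecs/awslogs-example",  # placeholder log group
                    "awslogs-region": "us-east-1",            # placeholder region
                    "awslogs-stream-prefix": "app",
                },
            },
        }
    ],
)
print(response["taskDefinition"]["taskDefinitionArn"])
```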

" }, "LogConfigurationOptionsMap":{ "type":"map", @@ -3495,7 +3518,7 @@ }, "capacityProviders":{ "shape":"StringList", - "documentation":"

The short name or full Amazon Resource Name (ARN) of one or more capacity providers to associate with the cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

" + "documentation":"

The name of one or more capacity providers to associate with the cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use an AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.
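A minimal boto3 sketch of associating the Fargate capacity providers at cluster creation; the cluster name and strategy weights are placeholder assumptions.

```python
import boto3

ecs = boto3.client("ecs")

# Sketch: create a cluster with both Fargate capacity providers attached and a
# default strategy that keeps one task on FARGATE and weights the rest to Spot.
response = ecs.create_cluster(
    clusterName="fargate-cluster",  # placeholder name
    capacityProviders=["FARGATE", "FARGATE_SPOT"],
    defaultCapacityProviderStrategy=[
        {"capacityProvider": "FARGATE", "weight": 1, "base": 1},
        {"capacityProvider": "FARGATE_SPOT", "weight": 4},
    ],
)
print(response["cluster"]["capacityProviders"])
```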

" }, "defaultCapacityProviderStrategy":{ "shape":"CapacityProviderStrategy", @@ -4582,7 +4605,7 @@ }, "family":{ "shape":"String", - "documentation":"

The name of a family that this task definition is registered to. A family groups multiple versions of a task definition. Amazon ECS gives the first task definition that you registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task definition that you add.

" + "documentation":"

The name of a family that this task definition is registered to. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

A family groups multiple versions of a task definition. Amazon ECS gives the first task definition that you registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task definition that you add.

" }, "taskRoleArn":{ "shape":"String", @@ -4824,10 +4847,22 @@ "stabilityStatusAt":{ "shape":"Timestamp", "documentation":"

The Unix timestamp for when the task set stability status was retrieved.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for either keys or values, because that prefix is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" } }, "documentation":"

Information about a set of Amazon ECS tasks in either an AWS CodeDeploy or an EXTERNAL deployment. An Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and whether the task set serves production traffic.

" }, + "TaskSetField":{ + "type":"string", + "enum":["TAGS"] + }, + "TaskSetFieldList":{ + "type":"list", + "member":{"shape":"TaskSetField"} + }, "TaskSetNotFoundException":{ "type":"structure", "members":{ @@ -5189,7 +5224,11 @@ }, "dockerVolumeConfiguration":{ "shape":"DockerVolumeConfiguration", - "documentation":"

This parameter is specified when you are using Docker volumes. Docker volumes are only supported when you are using the EC2 launch type. Windows containers only support the use of the local driver. To use bind mounts, specify a host instead.

" + "documentation":"

This parameter is specified when you are using Docker volumes. Docker volumes are only supported when you are using the EC2 launch type. Windows containers only support the use of the local driver. To use bind mounts, specify the host parameter instead.

" + }, + "efsVolumeConfiguration":{ + "shape":"EFSVolumeConfiguration", + "documentation":"

This parameter is specified when you are using Amazon Elastic File System (Amazon EFS) file storage. Amazon EFS file systems are only supported when you are using the EC2 launch type.

EFSVolumeConfiguration remains in preview and is a Beta Service as defined by and subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms apply to your participation in this preview of EFSVolumeConfiguration.

" } }, "documentation":"

A data volume used in a task definition. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

" diff --git a/botocore/data/efs/2015-02-01/service-2.json b/botocore/data/efs/2015-02-01/service-2.json index ea335aa3..fe0e7e9a 100644 --- a/botocore/data/efs/2015-02-01/service-2.json +++ b/botocore/data/efs/2015-02-01/service-2.json @@ -11,6 +11,24 @@ "uid":"elasticfilesystem-2015-02-01" }, "operations":{ + "CreateAccessPoint":{ + "name":"CreateAccessPoint", + "http":{ + "method":"POST", + "requestUri":"/2015-02-01/access-points", + "responseCode":200 + }, + "input":{"shape":"CreateAccessPointRequest"}, + "output":{"shape":"AccessPointDescription"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"AccessPointAlreadyExists"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"AccessPointLimitExceeded"} + ], + "documentation":"

Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in their own directory and below. To learn more, see Mounting a File System Using EFS Access Points.

This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.
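For orientation, a minimal boto3 sketch of creating such an access point; the file system ID, POSIX IDs, path, and tag value are placeholder assumptions.

```python
import boto3

efs = boto3.client("efs")

# Sketch: map every NFS request through this access point to UID/GID 1000 and
# expose /app as the root directory, creating it with mode 750 on first use.
response = efs.create_access_point(
    FileSystemId="fs-12345678",  # placeholder file system ID
    PosixUser={"Uid": 1000, "Gid": 1000},
    RootDirectory={
        "Path": "/app",
        "CreationInfo": {"OwnerUid": 1000, "OwnerGid": 1000, "Permissions": "750"},
    },
    Tags=[{"Key": "Name", "Value": "app-access-point"}],
)
print(response["AccessPointId"], response["LifeCycleState"])
```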

" + }, "CreateFileSystem":{ "name":"CreateFileSystem", "http":{ @@ -68,7 +86,24 @@ {"shape":"InternalServerError"}, {"shape":"FileSystemNotFound"} ], - "documentation":"

Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. If you add the Name tag to your file system, Amazon EFS returns it in the response to the DescribeFileSystems operation.

This operation requires permission for the elasticfilesystem:CreateTags action.

" + "documentation":"

Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. If you add the Name tag to your file system, Amazon EFS returns it in the response to the DescribeFileSystems operation.

This operation requires permission for the elasticfilesystem:CreateTags action.

", + "deprecated":true, + "deprecatedMessage":"Use TagResource." + }, + "DeleteAccessPoint":{ + "name":"DeleteAccessPoint", + "http":{ + "method":"DELETE", + "requestUri":"/2015-02-01/access-points/{AccessPointId}", + "responseCode":204 + }, + "input":{"shape":"DeleteAccessPointRequest"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"AccessPointNotFound"} + ], + "documentation":"

Deletes the specified access point. After deletion is complete, new clients can no longer connect to the access point. Clients connected to the access point at the time of deletion will continue to function until they terminate their connection.

This operation requires permissions for the elasticfilesystem:DeleteAccessPoint action.

" }, "DeleteFileSystem":{ "name":"DeleteFileSystem", @@ -86,6 +121,21 @@ ], "documentation":"

Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system.

You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. For more information, see DescribeMountTargets and DeleteMountTarget.

The DeleteFileSystem call returns while the file system state is still deleting. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass file system ID or creation token for the deleted file system, the DescribeFileSystems returns a 404 FileSystemNotFound error.

This operation requires permissions for the elasticfilesystem:DeleteFileSystem action.

" }, + "DeleteFileSystemPolicy":{ + "name":"DeleteFileSystemPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/2015-02-01/file-systems/{FileSystemId}/policy", + "responseCode":200 + }, + "input":{"shape":"DeleteFileSystemPolicyRequest"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"IncorrectFileSystemLifeCycleState"} + ], + "documentation":"

Deletes the FileSystemPolicy for the specified file system. The default FileSystemPolicy goes into effect once the existing policy is deleted. For more information about the default file system policy, see Using Resource-based Policies with EFS.

This operation requires permissions for the elasticfilesystem:DeleteFileSystemPolicy action.

" + }, "DeleteMountTarget":{ "name":"DeleteMountTarget", "http":{ @@ -115,7 +165,42 @@ {"shape":"InternalServerError"}, {"shape":"FileSystemNotFound"} ], - "documentation":"

Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't cause an error. For more information about tags and related restrictions, see Tag Restrictions in the AWS Billing and Cost Management User Guide.

This operation requires permissions for the elasticfilesystem:DeleteTags action.

" + "documentation":"

Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't cause an error. For more information about tags and related restrictions, see Tag Restrictions in the AWS Billing and Cost Management User Guide.

This operation requires permissions for the elasticfilesystem:DeleteTags action.

", + "deprecated":true, + "deprecatedMessage":"Use UntagResource." + }, + "DescribeAccessPoints":{ + "name":"DescribeAccessPoints", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/access-points", + "responseCode":200 + }, + "input":{"shape":"DescribeAccessPointsRequest"}, + "output":{"shape":"DescribeAccessPointsResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"AccessPointNotFound"} + ], + "documentation":"

Returns the description of a specific Amazon EFS access point if the AccessPointId is provided. If you provide an EFS FileSystemId, it returns descriptions of all access points for that file system. You can provide either an AccessPointId or a FileSystemId in the request, but not both.

This operation requires permissions for the elasticfilesystem:DescribeAccessPoints action.
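A minimal sketch of paging through the access points of one file system with this operation, assuming a placeholder file system ID:

```python
import boto3

efs = boto3.client("efs")

# Sketch: list every access point on the file system, ten per page.
kwargs = {"FileSystemId": "fs-12345678", "MaxResults": 10}
while True:
    page = efs.describe_access_points(**kwargs)
    for ap in page.get("AccessPoints", []):
        print(ap["AccessPointId"], ap.get("RootDirectory", {}).get("Path", "/"))
    token = page.get("NextToken")
    if not token:
        break
    kwargs["NextToken"] = token
```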

" + }, + "DescribeFileSystemPolicy":{ + "name":"DescribeFileSystemPolicy", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/file-systems/{FileSystemId}/policy", + "responseCode":200 + }, + "input":{"shape":"DescribeFileSystemPolicyRequest"}, + "output":{"shape":"FileSystemPolicyDescription"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"PolicyNotFound"} + ], + "documentation":"

Returns the FileSystemPolicy for the specified EFS file system.

This operation requires permissions for the elasticfilesystem:DescribeFileSystemPolicy action.

" }, "DescribeFileSystems":{ "name":"DescribeFileSystems", @@ -179,7 +264,8 @@ {"shape":"BadRequest"}, {"shape":"InternalServerError"}, {"shape":"FileSystemNotFound"}, - {"shape":"MountTargetNotFound"} + {"shape":"MountTargetNotFound"}, + {"shape":"AccessPointNotFound"} ], "documentation":"

Returns the descriptions of all the current mount targets, or a specific mount target, for a file system. When requesting all of the current mount targets, the order of mount targets returned in the response is unspecified.

This operation requires permissions for the elasticfilesystem:DescribeMountTargets action, on either the file system ID that you specify in FileSystemId, or on the file system of the mount target that you specify in MountTargetId.

" }, @@ -197,7 +283,26 @@ {"shape":"InternalServerError"}, {"shape":"FileSystemNotFound"} ], - "documentation":"

Returns the tags associated with a file system. The order of tags returned in the response of one DescribeTags call and the order of tags returned across the responses of a multiple-call iteration (when using pagination) is unspecified.

This operation requires permissions for the elasticfilesystem:DescribeTags action.

" + "documentation":"

Returns the tags associated with a file system. The order of tags returned in the response of one DescribeTags call and the order of tags returned across the responses of a multiple-call iteration (when using pagination) is unspecified.

This operation requires permissions for the elasticfilesystem:DescribeTags action.

", + "deprecated":true, + "deprecatedMessage":"Use ListTagsForResource." + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/resource-tags/{ResourceId}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"AccessPointNotFound"} + ], + "documentation":"

Lists all tags for a top-level EFS resource. You must provide the ID of the resource that you want to retrieve the tags for.

This operation requires permissions for the elasticfilesystem:DescribeAccessPoints action.

" }, "ModifyMountTargetSecurityGroups":{ "name":"ModifyMountTargetSecurityGroups", @@ -217,6 +322,23 @@ ], "documentation":"

Modifies the set of security groups in effect for a mount target.

When you create a mount target, Amazon EFS also creates a new network interface. For more information, see CreateMountTarget. This operation replaces the security groups in effect for the network interface associated with a mount target, with the SecurityGroups provided in the request. This operation requires that the network interface of the mount target has been created and the lifecycle state of the mount target is not deleted.

The operation requires permissions for the following actions:

  • elasticfilesystem:ModifyMountTargetSecurityGroups action on the mount target's file system.

  • ec2:ModifyNetworkInterfaceAttribute action on the mount target's network interface.

" }, + "PutFileSystemPolicy":{ + "name":"PutFileSystemPolicy", + "http":{ + "method":"PUT", + "requestUri":"/2015-02-01/file-systems/{FileSystemId}/policy", + "responseCode":200 + }, + "input":{"shape":"PutFileSystemPolicyRequest"}, + "output":{"shape":"FileSystemPolicyDescription"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"InvalidPolicyException"}, + {"shape":"IncorrectFileSystemLifeCycleState"} + ], + "documentation":"

Applies an Amazon EFS FileSystemPolicy to an Amazon EFS file system. A file system policy is an IAM resource-based policy and can contain multiple policy statements. A file system always has exactly one file system policy, which can be the default policy or an explicit policy set or updated using this API operation. When an explicit policy is set, it overrides the default policy. For more information about the default file system policy, see Using Resource-based Policies with EFS.

This operation requires permissions for the elasticfilesystem:PutFileSystemPolicy action.
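As a hedged sketch of applying a file system policy with this operation, the example below denies unencrypted (non-TLS) access; the file system ID, account ID, and region in the ARN are placeholder assumptions, and the policy itself is only illustrative.

```python
import json

import boto3

efs = boto3.client("efs")

# Sketch: require encrypted transport for all clients of the file system.
policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "DenyUnencryptedTransport",
            "Effect": "Deny",
            "Principal": {"AWS": "*"},
            "Action": "*",
            "Resource": "arn:aws:elasticfilesystem:us-east-1:123456789012:file-system/fs-12345678",
            "Condition": {"Bool": {"aws:SecureTransport": "false"}},
        }
    ],
}

response = efs.put_file_system_policy(
    FileSystemId="fs-12345678",
    Policy=json.dumps(policy),
    BypassPolicyLockoutSafetyCheck=False,  # keep the lockout safety check enabled
)
print(response["FileSystemId"])
```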

" + }, "PutLifecycleConfiguration":{ "name":"PutLifecycleConfiguration", "http":{ @@ -234,6 +356,38 @@ ], "documentation":"

Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A LifecycleConfiguration applies to all files in a file system.

Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management.

In the request, specify the following:

  • The ID for the file system for which you are enabling, disabling, or modifying lifecycle management.

  • A LifecyclePolicies array of LifecyclePolicy objects that define when files are moved to the IA storage class. The array can contain only one LifecyclePolicy item.

This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation.

To apply a LifecycleConfiguration object to an encrypted file system, you need the same AWS Key Management Service (AWS KMS) permissions as when you created the encrypted file system.
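A minimal boto3 sketch of the request shape described above, using a placeholder file system ID:

```python
import boto3

efs = boto3.client("efs")

# Sketch: move files not accessed for 30 days to the Infrequent Access class.
response = efs.put_lifecycle_configuration(
    FileSystemId="fs-12345678",
    LifecyclePolicies=[{"TransitionToIA": "AFTER_30_DAYS"}],
)
print(response["LifecyclePolicies"])

# Passing an empty LifecyclePolicies list deletes the configuration and
# disables lifecycle management again:
# efs.put_lifecycle_configuration(FileSystemId="fs-12345678", LifecyclePolicies=[])
```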

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/2015-02-01/resource-tags/{ResourceId}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"AccessPointNotFound"} + ], + "documentation":"

Creates a tag for an EFS resource. You can create tags for EFS file systems and access points using this API operation.

This operation requires permissions for the elasticfilesystem:TagResource action.
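For illustration, a minimal sketch of the new resource-tagging trio (TagResource, ListTagsForResource, UntagResource) that supersedes the deprecated CreateTags, DescribeTags, and DeleteTags operations; the resource ID and tag values are placeholder assumptions.

```python
import boto3

efs = boto3.client("efs")

RESOURCE_ID = "fs-12345678"  # placeholder; a file system ID or access point ID works here

# Sketch: add tags, read them back, then remove one of them.
efs.tag_resource(
    ResourceId=RESOURCE_ID,
    Tags=[
        {"Key": "Name", "Value": "shared-data"},
        {"Key": "team", "Value": "platform"},
    ],
)

tags = efs.list_tags_for_resource(ResourceId=RESOURCE_ID)["Tags"]
print(tags)

efs.untag_resource(ResourceId=RESOURCE_ID, TagKeys=["team"])
```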

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/2015-02-01/resource-tags/{ResourceId}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"AccessPointNotFound"} + ], + "documentation":"

Removes tags from an EFS resource. You can remove tags from EFS file systems and access points using this API operation.

This operation requires permissions for the elasticfilesystem:UntagResource action.

" + }, "UpdateFileSystem":{ "name":"UpdateFileSystem", "http":{ @@ -256,6 +410,97 @@ } }, "shapes":{ + "AccessPointAlreadyExists":{ + "type":"structure", + "required":[ + "ErrorCode", + "AccessPointId" + ], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"}, + "AccessPointId":{"shape":"AccessPointId"} + }, + "documentation":"

Returned if the access point you are trying to create already exists, with the creation token you provided in the request.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "AccessPointArn":{"type":"string"}, + "AccessPointDescription":{ + "type":"structure", + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

The opaque string specified in the request to ensure idempotent creation.

" + }, + "Name":{ + "shape":"Name", + "documentation":"

The name of the access point. This is the value of the Name tag.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags associated with the access point, presented as an array of Tag objects.

" + }, + "AccessPointId":{ + "shape":"AccessPointId", + "documentation":"

The ID of the access point, assigned by Amazon EFS.

" + }, + "AccessPointArn":{ + "shape":"AccessPointArn", + "documentation":"

The unique Amazon Resource Name (ARN) associated with the access point.

" + }, + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The ID of the EFS file system that the access point applies to.

" + }, + "PosixUser":{ + "shape":"PosixUser", + "documentation":"

The full POSIX identity, including the user ID, group ID, and secondary group IDs on the access point that is used for all file operations by NFS clients using the access point.

" + }, + "RootDirectory":{ + "shape":"RootDirectory", + "documentation":"

The directory on the Amazon EFS file system that the access point exposes as the root directory to NFS clients using the access point.

" + }, + "OwnerId":{ + "shape":"AwsAccountId", + "documentation":"

Identifies the AWS account that owns the access point resource.

" + }, + "LifeCycleState":{ + "shape":"LifeCycleState", + "documentation":"

Identifies the lifecycle phase of the access point.

" + } + }, + "documentation":"

Provides a description of an EFS file system access point.

" + }, + "AccessPointDescriptions":{ + "type":"list", + "member":{"shape":"AccessPointDescription"} + }, + "AccessPointId":{"type":"string"}, + "AccessPointLimitExceeded":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned if the AWS account has already created the maximum number of access points allowed per file system.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccessPointNotFound":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned if the specified AccessPointId value doesn't exist in the requester's AWS account.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "AvailabilityZoneId":{"type":"string"}, + "AvailabilityZoneName":{"type":"string"}, "AwsAccountId":{"type":"string"}, "BadRequest":{ "type":"structure", @@ -268,13 +513,50 @@ "error":{"httpStatusCode":400}, "exception":true }, + "BypassPolicyLockoutSafetyCheck":{"type":"boolean"}, + "ClientToken":{ + "type":"string", + "max":64, + "min":1 + }, + "CreateAccessPointRequest":{ + "type":"structure", + "required":[ + "ClientToken", + "FileSystemId" + ], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A string of up to 64 ASCII characters that Amazon EFS uses to ensure idempotent creation.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"Tags", + "documentation":"

Creates tags associated with the access point. Each tag is a key-value pair.

" + }, + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The ID of the EFS file system that the access point provides access to.

" + }, + "PosixUser":{ + "shape":"PosixUser", + "documentation":"

The operating system user and group applied to all file system requests made using the access point.

" + }, + "RootDirectory":{ + "shape":"RootDirectory", + "documentation":"

Specifies the directory on the Amazon EFS file system that the access point exposes as the root directory of your file system to NFS clients using the access point. The clients using the access point can only access the root directory and below. If the RootDirectory > Path specified does not exist, EFS creates it and applies the CreationInfo settings when a client connects to an access point. When specifying a RootDirectory, you need to provide the Path, and the CreationInfo is optional.

" + } + } + }, "CreateFileSystemRequest":{ "type":"structure", "required":["CreationToken"], "members":{ "CreationToken":{ "shape":"CreationToken", - "documentation":"

A string of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent creation.

" + "documentation":"

A string of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent creation.

", + "idempotencyToken":true }, "PerformanceMode":{ "shape":"PerformanceMode", @@ -348,11 +630,58 @@ }, "documentation":"

" }, + "CreationInfo":{ + "type":"structure", + "required":[ + "OwnerUid", + "OwnerGid", + "Permissions" + ], + "members":{ + "OwnerUid":{ + "shape":"OwnerUid", + "documentation":"

Specifies the POSIX user ID to apply to the RootDirectory. Accepts values from 0 to 2^32 - 1 (4294967295).

" + }, + "OwnerGid":{ + "shape":"OwnerGid", + "documentation":"

Specifies the POSIX group ID to apply to the RootDirectory. Accepts values from 0 to 2^32 - 1 (4294967295).

" + }, + "Permissions":{ + "shape":"Permissions", + "documentation":"

Specifies the POSIX permissions to apply to the RootDirectory, in the format of an octal number representing the file's mode bits.

" + } + }, + "documentation":"

Required if the RootDirectory > Path specified does not exist. Specifies the POSIX IDs and permissions to apply to the access point's RootDirectory > Path. If the access point root directory does not exist, EFS creates it with these settings when a client connects to the access point. When specifying CreationInfo, you must include values for all properties.

If you do not provide CreationInfo and the specified RootDirectory does not exist, attempts to mount the file system using the access point will fail.

" + }, "CreationToken":{ "type":"string", "max":64, "min":1 }, + "DeleteAccessPointRequest":{ + "type":"structure", + "required":["AccessPointId"], + "members":{ + "AccessPointId":{ + "shape":"AccessPointId", + "documentation":"

The ID of the access point that you want to delete.

", + "location":"uri", + "locationName":"AccessPointId" + } + } + }, + "DeleteFileSystemPolicyRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

Specifies the EFS file system for which to delete the FileSystemPolicy.

", + "location":"uri", + "locationName":"FileSystemId" + } + } + }, "DeleteFileSystemRequest":{ "type":"structure", "required":["FileSystemId"], @@ -410,12 +739,66 @@ "error":{"httpStatusCode":504}, "exception":true }, + "DescribeAccessPointsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

(Optional) When retrieving all access points for a file system, you can specify the MaxResults parameter to limit the number of objects returned in a response. The default value is 100.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

NextToken is present if the response is paginated. You can use NextToken in the subsequent request to fetch the next page of access point descriptions.

", + "location":"querystring", + "locationName":"NextToken" + }, + "AccessPointId":{ + "shape":"AccessPointId", + "documentation":"

(Optional) Specifies an EFS access point to describe in the response; mutually exclusive with FileSystemId.

", + "location":"querystring", + "locationName":"AccessPointId" + }, + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

(Optional) If you provide a FileSystemId, EFS returns all access points for that file system; mutually exclusive with AccessPointId.

", + "location":"querystring", + "locationName":"FileSystemId" + } + } + }, + "DescribeAccessPointsResponse":{ + "type":"structure", + "members":{ + "AccessPoints":{ + "shape":"AccessPointDescriptions", + "documentation":"

An array of access point descriptions.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Present if there are more access points than returned in the response. You can use the NextToken in the subsequent request to fetch the additional descriptions.

" + } + } + }, + "DescribeFileSystemPolicyRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

Specifies which EFS file system to retrieve the FileSystemPolicy for.

", + "location":"uri", + "locationName":"FileSystemId" + } + } + }, "DescribeFileSystemsRequest":{ "type":"structure", "members":{ "MaxItems":{ "shape":"MaxItems", - "documentation":"

(Optional) Specifies the maximum number of file systems to return in the response (integer). Currently, this number is automatically set to 10, and other values are ignored. The response is paginated at 10 per page if you have more than 10 file systems.

", + "documentation":"

(Optional) Specifies the maximum number of file systems to return in the response (integer). This number is automatically set to 100. The response is paginated at 100 per page if you have more than 100 file systems.

", "location":"querystring", "locationName":"MaxItems" }, @@ -497,7 +880,7 @@ "members":{ "MaxItems":{ "shape":"MaxItems", - "documentation":"

(Optional) Maximum number of mount targets to return in the response. Currently, this number is automatically set to 10, and other values are ignored. The response is paginated at 10 per page if you have more than 10 mount targets.

", + "documentation":"

(Optional) Maximum number of mount targets to return in the response. Currently, this number is automatically set to 10, and other values are ignored. The response is paginated at 100 per page if you have more than 100 mount targets.

", "location":"querystring", "locationName":"MaxItems" }, @@ -509,15 +892,21 @@ }, "FileSystemId":{ "shape":"FileSystemId", - "documentation":"

(Optional) ID of the file system whose mount targets you want to list (String). It must be included in your request if MountTargetId is not included.

", + "documentation":"

(Optional) ID of the file system whose mount targets you want to list (String). It must be included in your request if an AccessPointId or MountTargetId is not included. Accepts either a file system ID or ARN as input.

", "location":"querystring", "locationName":"FileSystemId" }, "MountTargetId":{ "shape":"MountTargetId", - "documentation":"

(Optional) ID of the mount target that you want to have described (String). It must be included in your request if FileSystemId is not included.

", + "documentation":"

(Optional) ID of the mount target that you want to have described (String). It must be included in your request if FileSystemId is not included. Accepts either a mount target ID or ARN as input.

", "location":"querystring", "locationName":"MountTargetId" + }, + "AccessPointId":{ + "shape":"AccessPointId", + "documentation":"

(Optional) The ID of the access point whose mount targets you want to list. It must be included in your request if a FileSystemId or MountTargetId is not included in your request. Accepts either an access point ID or ARN as input.

", + "location":"querystring", + "locationName":"AccessPointId" } }, "documentation":"

" @@ -546,7 +935,7 @@ "members":{ "MaxItems":{ "shape":"MaxItems", - "documentation":"

(Optional) The maximum number of file system tags to return in the response. Currently, this number is automatically set to 10, and other values are ignored. The response is paginated at 10 per page if you have more than 10 tags.

", + "documentation":"

(Optional) The maximum number of file system tags to return in the response. Currently, this number is automatically set to 100, and other values are ignored. The response is paginated at 100 per page if you have more than 100 tags.

", "location":"querystring", "locationName":"MaxItems" }, @@ -720,6 +1109,19 @@ "type":"long", "min":0 }, + "FileSystemPolicyDescription":{ + "type":"structure", + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

Specifies the EFS file system to which the FileSystemPolicy applies.

" + }, + "Policy":{ + "shape":"Policy", + "documentation":"

The JSON formatted FileSystemPolicy for the EFS file system.

" + } + } + }, "FileSystemSize":{ "type":"structure", "required":["Value"], @@ -747,6 +1149,11 @@ "type":"long", "min":0 }, + "Gid":{ + "type":"long", + "max":4294967295, + "min":0 + }, "IncorrectFileSystemLifeCycleState":{ "type":"structure", "required":["ErrorCode"], @@ -791,6 +1198,16 @@ "error":{"httpStatusCode":500}, "exception":true }, + "InvalidPolicyException":{ + "type":"structure", + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned if the FileSystemPolicy is malformed or contains an error such as an invalid parameter value or a missing required parameter. Returned in the case of a policy lockout safety check error.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "IpAddress":{"type":"string"}, "IpAddressInUse":{ "type":"structure", @@ -841,11 +1258,52 @@ }, "documentation":"

Describes a policy used by EFS lifecycle management to transition files to the Infrequent Access (IA) storage class.

" }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceId"], + "members":{ + "ResourceId":{ + "shape":"ResourceId", + "documentation":"

Specifies the EFS resource you want to retrieve tags for. You can retrieve tags for EFS file systems and access points using this API endpoint.

", + "location":"uri", + "locationName":"ResourceId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

(Optional) Specifies the maximum number of tag objects to return in the response. The default value is 100.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

You can use NextToken in a subsequent request to fetch the next page of tag objects if the response payload was paginated.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

An array of the tags for the specified EFS resource.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

NextToken is present if the response payload is paginated. You can use NextToken in a subsequent request to fetch the next page of tags.

" + } + } + }, "Marker":{"type":"string"}, "MaxItems":{ "type":"integer", "min":1 }, + "MaxResults":{ + "type":"integer", + "min":1 + }, "ModifyMountTargetSecurityGroupsRequest":{ "type":"structure", "required":["MountTargetId"], @@ -914,6 +1372,14 @@ "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", "documentation":"

The ID of the network interface that Amazon EFS created when it created the mount target.

" + }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

The unique and consistent identifier of the Availability Zone (AZ) that the mount target resides in. For example, use1-az1 is an AZ ID for the us-east-1 Region and it has the same location in every AWS account.

" + }, + "AvailabilityZoneName":{ + "shape":"AvailabilityZoneName", + "documentation":"

The name of the Availability Zone (AZ) that the mount target resides in. AZs are independently mapped to names for each AWS account. For example, the Availability Zone us-east-1a for your AWS account might not be the same location as us-east-1a for another AWS account.

" } }, "documentation":"

Provides a description of a mount target.

" @@ -934,6 +1400,7 @@ "error":{"httpStatusCode":404}, "exception":true }, + "Name":{"type":"string"}, "NetworkInterfaceId":{"type":"string"}, "NetworkInterfaceLimitExceeded":{ "type":"structure", @@ -957,6 +1424,21 @@ "error":{"httpStatusCode":409}, "exception":true }, + "OwnerGid":{ + "type":"long", + "max":4294967295, + "min":0 + }, + "OwnerUid":{ + "type":"long", + "max":4294967295, + "min":0 + }, + "Path":{ + "type":"string", + "max":100, + "min":1 + }, "PerformanceMode":{ "type":"string", "enum":[ @@ -964,10 +1446,70 @@ "maxIO" ] }, + "Permissions":{ + "type":"string", + "pattern":"^[0-7]{3,4}$" + }, + "Policy":{"type":"string"}, + "PolicyNotFound":{ + "type":"structure", + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned if the default file system policy is in effect for the EFS file system specified.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "PosixUser":{ + "type":"structure", + "required":[ + "Uid", + "Gid" + ], + "members":{ + "Uid":{ + "shape":"Uid", + "documentation":"

The POSIX user ID used for all file system operations using this access point.

" + }, + "Gid":{ + "shape":"Gid", + "documentation":"

The POSIX group ID used for all file system operations using this access point.

" + }, + "SecondaryGids":{ + "shape":"SecondaryGids", + "documentation":"

Secondary POSIX group IDs used for all file system operations using this access point.

" + } + }, + "documentation":"

The full POSIX identity, including the user ID, group ID, and any secondary group IDs, on the access point that is used for all file system operations performed by NFS clients using the access point.

" + }, "ProvisionedThroughputInMibps":{ "type":"double", "min":1.0 }, + "PutFileSystemPolicyRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "Policy" + ], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The ID of the EFS file system that you want to create or update the FileSystemPolicy for.

", + "location":"uri", + "locationName":"FileSystemId" + }, + "Policy":{ + "shape":"Policy", + "documentation":"

The FileSystemPolicy that you're creating. Accepts a JSON formatted policy definition. To find out more about the elements that make up a file system policy, see EFS Resource-based Policies.

" + }, + "BypassPolicyLockoutSafetyCheck":{ + "shape":"BypassPolicyLockoutSafetyCheck", + "documentation":"

(Optional) A flag to indicate whether to bypass the FileSystemPolicy lockout safety check. The policy lockout safety check determines whether the policy in the request will lock the principal that is making the request out of making future PutFileSystemPolicy requests on the file system. Set BypassPolicyLockoutSafetyCheck to True only when you intend to prevent the principal that is making the request from making a subsequent PutFileSystemPolicy request on the file system. The default value is False.

" + } + } + }, "PutLifecycleConfigurationRequest":{ "type":"structure", "required":[ @@ -987,6 +1529,27 @@ } } }, + "ResourceId":{"type":"string"}, + "RootDirectory":{ + "type":"structure", + "members":{ + "Path":{ + "shape":"Path", + "documentation":"

Specifies the path on the EFS file system to expose as the root directory to NFS clients using the access point to access the EFS file system. A path can have up to four subdirectories. If the specified path does not exist, you are required to provide the CreationInfo.

" + }, + "CreationInfo":{ + "shape":"CreationInfo", + "documentation":"

(Optional) Specifies the POSIX IDs and permissions to apply to the access point's RootDirectory. If the RootDirectory > Path specified does not exist, EFS creates the root directory using the CreationInfo settings when a client connects to an access point. When specifying the CreationInfo, you must provide values for all properties.

If you do not provide CreationInfo and the specified RootDirectory > Path does not exist, attempts to mount the file system using the access point will fail.

" + } + }, + "documentation":"

Specifies the directory on the Amazon EFS file system that the access point provides access to. The access point exposes the specified file system path as the root directory of your file system to applications using the access point. NFS clients using the access point can only access data in the access point's RootDirectory and its subdirectories.

" + }, + "SecondaryGids":{ + "type":"list", + "member":{"shape":"Gid"}, + "max":16, + "min":0 + }, "SecurityGroup":{"type":"string"}, "SecurityGroupLimitExceeded":{ "type":"structure", @@ -1052,7 +1615,28 @@ }, "TagKeys":{ "type":"list", - "member":{"shape":"TagKey"} + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceId", + "Tags" + ], + "members":{ + "ResourceId":{ + "shape":"ResourceId", + "documentation":"

The ID specifying the EFS resource that you want to create a tag for.

", + "location":"uri", + "locationName":"ResourceId" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

" + } + } }, "TagValue":{ "type":"string", @@ -1081,6 +1665,7 @@ ] }, "Timestamp":{"type":"timestamp"}, + "Token":{"type":"string"}, "TooManyRequests":{ "type":"structure", "required":["ErrorCode"], @@ -1102,6 +1687,11 @@ "AFTER_90_DAYS" ] }, + "Uid":{ + "type":"long", + "max":4294967295, + "min":0 + }, "UnsupportedAvailabilityZone":{ "type":"structure", "required":["ErrorCode"], @@ -1113,6 +1703,22 @@ "error":{"httpStatusCode":400}, "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":["ResourceId"], + "members":{ + "ResourceId":{ + "shape":"ResourceId", + "documentation":"

Specifies the EFS resource that you want to remove tags from.

", + "location":"uri", + "locationName":"ResourceId" + }, + "TagKeys":{ + "shape":"TagKeys", + "documentation":"

The keys of the key:value tag pairs that you want to remove from the specified EFS resource.

" + } + } + }, "UpdateFileSystemRequest":{ "type":"structure", "required":["FileSystemId"], diff --git a/botocore/data/eks/2017-11-01/service-2.json b/botocore/data/eks/2017-11-01/service-2.json index 23757f79..8d9b1ac7 100644 --- a/botocore/data/eks/2017-11-01/service-2.json +++ b/botocore/data/eks/2017-11-01/service-2.json @@ -48,7 +48,7 @@ {"shape":"ResourceLimitExceededException"}, {"shape":"UnsupportedAvailabilityZoneException"} ], - "documentation":"

Creates an AWS Fargate profile for your Amazon EKS cluster. You must have at least one Fargate profile in a cluster to be able to schedule pods on Fargate infrastructure.

The Fargate profile allows an administrator to declare which pods run on Fargate infrastructure and specify which pods run on which Fargate profile. This declaration is done through the profile’s selectors. Each profile can have up to five selectors that contain a namespace and labels. A namespace is required for every selector. The label field consists of multiple optional key-value pairs. Pods that match the selectors are scheduled on Fargate infrastructure. If a to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is scheduled on Fargate infrastructure.

When you create a Fargate profile, you must specify a pod execution role to use with the pods that are scheduled with the profile. This role is added to the cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the kubelet that is running on the Fargate infrastructure can register with your Amazon EKS cluster. This role is what allows Fargate infrastructure to appear in your cluster as nodes. The pod execution role also provides IAM permissions to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more information, see Pod Execution Role in the Amazon EKS User Guide.

Fargate profiles are immutable. However, you can create a new updated profile to replace an existing profile and then delete the original after the updated profile has finished creating.

If any Fargate profiles in a cluster are in the DELETING status, you must wait for that Fargate profile to finish deleting before you can create any other profiles in that cluster.

For more information, see AWS Fargate Profile in the Amazon EKS User Guide.

" + "documentation":"

Creates an AWS Fargate profile for your Amazon EKS cluster. You must have at least one Fargate profile in a cluster to be able to run pods on Fargate.

The Fargate profile allows an administrator to declare which pods run on Fargate and specify which pods run on which Fargate profile. This declaration is done through the profile’s selectors. Each profile can have up to five selectors that contain a namespace and labels. A namespace is required for every selector. The label field consists of multiple optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is run on Fargate.

When you create a Fargate profile, you must specify a pod execution role to use with the pods that are scheduled with the profile. This role is added to the cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the kubelet that is running on the Fargate infrastructure can register with your Amazon EKS cluster and appear in your cluster as a node. The pod execution role also provides IAM permissions to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more information, see Pod Execution Role in the Amazon EKS User Guide.

Fargate profiles are immutable. However, you can create a new updated profile to replace an existing profile and then delete the original after the updated profile has finished creating.

If any Fargate profiles in a cluster are in the DELETING status, you must wait for that Fargate profile to finish deleting before you can create any other profiles in that cluster.

For more information, see AWS Fargate Profile in the Amazon EKS User Guide.
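A minimal boto3 sketch of creating such a Fargate profile; the cluster name, role ARN, subnet ID, and selector label are placeholder assumptions.

```python
import boto3

eks = boto3.client("eks")

# Sketch: run pods from the "default" namespace carrying the label
# infra=fargate on Fargate.
response = eks.create_fargate_profile(
    fargateProfileName="default-fargate",
    clusterName="my-cluster",  # placeholder cluster name
    podExecutionRoleArn="arn:aws:iam::123456789012:role/eksFargatePodExecutionRole",  # placeholder
    subnets=["subnet-0123456789abcdef0"],  # private subnets only
    selectors=[{"namespace": "default", "labels": {"infra": "fargate"}}],
    tags={"team": "platform"},
)
print(response["fargateProfile"]["status"])
```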

" }, "CreateNodegroup":{ "name":"CreateNodegroup", @@ -100,7 +100,7 @@ {"shape":"ServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes an AWS Fargate profile.

When you delete a Fargate profile, any pods that were scheduled onto Fargate infrastructure with the profile are deleted. If those pods match another Fargate profile, then they are scheduled on Fargate infrastructure with that profile. If they no longer match any Fargate profiles, then they are not scheduled on Fargate infrastructure.

Only one Fargate profile in a cluster can be in the DELETING status at a time. You must wait for a Fargate profile to finish deleting before you can delete any other profiles in that cluster.

" + "documentation":"

Deletes an AWS Fargate profile.

When you delete a Fargate profile, any pods running on Fargate that were created with the profile are deleted. If those pods match another Fargate profile, then they are scheduled on Fargate with that profile. If they no longer match any Fargate profiles, then they are not scheduled on Fargate and they may remain in a pending state.

Only one Fargate profile in a cluster can be in the DELETING status at a time. You must wait for a Fargate profile to finish deleting before you can delete any other profiles in that cluster.

" }, "DeleteNodegroup":{ "name":"DeleteNodegroup", @@ -587,7 +587,7 @@ }, "subnets":{ "shape":"StringList", - "documentation":"

The IDs of subnets to launch Fargate pods into. At this time, Fargate pods are not assigned public IP addresses, so only private subnets (with no direct route to an Internet Gateway) are accepted for this parameter.

" + "documentation":"

The IDs of subnets to launch your pods into. At this time, pods running on Fargate are not assigned public IP addresses, so only private subnets (with no direct route to an Internet Gateway) are accepted for this parameter.

" }, "selectors":{ "shape":"FargateProfileSelectors", @@ -949,11 +949,11 @@ }, "podExecutionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the pod execution role to use for pods that match the selectors in the Fargate profile. For more information, see Pod Execution Role in the Amazon EKS User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the pod execution role to use for pods that match the selectors in the Fargate profile. For more information, see Pod Execution Role in the Amazon EKS User Guide.

" }, "subnets":{ "shape":"StringList", - "documentation":"

The IDs of subnets to launch Fargate pods into.

" + "documentation":"

The IDs of subnets to launch pods into.

" }, "selectors":{ "shape":"FargateProfileSelectors", @@ -1403,10 +1403,12 @@ "type":"string", "enum":[ "AutoScalingGroupNotFound", + "AutoScalingGroupInvalidConfiguration", "Ec2SecurityGroupNotFound", "Ec2SecurityGroupDeletionFailure", "Ec2LaunchTemplateNotFound", "Ec2LaunchTemplateVersionMismatch", + "Ec2SubnetNotFound", "IamInstanceProfileNotFound", "IamNodeRoleNotFound", "AsgInstanceLaunchFailures", @@ -1891,7 +1893,8 @@ "LabelsToRemove", "MaxSize", "MinSize", - "ReleaseVersion" + "ReleaseVersion", + "PublicAccessCidrs" ] }, "UpdateParams":{ @@ -1929,11 +1932,15 @@ }, "endpointPublicAccess":{ "shape":"BoxedBoolean", - "documentation":"

Set this value to false to disable public access for your cluster's Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes API server can receive only requests from within the cluster VPC. The default value for this parameter is true, which enables public access for your Kubernetes API server. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" + "documentation":"

Set this value to false to disable public access to your cluster's Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is true, which enables public access for your Kubernetes API server. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" }, "endpointPrivateAccess":{ "shape":"BoxedBoolean", - "documentation":"

Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" + "documentation":"

Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. If you disable private access and you have worker nodes or AWS Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the worker nodes or Fargate pods. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" + }, + "publicAccessCidrs":{ + "shape":"StringList", + "documentation":"

The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access and you have worker nodes or AWS Fargate pods in the cluster, then ensure that you specify the necessary CIDR blocks. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .
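As a hedged sketch of the new publicAccessCidrs setting, the call below restricts the public endpoint to a single CIDR while keeping the private endpoint enabled; the cluster name and CIDR block are placeholder assumptions.

```python
import boto3

eks = boto3.client("eks")

# Sketch: limit public API server access to one CIDR and keep private access
# on so worker nodes and Fargate pods can still reach the control plane.
response = eks.update_cluster_config(
    name="my-cluster",  # placeholder cluster name
    resourcesVpcConfig={
        "endpointPublicAccess": True,
        "endpointPrivateAccess": True,
        "publicAccessCidrs": ["203.0.113.0/24"],  # placeholder CIDR
    },
)
print(response["update"]["id"], response["update"]["status"])
```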

" } }, "documentation":"

An object representing the VPC configuration to use for an Amazon EKS cluster.

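Taken together, the two endpoint flags and the new publicAccessCidrs list are what UpdateClusterConfig accepts under resourcesVpcConfig. A minimal sketch with the botocore client, assuming a placeholder cluster name, region, and CIDR block; the returned update/status keys are the standard UpdateClusterConfig response and are not shown in this diff:

import botocore.session

session = botocore.session.get_session()
eks = session.create_client('eks', region_name='us-west-2')

# Restrict the public API server endpoint to one CIDR block while keeping
# private access enabled (cluster name and CIDR are placeholders).
response = eks.update_cluster_config(
    name='my-cluster',
    resourcesVpcConfig={
        'endpointPublicAccess': True,
        'endpointPrivateAccess': True,
        'publicAccessCidrs': ['203.0.113.0/24'],
    },
)
print(response['update']['status'])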
" @@ -1959,11 +1966,15 @@ }, "endpointPublicAccess":{ "shape":"Boolean", - "documentation":"

This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can receive only requests that originate from within the cluster VPC.

" + "documentation":"

This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can only receive requests that originate from within the cluster VPC.

" }, "endpointPrivateAccess":{ "shape":"Boolean", - "documentation":"

This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet.

" + "documentation":"

This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet. If this value is disabled and you have worker nodes or AWS Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the worker nodes or Fargate pods. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" + }, + "publicAccessCidrs":{ + "shape":"StringList", + "documentation":"

The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the listed CIDR blocks is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access and you have worker nodes or AWS Fargate pods in the cluster, then ensure that the necessary CIDR blocks are listed. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" } }, "documentation":"

An object representing an Amazon EKS cluster VPC configuration response.

" diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index dd930141..764f65b6 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -71,7 +71,7 @@ "us-east-1" : { } } }, - "acm" : { + "access-analyzer" : { "endpoints" : { "ap-east-1" : { }, "ap-northeast-1" : { }, @@ -93,6 +93,58 @@ "us-west-2" : { } } }, + "acm" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "acm-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "acm-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "acm-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "acm-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "acm-fips.us-west-2.amazonaws.com" + } + } + }, "acm-pca" : { "defaults" : { "protocols" : [ "https" ] @@ -384,9 +436,12 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -447,6 +502,7 @@ "eu-west-2" : { }, "eu-west-3" : { }, "me-south-1" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -586,9 +642,15 @@ "cloud9" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -767,6 +829,7 @@ }, "codecommit" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -973,8 +1036,10 @@ "connect" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -1198,6 +1263,12 @@ }, "hostname" : "rds.ap-southeast-2.amazonaws.com" }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "rds.ca-central-1.amazonaws.com" + }, "eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -1255,6 +1326,7 @@ "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1628,6 +1700,7 @@ "forecast" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, @@ -1746,6 +1819,8 @@ }, "groundstation" : { "endpoints" : { + "eu-north-1" : { }, + "me-south-1" : { }, "us-east-2" : { }, "us-west-2" : { } } @@ -1964,6 +2039,7 @@ }, "iotsecuredtunneling" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -1975,6 +2051,7 
@@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2011,6 +2088,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2042,16 +2120,20 @@ }, "kinesisanalytics" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -2059,11 +2141,20 @@ }, "kinesisvideo" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-2" : { } } }, @@ -2202,6 +2293,7 @@ }, "mediaconnect" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2363,6 +2455,7 @@ }, "mq" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2370,6 +2463,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2397,6 +2491,8 @@ }, "hostname" : "mq-fips.us-west-2.amazonaws.com" }, + "me-south-1" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -2602,6 +2698,26 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-global" }, + "outposts" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "pinpoint" : { "defaults" : { "credentialScope" : { @@ -2641,6 +2757,7 @@ }, "polly" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2652,6 +2769,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2743,6 +2861,7 @@ }, "ram" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2754,6 +2873,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2902,6 +3022,7 @@ "protocols" : [ "https" ] }, "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2909,6 +3030,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2998,6 +3120,13 @@ "hostname" : "s3.ap-southeast-2.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ] }, + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "s3.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, @@ 
-3020,7 +3149,7 @@ "signatureVersions" : [ "s3", "s3v4" ] }, "us-east-1" : { - "hostname" : "s3.amazonaws.com", + "hostname" : "s3.us-east-1.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ] }, "us-east-2" : { }, @@ -3034,7 +3163,7 @@ } }, "isRegionalized" : true, - "partitionEndpoint" : "us-east-1" + "partitionEndpoint" : "aws-global" }, "s3-control" : { "defaults" : { @@ -4032,6 +4161,16 @@ "cn-northwest-1" : { } } }, + "appsync" : { + "endpoints" : { + "cn-north-1" : { } + } + }, + "athena" : { + "endpoints" : { + "cn-northwest-1" : { } + } + }, "autoscaling" : { "defaults" : { "protocols" : [ "http", "https" ] @@ -4166,6 +4305,12 @@ "cn-northwest-1" : { } } }, + "elasticfilesystem" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "elasticloadbalancing" : { "defaults" : { "protocols" : [ "https" ] @@ -4230,6 +4375,12 @@ }, "isRegionalized" : true }, + "health" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "iam" : { "endpoints" : { "aws-cn-global" : { @@ -4302,6 +4453,16 @@ "cn-northwest-1" : { } } }, + "neptune" : { + "endpoints" : { + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "rds.cn-northwest-1.amazonaws.com.cn" + } + } + }, "polly" : { "endpoints" : { "cn-northwest-1" : { } @@ -4351,6 +4512,25 @@ } } }, + "secretsmanager" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "serverlessrepo" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { + "protocols" : [ "https" ] + }, + "cn-northwest-1" : { + "protocols" : [ "https" ] + } + } + }, "sms" : { "endpoints" : { "cn-north-1" : { }, @@ -4395,7 +4575,8 @@ }, "storagegateway" : { "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "streams.dynamodb" : { @@ -4462,6 +4643,12 @@ "endpoints" : { "cn-northwest-1" : { } } + }, + "xray" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } } } }, { @@ -4483,6 +4670,12 @@ } }, "services" : { + "access-analyzer" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "acm" : { "endpoints" : { "us-gov-east-1" : { }, @@ -4565,6 +4758,21 @@ } } }, + "autoscaling-plans" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "batch" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "clouddirectory" : { "endpoints" : { "us-gov-west-1" : { } @@ -4636,6 +4844,11 @@ "us-gov-west-1" : { } } }, + "comprehendmedical" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "config" : { "endpoints" : { "us-gov-east-1" : { }, @@ -4733,6 +4946,7 @@ }, "elasticfilesystem" : { "endpoints" : { + "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, @@ -5170,6 +5384,17 @@ "us-gov-west-1" : { } } }, + "support" : { + "endpoints" : { + "aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "support.us-gov-west-1.amazonaws.com" + } + }, + "partitionEndpoint" : "aws-us-gov-global" + }, "swf" : { "endpoints" : { "us-gov-east-1" : { }, @@ -5187,6 +5412,7 @@ "protocols" : [ "https" ] }, "endpoints" : { + "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, @@ -5253,7 +5479,6 @@ }, "application-autoscaling" : { "defaults" : { - "hostname" : "autoscaling.us-iso-east-1.c2s.ic.gov", "protocols" : [ "http", "https" ] }, "endpoints" : { @@ -5523,7 +5748,6 @@ "services" : { "application-autoscaling" : { "defaults" : { - 
"hostname" : "autoscaling.us-isob-east-1.sc2s.sgov.gov", "protocols" : [ "http", "https" ] }, "endpoints" : { diff --git a/botocore/data/fms/2018-01-01/service-2.json b/botocore/data/fms/2018-01-01/service-2.json index fb1368c8..df678e7b 100644 --- a/botocore/data/fms/2018-01-01/service-2.json +++ b/botocore/data/fms/2018-01-01/service-2.json @@ -189,6 +189,22 @@ ], "documentation":"

Returns an array of PolicySummary objects in the response.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InternalErrorException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Retrieves the list of tags for the specified AWS resource.

" + }, "PutNotificationChannel":{ "name":"PutNotificationChannel", "http":{ @@ -220,6 +236,39 @@ {"shape":"InvalidTypeException"} ], "documentation":"

Creates an AWS Firewall Manager policy.

Firewall Manager provides the following types of policies:

  • A Shield Advanced policy, which applies Shield Advanced protection to specified accounts and resources

  • An AWS WAF policy, which contains a rule group and defines which resources are to be protected by that rule group

  • A security group policy, which manages VPC security groups across your AWS organization.

Each policy is specific to one of the three types. If you want to enforce more than one policy type across accounts, you can create multiple policies. You can create multiple policies for each type.

You must be subscribed to Shield Advanced to create a Shield Advanced policy. For more information about subscribing to Shield Advanced, see CreateSubscription.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InternalErrorException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Adds one or more tags to an AWS resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InternalErrorException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Removes one or more tags from an AWS resource.

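The three tagging operations added here only need a policy ARN, since the Firewall Manager policy is the only taggable resource. A rough sketch with the botocore client; the ARN below is a placeholder:

import botocore.session

fms = botocore.session.get_session().create_client('fms', region_name='us-east-1')
policy_arn = 'arn:aws:fms:us-east-1:123456789012:policy/example'  # placeholder

# Add a tag, list the tags back, then remove the key again.
fms.tag_resource(ResourceArn=policy_arn,
                 TagList=[{'Key': 'environment', 'Value': 'test'}])
tags = fms.list_tags_for_resource(ResourceArn=policy_arn)['TagList']
fms.untag_resource(ResourceArn=policy_arn, TagKeys=['environment'])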
" } }, "shapes":{ @@ -617,6 +666,25 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to return tags for. The Firewall Manager policy is the only AWS resource that supports tagging, so this ARN is a policy ARN.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "TagList":{ + "shape":"TagList", + "documentation":"

The tags associated with the resource.

" + } + } + }, "ManagedServiceData":{ "type":"string", "max":1024, @@ -633,6 +701,7 @@ }, "PaginationToken":{ "type":"string", + "max":4096, "min":1, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, @@ -843,6 +912,10 @@ "Policy":{ "shape":"Policy", "documentation":"

The details of the AWS Firewall Manager policy to be created.

" + }, + "TagList":{ + "shape":"TagList", + "documentation":"

The tags to add to the AWS resource.

" } } }, @@ -894,16 +967,27 @@ "required":["Key"], "members":{ "Key":{ - "shape":"TagKey", + "shape":"ResourceTagKey", "documentation":"

The resource tag key.

" }, "Value":{ - "shape":"TagValue", + "shape":"ResourceTagValue", "documentation":"

The resource tag value.

" } }, "documentation":"

The resource tags that AWS Firewall Manager uses to determine if a particular resource should be included or excluded from the AWS Firewall Manager policy. Tags enable you to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value. Firewall Manager combines the tags with \"AND\" so that, if you add more than one tag to a policy scope, a resource must have all the specified tags to be included or excluded. For more information, see Working with Tag Editor.

" }, + "ResourceTagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "ResourceTagValue":{ + "type":"string", + "max":256, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, "ResourceTags":{ "type":"list", "member":{"shape":"ResourceTag"}, @@ -945,18 +1029,93 @@ "SECURITY_GROUPS_USAGE_AUDIT" ] }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

Part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as \"customer.\" Tag keys are case-sensitive.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive.

" + } + }, + "documentation":"

A collection of key:value pairs associated with an AWS resource. The key:value pair can be anything you define. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each AWS resource.

" + }, "TagKey":{ "type":"string", "max":128, "min":1, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagList" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource. The Firewall Manager policy is the only AWS resource that supports tagging, so this ARN is a policy ARN.

" + }, + "TagList":{ + "shape":"TagList", + "documentation":"

The tags to add to the resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "TagValue":{ "type":"string", "max":256, + "min":0, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TimeStamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource. The Firewall Manager policy is the only AWS resource that supports tagging, so this ARN is a policy ARN.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The keys of the tags to remove from the resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "ViolationReason":{ "type":"string", "enum":[ diff --git a/botocore/data/forecastquery/2018-06-26/service-2.json b/botocore/data/forecastquery/2018-06-26/service-2.json index 94aac8e5..67f3c0cb 100644 --- a/botocore/data/forecastquery/2018-06-26/service-2.json +++ b/botocore/data/forecastquery/2018-06-26/service-2.json @@ -28,7 +28,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Retrieves a forecast filtered by the supplied criteria.

The criteria is a key-value pair. The key is either item_id (or the equivalent non-timestamp, non-target field) from the TARGET_TIME_SERIES dataset, or one of the forecast dimensions specified as part of the FeaturizationConfig object.

By default, the complete date range of the filtered forecast is returned. Optionally, you can request a specific date range within the forecast.

The forecasts generated by Amazon Forecast are in the same timezone as the dataset that was used to create the predictor.

" + "documentation":"

Retrieves a forecast for a single item, filtered by the supplied criteria.

The criteria is a key-value pair. The key is either item_id (or the equivalent non-timestamp, non-target field) from the TARGET_TIME_SERIES dataset, or one of the forecast dimensions specified as part of the FeaturizationConfig object.

By default, QueryForecast returns the complete date range for the filtered forecast. You can request a specific date range.

To get the full forecast, use the CreateForecastExportJob operation.

The forecasts generated by Amazon Forecast are in the same timezone as the dataset that was used to create the predictor.

" } }, "shapes":{ @@ -60,11 +60,7 @@ }, "documentation":"

The forecast value for a specific date. Part of the Forecast object.

" }, - "DateTime":{ - "type":"string", - "max":20, - "pattern":"^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z$" - }, + "DateTime":{"type":"string"}, "Double":{"type":"double"}, "ErrorMessage":{"type":"string"}, "Filters":{ @@ -79,7 +75,7 @@ "members":{ "Predictions":{ "shape":"Predictions", - "documentation":"

The forecast.

The string of the string to array map is one of the following values:

  • mean

  • p10

  • p50

  • p90

" + "documentation":"

The forecast.

The string of the string-to-array map is one of the following values:

  • p10

  • p50

  • p90

" } }, "documentation":"

Provides information about a forecast. Returned as part of the QueryForecast response.

" @@ -89,7 +85,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The value that you provided was invalid or too long.

", + "documentation":"

The value is invalid or is too long.

", "exception":true }, "InvalidNextTokenException":{ @@ -131,15 +127,15 @@ }, "StartDate":{ "shape":"DateTime", - "documentation":"

The start date for the forecast. Specify the date using this format: yyyy-MM-dd'T'HH:mm:ss'Z' (ISO 8601 format) For example, \"1970-01-01T00:00:00Z.\"

" + "documentation":"

The start date for the forecast. Specify the date using this format: yyyy-MM-dd'T'HH:mm:ss (ISO 8601 format). For example, 2015-01-01T08:00:00.

" }, "EndDate":{ "shape":"DateTime", - "documentation":"

The end date for the forecast. Specify the date using this format: yyyy-MM-dd'T'HH:mm:ss'Z' (ISO 8601 format). For example, \"1970-01-01T00:00:00Z.\"

" + "documentation":"

The end date for the forecast. Specify the date using this format: yyyy-MM-dd'T'HH:mm:ss (ISO 8601 format). For example, 2015-01-01T20:00:00.

" }, "Filters":{ "shape":"Filters", - "documentation":"

The filtering criteria to apply when retrieving the forecast. For example:

  • To get a forecast for a specific item specify the following:

    {\"item_id\" : \"client_1\"}

  • To get a forecast for a specific item sold in a specific location, specify the following:

    {\"item_id\" : \"client_1\", \"location\" : \"ny\"}

  • To get a forecast for all blue items sold in a specific location, specify the following:

    { \"location\" : \"ny\", \"color\":\"blue\"}

To get the full forecast, use the operation.

" + "documentation":"

The filtering criteria to apply when retrieving the forecast. For example, to get the forecast for client_21 in the electricity usage dataset, specify the following:

{\"item_id\" : \"client_21\"}

To get the full forecast, use the CreateForecastExportJob operation.

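A minimal sketch of the filtered query described above, using the botocore forecastquery client. The forecast ARN, item ID, and date range are placeholders, and the ForecastArn parameter plus the Forecast/Predictions response keys are assumptions based on the standard QueryForecast shape rather than this diff:

import botocore.session

fq = botocore.session.get_session().create_client('forecastquery', region_name='us-west-2')

response = fq.query_forecast(
    ForecastArn='arn:aws:forecast:us-west-2:123456789012:forecast/example',  # placeholder
    Filters={'item_id': 'client_21'},
    StartDate='2015-01-01T08:00:00',
    EndDate='2015-01-01T20:00:00',
)
# Print the median (p50) prediction for each timestamp in the range.
for point in response['Forecast']['Predictions']['p50']:
    print(point['Timestamp'], point['Value'])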
" }, "NextToken":{ "shape":"NextToken", @@ -180,11 +176,7 @@ "type":"list", "member":{"shape":"DataPoint"} }, - "Timestamp":{ - "type":"string", - "max":20, - "pattern":"^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z$" - } + "Timestamp":{"type":"string"} }, "documentation":"

Provides APIs for creating and managing Amazon Forecast resources.

" } diff --git a/botocore/data/fsx/2018-03-01/service-2.json b/botocore/data/fsx/2018-03-01/service-2.json index 01c04159..9bee19cb 100644 --- a/botocore/data/fsx/2018-03-01/service-2.json +++ b/botocore/data/fsx/2018-03-01/service-2.json @@ -12,6 +12,24 @@ "uid":"fsx-2018-03-01" }, "operations":{ + "CancelDataRepositoryTask":{ + "name":"CancelDataRepositoryTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelDataRepositoryTaskRequest"}, + "output":{"shape":"CancelDataRepositoryTaskResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"UnsupportedOperation"}, + {"shape":"DataRepositoryTaskNotFound"}, + {"shape":"DataRepositoryTaskEnded"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel a task, Amazon FSx does the following.

  • Any files that FSx has already exported are not reverted.

  • FSx continues to export any files that are \"in-flight\" when the cancel operation is received.

  • FSx does not export any files that have not yet been exported.

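Cancellation itself is a single call against the task ID; a short sketch (the task ID is a placeholder):

import botocore.session

fsx = botocore.session.get_session().create_client('fsx', region_name='us-east-1')
result = fsx.cancel_data_repository_task(TaskId='task-0123456789abcdef0')  # placeholder
print(result['TaskId'], result['Lifecycle'])  # Lifecycle moves to CANCELING, then CANCELED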
", + "idempotent":true + }, "CreateBackup":{ "name":"CreateBackup", "http":{ @@ -32,6 +50,26 @@ "documentation":"

Creates a backup of an existing Amazon FSx for Windows File Server file system. Creating regular backups for your file system is a best practice that complements the replication that Amazon FSx for Windows File Server performs for your file system. It also enables you to restore from user modification of data.

If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup with the specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

  • Creates a new Amazon FSx backup with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the backup.

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateBackup operation returns while the backup's lifecycle state is still CREATING. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

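The client request token is what makes retries safe; a sketch of that pattern with placeholder IDs. Reading BackupId and Lifecycle back from the Backup object is an assumption based on the standard CreateBackup response, which is not shown in this diff:

import uuid
import botocore.session

fsx = botocore.session.get_session().create_client('fsx', region_name='us-east-1')
token = str(uuid.uuid4())  # reuse this exact token when retrying the call

backup = fsx.create_backup(
    FileSystemId='fs-0123456789abcdef0',  # placeholder
    ClientRequestToken=token,
    Tags=[{'Key': 'Name', 'Value': 'nightly'}],
)['Backup']
print(backup['BackupId'], backup['Lifecycle'])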
", "idempotent":true }, + "CreateDataRepositoryTask":{ + "name":"CreateDataRepositoryTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataRepositoryTaskRequest"}, + "output":{"shape":"CreateDataRepositoryTaskResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"UnsupportedOperation"}, + {"shape":"FileSystemNotFound"}, + {"shape":"IncompatibleParameterError"}, + {"shape":"ServiceLimitExceeded"}, + {"shape":"InternalServerError"}, + {"shape":"DataRepositoryTaskExecuting"} + ], + "documentation":"

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repository. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Using Data Repository Tasks. To learn more about linking a data repository to your file system, see Step 1: Create Your Amazon FSx for Lustre File System.

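A sketch of starting an export task with a completion report, assuming the file system already has a linked S3 data repository; the file system ID, paths, and report location below are placeholders:

import botocore.session

fsx = botocore.session.get_session().create_client('fsx', region_name='us-east-1')

task = fsx.create_data_repository_task(
    Type='EXPORT_TO_REPOSITORY',
    FileSystemId='fs-0123456789abcdef0',   # placeholder
    Paths=['dir1/subdir'],                 # optional; defaults to the file system root directory
    Report={
        'Enabled': True,
        'Path': 's3://myBucket/myExportPath/reports',  # placeholder; must sit under the ExportPath
        'Format': 'REPORT_CSV_20191124',
        'Scope': 'FAILED_FILES_ONLY',
    },
)['DataRepositoryTask']
print(task['TaskId'], task['Lifecycle'])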
", + "idempotent":true + }, "CreateFileSystem":{ "name":"CreateFileSystem", "http":{ @@ -107,7 +145,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes a file system, deleting its contents. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups will also be deleted.

By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup is not subject to the file system's retention policy, and must be manually deleted.

The DeleteFileSystem action returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems action, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems returns a FileSystemNotFound error.

The data in a deleted file system is also deleted and can't be recovered by any means.

", + "documentation":"

Deletes a file system, deleting its contents. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups will also be deleted.

By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup is not subject to the file system's retention policy, and must be manually deleted.

The DeleteFileSystem action returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems action, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems returns a FileSystemNotFound error.

Deleting an Amazon FSx for Lustre file system will fail with a 400 BadRequest if a data repository task is in a PENDING or EXECUTING state.

The data in a deleted file system is also deleted and can't be recovered by any means.

", "idempotent":true }, "DescribeBackups":{ @@ -126,6 +164,22 @@ ], "documentation":"

Returns the description of specific Amazon FSx for Windows File Server backups, if a BackupIds value is provided for that backup. Otherwise, it returns all backups owned by your AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all backups, you can optionally specify the MaxResults parameter to limit the number of backups in a response. If more backups remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This action is used in an iterative process to retrieve a list of your backups. DescribeBackups is called first without a NextToken value. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

When using this action, keep the following in mind:

  • The implementation might return fewer than MaxResults file system descriptions while still including a NextToken value.

  • The order of backups returned in the response of one DescribeBackups call and the order of backups returned across the responses of a multi-call iteration is unspecified.

" }, + "DescribeDataRepositoryTasks":{ + "name":"DescribeDataRepositoryTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataRepositoryTasksRequest"}, + "output":{"shape":"DescribeDataRepositoryTasksResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"}, + {"shape":"DataRepositoryTaskNotFound"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Returns the description of specific Amazon FSx for Lustre data repository tasks, if one or more TaskIds values are provided in the request, or if filters are used in the request. You can use filters to narrow the response to include just tasks for specific file systems, or tasks in a specific lifecycle state. Otherwise, it returns all data repository tasks owned by your AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all tasks, you can paginate the response by using the optional MaxResults parameter to limit the number of tasks returned in a response. If more tasks remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

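A sketch of the filtered, paginated listing described above; the file system ID is a placeholder:

import botocore.session

fsx = botocore.session.get_session().create_client('fsx', region_name='us-east-1')

filters = [
    {'Name': 'file-system-id', 'Values': ['fs-0123456789abcdef0']},  # placeholder
    {'Name': 'task-lifecycle', 'Values': ['PENDING', 'EXECUTING']},
]
next_token = None
while True:
    kwargs = {'Filters': filters, 'MaxResults': 10}
    if next_token:
        kwargs['NextToken'] = next_token
    page = fsx.describe_data_repository_tasks(**kwargs)
    for task in page['DataRepositoryTasks']:
        print(task['TaskId'], task['Lifecycle'])
    next_token = page.get('NextToken')
    if not next_token:
        break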
" + }, "DescribeFileSystems":{ "name":"DescribeFileSystems", "http":{ @@ -262,11 +316,17 @@ "INVALID_DOMAIN_STAGE" ] }, - "ActiveDirectoryFullyQualifiedName":{"type":"string"}, + "ActiveDirectoryFullyQualifiedName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^.{1,255}$" + }, "ArchivePath":{ "type":"string", "max":900, - "min":3 + "min":3, + "pattern":"^.{3,900}$" }, "AutomaticBackupRetentionDays":{ "type":"integer", @@ -307,7 +367,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The ID of the AWS Key Management Service (AWS KMS) key used to encrypt this backup's data.

" + "documentation":"

The ID of the AWS Key Management Service (AWS KMS) key used to encrypt this backup of the Amazon FSx for Windows file system's data at rest. Amazon FSx for Lustre does not support KMS encryption.

" }, "ResourceARN":{ "shape":"ResourceARN", @@ -411,12 +471,59 @@ "documentation":"

A generic error indicating a failure with a client request.

", "exception":true }, + "CancelDataRepositoryTaskRequest":{ + "type":"structure", + "required":["TaskId"], + "members":{ + "TaskId":{ + "shape":"TaskId", + "documentation":"

Specifies the data repository task to cancel.

" + } + }, + "documentation":"

Cancels a data repository task.

" + }, + "CancelDataRepositoryTaskResponse":{ + "type":"structure", + "members":{ + "Lifecycle":{ + "shape":"DataRepositoryTaskLifecycle", + "documentation":"

The lifecycle status of the data repository task, as follows:

  • PENDING - Amazon FSx has not started the task.

  • EXECUTING - Amazon FSx is processing the task.

  • FAILED - Amazon FSx was not able to complete the task. For example, there may be files the task failed to process. The DataRepositoryTaskFailureDetails property provides more information about task failures.

  • SUCCEEDED - FSx completed the task successfully.

  • CANCELED - Amazon FSx canceled the task and it did not complete.

  • CANCELING - FSx is in the process of canceling the task.

" + }, + "TaskId":{ + "shape":"TaskId", + "documentation":"

The ID of the task being canceled.

" + } + } + }, "ClientRequestToken":{ "type":"string", "documentation":"

(Optional) An idempotency token for resource creation, in a string of up to 64 ASCII characters. This token is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", - "max":255, + "max":63, "min":1, - "pattern":"[A-za-z0-9_.-]{0,255}$" + "pattern":"[A-za-z0-9_.-]{0,63}$" + }, + "CompletionReport":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Flag", + "documentation":"

Set Enabled to True to generate a CompletionReport when the task completes. If set to true, then you need to provide a report Scope, Path, and Format. Set Enabled to False if you do not want a CompletionReport generated when the task completes.

" + }, + "Path":{ + "shape":"ArchivePath", + "documentation":"

Required if Enabled is set to true. Specifies the location of the report on the file system's linked S3 data repository. An absolute path that defines where the completion report will be stored in the destination location. The Path you provide must be located within the file system’s ExportPath. An example Path value is \"s3://myBucket/myExportPath/optionalPrefix\". The report provides the following information for each file in the report: FilePath, FileStatus, and ErrorCode. To learn more about a file system's ExportPath, see .

" + }, + "Format":{ + "shape":"ReportFormat", + "documentation":"

Required if Enabled is set to true. Specifies the format of the CompletionReport. REPORT_CSV_20191124 is the only format currently supported. When Format is set to REPORT_CSV_20191124, the CompletionReport is provided in CSV format, and is delivered to {path}/task-{id}/failures.csv.

" + }, + "Scope":{ + "shape":"ReportScope", + "documentation":"

Required if Enabled is set to true. Specifies the scope of the CompletionReport; FAILED_FILES_ONLY is the only scope currently supported. When Scope is set to FAILED_FILES_ONLY, the CompletionReport only contains information about files that the data repository task failed to process.

" + } + }, + "documentation":"

Provides a report detailing the data repository task results of the files processed that match the criteria specified in the report Scope parameter. FSx delivers the report to the file system's linked data repository in Amazon S3, using the path specified in the report Path parameter. You can specify whether or not a report gets generated for a task using the Enabled parameter.

" }, "CreateBackupRequest":{ "type":"structure", @@ -448,6 +555,43 @@ }, "documentation":"

The response object for the CreateBackup operation.

" }, + "CreateDataRepositoryTaskRequest":{ + "type":"structure", + "required":[ + "Type", + "FileSystemId", + "Report" + ], + "members":{ + "Type":{ + "shape":"DataRepositoryTaskType", + "documentation":"

Specifies the type of data repository task to create.

" + }, + "Paths":{ + "shape":"DataRepositoryTaskPaths", + "documentation":"

(Optional) The path or paths on the Amazon FSx file system to use when the data repository task is processed. The default path is the file system root directory.

" + }, + "FileSystemId":{"shape":"FileSystemId"}, + "Report":{ + "shape":"CompletionReport", + "documentation":"

Defines whether or not Amazon FSx provides a CompletionReport once the task has completed. A CompletionReport provides a detailed report on the files that Amazon FSx processed that meet the criteria specified by the Scope parameter.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "Tags":{"shape":"Tags"} + } + }, + "CreateDataRepositoryTaskResponse":{ + "type":"structure", + "members":{ + "DataRepositoryTask":{ + "shape":"DataRepositoryTask", + "documentation":"

The description of the data repository task that you just created.

" + } + } + }, "CreateFileSystemFromBackupRequest":{ "type":"structure", "required":[ @@ -640,6 +784,177 @@ }, "documentation":"

The data repository configuration object for Lustre file systems returned in the response of the CreateFileSystem operation.

" }, + "DataRepositoryTask":{ + "type":"structure", + "required":[ + "TaskId", + "Lifecycle", + "Type", + "CreationTime", + "FileSystemId" + ], + "members":{ + "TaskId":{ + "shape":"TaskId", + "documentation":"

The system-generated, unique 17-digit ID of the data repository task.

" + }, + "Lifecycle":{ + "shape":"DataRepositoryTaskLifecycle", + "documentation":"

The lifecycle status of the data repository task, as follows:

  • PENDING - Amazon FSx has not started the task.

  • EXECUTING - Amazon FSx is processing the task.

  • FAILED - Amazon FSx was not able to complete the task. For example, there may be files the task failed to process. The DataRepositoryTaskFailureDetails property provides more information about task failures.

  • SUCCEEDED - FSx completed the task successfully.

  • CANCELED - Amazon FSx canceled the task and it did not complete.

  • CANCELING - FSx is in the process of canceling the task.

You cannot delete an FSx for Lustre file system if there are data repository tasks for the file system in the PENDING or EXECUTING states. Please retry when the data repository task is finished (with a status of CANCELED, SUCCEEDED, or FAILED). You can use the DescribeDataRepositoryTasks action to monitor the task status. Contact the FSx team if you need to delete your file system immediately.

" + }, + "Type":{ + "shape":"DataRepositoryTaskType", + "documentation":"

The type of data repository task; EXPORT_TO_REPOSITORY is the only type currently supported.

" + }, + "CreationTime":{"shape":"CreationTime"}, + "StartTime":{ + "shape":"StartTime", + "documentation":"

The time that Amazon FSx began processing the task.

" + }, + "EndTime":{ + "shape":"EndTime", + "documentation":"

The time that Amazon FSx completed processing the task, populated after the task is complete.

" + }, + "ResourceARN":{"shape":"ResourceARN"}, + "Tags":{"shape":"Tags"}, + "FileSystemId":{"shape":"FileSystemId"}, + "Paths":{ + "shape":"DataRepositoryTaskPaths", + "documentation":"

An array of paths on the Amazon FSx for Lustre file system that specify the data for the data repository task to process. For example, in an EXPORT_TO_REPOSITORY task, the paths specify which data to export to the linked data repository.

(Default) If Paths is not specified, Amazon FSx uses the file system root directory.

" + }, + "FailureDetails":{ + "shape":"DataRepositoryTaskFailureDetails", + "documentation":"

Failure message describing why the task failed. It is populated only when Lifecycle is set to FAILED.

" + }, + "Status":{ + "shape":"DataRepositoryTaskStatus", + "documentation":"

Provides the status of the number of files that the task has processed successfully and failed to process.

" + }, + "Report":{"shape":"CompletionReport"} + }, + "documentation":"

A description of the data repository task. You use data repository tasks to perform bulk transfer operations between your Amazon FSx file system and its linked data repository.

" + }, + "DataRepositoryTaskEnded":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The data repository task could not be canceled because the task has already ended.

", + "exception":true + }, + "DataRepositoryTaskExecuting":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

An existing data repository task is currently executing on the file system. Wait until the existing task has completed, then create the new task.

", + "exception":true + }, + "DataRepositoryTaskFailureDetails":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Provides information about why a data repository task failed. Only populated when the task Lifecycle is set to FAILED.

" + }, + "DataRepositoryTaskFilter":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"DataRepositoryTaskFilterName", + "documentation":"

Name of the task property to use in filtering the tasks returned in the response.

  • Use file-system-id to retrieve data repository tasks for specific file systems.

  • Use task-lifecycle to retrieve data repository tasks with one or more specific lifecycle states, as follows: CANCELED, EXECUTING, FAILED, PENDING, and SUCCEEDED.

" + }, + "Values":{ + "shape":"DataRepositoryTaskFilterValues", + "documentation":"

Use Values to include the specific file system IDs and task lifecycle states for the filters you are using.

" + } + }, + "documentation":"

(Optional) An array of filter objects that you can use to filter the data repository tasks returned in the response. You can filter the tasks returned in the response by one or more file system IDs, task lifecycles, and by task type. A filter object consists of a filter Name, and one or more Values for the filter.

" + }, + "DataRepositoryTaskFilterName":{ + "type":"string", + "enum":[ + "file-system-id", + "task-lifecycle" + ] + }, + "DataRepositoryTaskFilterValue":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[0-9a-zA-Z\\*\\.\\\\/\\?\\-\\_]*$" + }, + "DataRepositoryTaskFilterValues":{ + "type":"list", + "member":{"shape":"DataRepositoryTaskFilterValue"}, + "max":20 + }, + "DataRepositoryTaskFilters":{ + "type":"list", + "member":{"shape":"DataRepositoryTaskFilter"}, + "max":3 + }, + "DataRepositoryTaskLifecycle":{ + "type":"string", + "enum":[ + "PENDING", + "EXECUTING", + "FAILED", + "SUCCEEDED", + "CANCELED", + "CANCELING" + ] + }, + "DataRepositoryTaskNotFound":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The data repository task or tasks you specified could not be found.

", + "exception":true + }, + "DataRepositoryTaskPath":{ + "type":"string", + "max":4096, + "min":0, + "pattern":"^.{0,4096}$" + }, + "DataRepositoryTaskPaths":{ + "type":"list", + "member":{"shape":"DataRepositoryTaskPath"}, + "max":100 + }, + "DataRepositoryTaskStatus":{ + "type":"structure", + "members":{ + "TotalCount":{ + "shape":"TotalCount", + "documentation":"

The total number of files that the task will process. While a task is executing, the sum of SucceededCount plus FailedCount may not equal TotalCount. When the task is complete, TotalCount equals the sum of SucceededCount plus FailedCount.

" + }, + "SucceededCount":{ + "shape":"SucceededCount", + "documentation":"

A running total of the number of files that the task has successfully processed.

" + }, + "FailedCount":{ + "shape":"FailedCount", + "documentation":"

A running total of the number of files that the task failed to process.

" + }, + "LastUpdatedTime":{ + "shape":"LastUpdatedTime", + "documentation":"

The time at which the task status was last updated.

" + } + }, + "documentation":"

Provides the task status showing a running total of the total number of files to be processed, the number successfully processed, and the number of files the task failed to process.

" + }, + "DataRepositoryTaskType":{ + "type":"string", + "enum":["EXPORT_TO_REPOSITORY"] + }, + "DataRepositoryTasks":{ + "type":"list", + "member":{"shape":"DataRepositoryTask"}, + "max":50 + }, "DeleteBackupRequest":{ "type":"structure", "required":["BackupId"], @@ -766,6 +1081,31 @@ }, "documentation":"

Response object for DescribeBackups operation.

" }, + "DescribeDataRepositoryTasksRequest":{ + "type":"structure", + "members":{ + "TaskIds":{ + "shape":"TaskIds", + "documentation":"

(Optional) IDs of the tasks whose descriptions you want to retrieve (String).

" + }, + "Filters":{ + "shape":"DataRepositoryTaskFilters", + "documentation":"

(Optional) You can use filters to narrow the DescribeDataRepositoryTasks response to include just tasks for specific file systems, or tasks in a specific lifecycle state.

" + }, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeDataRepositoryTasksResponse":{ + "type":"structure", + "members":{ + "DataRepositoryTasks":{ + "shape":"DataRepositoryTasks", + "documentation":"

The collection of data repository task descriptions returned.

" + }, + "NextToken":{"shape":"NextToken"} + } + }, "DescribeFileSystemsRequest":{ "type":"structure", "members":{ @@ -808,12 +1148,14 @@ "type":"string", "max":256, "min":1, + "pattern":"^.{1,256}$", "sensitive":true }, "DirectoryUserName":{ "type":"string", "max":256, - "min":1 + "min":1, + "pattern":"^.{1,256}$" }, "DnsIps":{ "type":"list", @@ -821,12 +1163,14 @@ "max":2, "min":1 }, + "EndTime":{"type":"timestamp"}, "ErrorMessage":{ "type":"string", "documentation":"

A detailed error message.

", "max":256, "min":1 }, + "FailedCount":{"type":"long"}, "FileSystem":{ "type":"structure", "members":{ @@ -873,7 +1217,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The ID of the AWS Key Management Service (AWS KMS) key used to encrypt the file system's data for an Amazon FSx for Windows File Server file system.

" + "documentation":"

The ID of the AWS Key Management Service (AWS KMS) key used to encrypt the file system's data for an Amazon FSx for Windows File Server file system. Amazon FSx for Lustre does not support KMS encryption.

" }, "ResourceARN":{ "shape":"ResourceARN", @@ -894,7 +1238,8 @@ "FileSystemAdministratorsGroupName":{ "type":"string", "max":256, - "min":1 + "min":1, + "pattern":"^.{1,256}$" }, "FileSystemFailureDetails":{ "type":"structure", @@ -1059,15 +1404,18 @@ }, "IpAddress":{ "type":"string", + "max":15, + "min":7, "pattern":"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$" }, "KmsKeyId":{ "type":"string", - "documentation":"

The ID of your AWS Key Management Service (AWS KMS) key. This ID is used to encrypt the data in your file system at rest. For more information, see Encrypt in the AWS Key Management Service API Reference.

", + "documentation":"

The ID of the AWS Key Management Service (AWS KMS) key used to encrypt the file system's data for an Amazon FSx for Windows File Server file system at rest. Amazon FSx for Lustre does not support KMS encryption. For more information, see Encrypt in the AWS Key Management Service API Reference.

", "max":2048, "min":1, "pattern":"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[89aAbB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}|arn:aws[a-z-]{0,7}:kms:[a-z]{2}-[a-z-]{4,}-\\d+:\\d{12}:(key|alias)\\/([a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[89aAbB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}|[a-zA-Z0-9:\\/_-]+)|alias\\/[a-zA-Z0-9:\\/_-]+$" }, + "LastUpdatedTime":{"type":"timestamp"}, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceARN"], @@ -1172,7 +1520,8 @@ "OrganizationalUnitDistinguishedName":{ "type":"string", "max":2000, - "min":1 + "min":1, + "pattern":"^.{1,2000}$" }, "Parameter":{ "type":"string", @@ -1185,6 +1534,14 @@ "max":100, "min":0 }, + "ReportFormat":{ + "type":"string", + "enum":["REPORT_CSV_20191124"] + }, + "ReportScope":{ + "type":"string", + "enum":["FAILED_FILES_ONLY"] + }, "ResourceARN":{ "type":"string", "documentation":"

The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify AWS resources. We require an ARN when you need to specify a resource unambiguously across all of AWS. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", @@ -1288,7 +1645,7 @@ }, "DnsIps":{ "shape":"DnsIps", - "documentation":"

A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory. The IP addresses need to be either in the same VPC CIDR range as the one in which your Amazon FSx file system is being created, or in the private IP version 4 (Iv4) address ranges, as specified in RFC 1918:

  • 10.0.0.0 - 10.255.255.255 (10/8 prefix)

  • 172.16.0.0 - 172.31.255.255 (172.16/12 prefix)

  • 192.168.0.0 - 192.168.255.255 (192.168/16 prefix)

" + "documentation":"

A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory. The IP addresses need to be either in the same VPC CIDR range as the one in which your Amazon FSx file system is being created, or in the private IP version 4 (IPv4) address ranges, as specified in RFC 1918:

  • 10.0.0.0 - 10.255.255.255 (10/8 prefix)

  • 172.16.0.0 - 172.31.255.255 (172.16/12 prefix)

  • 192.168.0.0 - 192.168.255.255 (192.168/16 prefix)

" } }, "documentation":"

The configuration that Amazon FSx uses to join the Windows File Server instance to your self-managed (including on-premises) Microsoft Active Directory (AD) directory.

" @@ -1334,10 +1691,11 @@ "documentation":"

An error indicating that a particular service limit was exceeded. You can increase some service limits by contacting AWS Support.

", "exception":true }, + "StartTime":{"type":"timestamp"}, "StorageCapacity":{ "type":"integer", "documentation":"

The storage capacity for your Amazon FSx file system, in gibibytes.

", - "min":1 + "min":0 }, "SubnetId":{ "type":"string", @@ -1352,6 +1710,7 @@ "documentation":"

A list of subnet IDs. Currently, you can specify only one subnet ID in a call to the CreateFileSystem operation.

", "max":50 }, + "SucceededCount":{"type":"long"}, "Tag":{ "type":"structure", "members":{ @@ -1370,7 +1729,8 @@ "type":"string", "documentation":"

A string of 1 to 128 characters that specifies the key for a tag. Tag keys must be unique for the resource to which they are attached.

", "max":128, - "min":1 + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TagKeys":{ "type":"list", @@ -1407,7 +1767,8 @@ "type":"string", "documentation":"

A string of 0 to 256 characters that specifies the value for a tag. Tag values can be null and don't have to be unique in a tag set.

", "max":256, - "min":0 + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "Tags":{ "type":"list", @@ -1416,12 +1777,24 @@ "max":50, "min":1 }, + "TaskId":{ + "type":"string", + "max":128, + "min":12, + "pattern":"^(task-[0-9a-f]{17,})$" + }, + "TaskIds":{ + "type":"list", + "member":{"shape":"TaskId"}, + "max":50 + }, + "TotalCount":{"type":"long"}, "UnsupportedOperation":{ "type":"structure", "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

An error occured.

", + "documentation":"

The requested operation is not supported for this resource or API.

", "exception":true }, "UntagResourceRequest":{ diff --git a/botocore/data/gamelift/2015-10-01/service-2.json b/botocore/data/gamelift/2015-10-01/service-2.json index 77649c76..1f0ff2d4 100644 --- a/botocore/data/gamelift/2015-10-01/service-2.json +++ b/botocore/data/gamelift/2015-10-01/service-2.json @@ -41,9 +41,10 @@ {"shape":"InvalidRequestException"}, {"shape":"ConflictException"}, {"shape":"InternalServiceException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"TaggingFailedException"} ], - "documentation":"

Creates an alias for a fleet. In most situations, you can use an alias ID in place of a fleet ID. By using a fleet alias instead of a specific fleet ID, you can switch gameplay and players to a new fleet without changing your game client or other game components. For example, for games in production, using an alias allows you to seamlessly redirect your player base to a new game server update.

Amazon GameLift supports two types of routing strategies for aliases: simple and terminal. A simple alias points to an active fleet. A terminal alias is used to display messaging or link to a URL instead of routing players to an active fleet. For example, you might use a terminal alias when a game version is no longer supported and you want to direct players to an upgrade site.

To create a fleet alias, specify an alias name, routing strategy, and optional description. Each simple alias can point to only one fleet, but a fleet can have multiple aliases. If successful, a new alias record is returned, including an alias ID, which you can reference when creating a game session. You can reassign an alias to another fleet by calling UpdateAlias.

" + "documentation":"

Creates an alias for a fleet. In most situations, you can use an alias ID in place of a fleet ID. An alias provides a level of abstraction for a fleet that is useful when redirecting player traffic from one fleet to another, such as when updating your game build.

Amazon GameLift supports two types of routing strategies for aliases: simple and terminal. A simple alias points to an active fleet. A terminal alias is used to display messaging or link to a URL instead of routing players to an active fleet. For example, you might use a terminal alias when a game version is no longer supported and you want to direct players to an upgrade site.

To create a fleet alias, specify an alias name, routing strategy, and optional description. Each simple alias can point to only one fleet, but a fleet can have multiple aliases. If successful, a new alias record is returned, including an alias ID and an ARN. You can reassign an alias to another fleet by calling UpdateAlias.

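A sketch of the simple-alias flow described above with the botocore client; the alias name and fleet IDs are placeholders, and update_alias is the companion call named in the text:

import botocore.session

gamelift = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')

alias = gamelift.create_alias(
    Name='live',
    Description='Points at the current production fleet',
    RoutingStrategy={'Type': 'SIMPLE',
                     'FleetId': 'fleet-11111111-2222-3333-4444-555555555555'},  # placeholder
)['Alias']

# Later, redirect players to a new fleet without touching game clients.
gamelift.update_alias(
    AliasId=alias['AliasId'],
    RoutingStrategy={'Type': 'SIMPLE',
                     'FleetId': 'fleet-66666666-7777-8888-9999-000000000000'},  # placeholder
)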
" }, "CreateBuild":{ "name":"CreateBuild", @@ -57,9 +58,10 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"}, {"shape":"ConflictException"}, + {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates a new Amazon GameLift build record for your game server binary files and points to the location of your game server build files in an Amazon Simple Storage Service (Amazon S3) location.

Game server binaries must be combined into a .zip file for use with Amazon GameLift.

To create new builds quickly and easily, use the AWS CLI command upload-build . This helper command uploads your build and creates a new build record in one step, and automatically handles the necessary permissions.

The CreateBuild operation should be used only when you need to manually upload your build files, as in the following scenarios:

  • Store a build file in an Amazon S3 bucket under your own AWS account. To use this option, you must first give Amazon GameLift access to that Amazon S3 bucket. To create a new build record using files in your Amazon S3 bucket, call CreateBuild and specify a build name, operating system, and the storage location of your game build.

  • Upload a build file directly to Amazon GameLift's Amazon S3 account. To use this option, you first call CreateBuild with a build name and operating system. This action creates a new build record and returns an Amazon S3 storage location (bucket and key only) and temporary access credentials. Use the credentials to manually upload your build file to the storage location (see the Amazon S3 topic Uploading Objects). You can upload files to a location only once.

If successful, this operation creates a new build record with a unique build ID and places it in INITIALIZED status. You can use DescribeBuild to check the status of your build. A build must be in READY status before it can be used to create fleets.

Learn more

Uploading Your Game

Create a Build with Files in Amazon S3

Related operations

" + "documentation":"

Creates a new Amazon GameLift build record for your game server binary files and points to the location of your game server build files in an Amazon Simple Storage Service (Amazon S3) location.

Game server binaries must be combined into a zip file for use with Amazon GameLift.

To create new builds directly from a file directory, use the AWS CLI command upload-build . This helper command uploads build files and creates a new build record in one step, and automatically handles the necessary permissions.

The CreateBuild operation should be used only in the following scenarios:

  • To create a new game build with build files that are in an Amazon S3 bucket under your own AWS account. To use this option, you must first give Amazon GameLift access to that Amazon S3 bucket. Then call CreateBuild and specify a build name, operating system, and the Amazon S3 storage location of your game build.

  • To upload build files directly to Amazon GameLift's Amazon S3 account. To use this option, first call CreateBuild and specify a build name and operating system. This action creates a new build record and returns an Amazon S3 storage location (bucket and key only) and temporary access credentials. Use the credentials to manually upload your build file to the provided storage location (see the Amazon S3 topic Uploading Objects). You can upload build files to the GameLift Amazon S3 location only once.

If successful, this operation creates a new build record with a unique build ID and places it in INITIALIZED status. You can use DescribeBuild to check the status of your build. A build must be in READY status before it can be used to create fleets.

Learn more

Uploading Your Game https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html

Create a Build with Files in Amazon S3

Related operations

" }, "CreateFleet":{ "name":"CreateFleet", @@ -75,9 +77,10 @@ {"shape":"ConflictException"}, {"shape":"LimitExceededException"}, {"shape":"InvalidRequestException"}, - {"shape":"UnauthorizedException"} + {"shape":"UnauthorizedException"}, + {"shape":"TaggingFailedException"} ], - "documentation":"

Creates a new fleet to run your game servers. whether they are custom game builds or Realtime Servers with game-specific script. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can host multiple game sessions. When creating a fleet, you choose the hardware specifications, set some configuration options, and specify the game server to deploy on the new fleet.

To create a new fleet, you must provide the following: (1) a fleet name, (2) an EC2 instance type and fleet type (spot or on-demand), (3) the build ID for your game build or script ID if using Realtime Servers, and (4) a run-time configuration, which determines how game servers will run on each instance in the fleet.

When creating a Realtime Servers fleet, we recommend using a minimal version of the Realtime script (see this working code example ). This will make it much easier to troubleshoot any fleet creation issues. Once the fleet is active, you can update your Realtime script as needed.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks. You can track the process of a fleet by checking the fleet status or by monitoring fleet creation events:

  • Creates a fleet record. Status: NEW.

  • Begins writing events to the fleet event log, which can be accessed in the Amazon GameLift console.

    Sets the fleet's target capacity to 1 (desired instances), which triggers Amazon GameLift to start one new EC2 instance.

  • Downloads the game build or Realtime script to the new instance and installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING.

  • Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, Amazon GameLift staggers each launch by a few seconds. Status: ACTIVATING.

  • Sets the fleet's status to ACTIVE as soon as one server process is ready to host a game session.

Learn more

Working with Fleets

Debug Fleet Creation Issues

Related operations

" + "documentation":"

Creates a new fleet to run your game servers, whether they are custom game builds or Realtime Servers with a game-specific script. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can host multiple game sessions. When creating a fleet, you choose the hardware specifications, set some configuration options, and specify the game server to deploy on the new fleet.

To create a new fleet, you must provide the following: (1) a fleet name, (2) an EC2 instance type and fleet type (spot or on-demand), (3) the build ID for your game build or script ID if using Realtime Servers, and (4) a runtime configuration, which determines how game servers will run on each instance in the fleet.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks. You can track the process of a fleet by checking the fleet status or by monitoring fleet creation events:

  • Creates a fleet record. Status: NEW.

  • Begins writing events to the fleet event log, which can be accessed in the Amazon GameLift console.

  • Sets the fleet's target capacity to 1 (desired instances), which triggers Amazon GameLift to start one new EC2 instance.

  • Downloads the game build or Realtime script to the new instance and installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING.

  • Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, Amazon GameLift staggers each process launch by a few seconds. Status: ACTIVATING.

  • Sets the fleet's status to ACTIVE as soon as one server process is ready to host a game session.

Learn more

Setting Up Fleets

Debug Fleet Creation Issues

Related operations

" }, "CreateGameSession":{ "name":"CreateGameSession", @@ -113,9 +116,10 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"TaggingFailedException"} ], - "documentation":"

Establishes a new queue for processing requests to place new game sessions. A queue identifies where new game sessions can be hosted -- by specifying a list of destinations (fleets or aliases) -- and how long requests can wait in the queue before timing out. You can set up a queue to try to place game sessions on fleets in multiple regions. To add placement requests to a queue, call StartGameSessionPlacement and reference the queue name.

Destination order. When processing a request for a game session, Amazon GameLift tries each destination in order until it finds one with available resources to host the new game session. A queue's default order is determined by how destinations are listed. The default order is overridden when a game session placement request provides player latency information. Player latency information enables Amazon GameLift to prioritize destinations where players report the lowest average latency, as a result placing the new game session where the majority of players will have the best possible gameplay experience.

Player latency policies. For placement requests containing player latency information, use player latency policies to protect individual players from very high latencies. With a latency cap, even when a destination can deliver a low latency for most players, the game is not placed where any individual player is reporting latency higher than a policy's maximum. A queue can have multiple latency policies, which are enforced consecutively starting with the policy with the lowest latency cap. Use multiple policies to gradually relax latency controls; for example, you might set a policy with a low latency cap for the first 60 seconds, a second policy with a higher cap for the next 60 seconds, etc.

To create a new queue, provide a name, timeout value, a list of destinations and, if desired, a set of latency policies. If successful, a new queue object is returned.

" + "documentation":"

Establishes a new queue for processing requests to place new game sessions. A queue identifies where new game sessions can be hosted -- by specifying a list of destinations (fleets or aliases) -- and how long requests can wait in the queue before timing out. You can set up a queue to try to place game sessions on fleets in multiple Regions. To add placement requests to a queue, call StartGameSessionPlacement and reference the queue name.

Destination order. When processing a request for a game session, Amazon GameLift tries each destination in order until it finds one with available resources to host the new game session. A queue's default order is determined by how destinations are listed. The default order is overridden when a game session placement request provides player latency information. Player latency information enables Amazon GameLift to prioritize destinations where players report the lowest average latency, as a result placing the new game session where the majority of players will have the best possible gameplay experience.

Player latency policies. For placement requests containing player latency information, use player latency policies to protect individual players from very high latencies. With a latency cap, even when a destination can deliver a low latency for most players, the game is not placed where any individual player is reporting latency higher than a policy's maximum. A queue can have multiple latency policies, which are enforced consecutively starting with the policy with the lowest latency cap. Use multiple policies to gradually relax latency controls; for example, you might set a policy with a low latency cap for the first 60 seconds, a second policy with a higher cap for the next 60 seconds, etc.

To create a new queue, provide a name, timeout value, a list of destinations and, if desired, a set of latency policies. If successful, a new queue object is returned.

" }, "CreateMatchmakingConfiguration":{ "name":"CreateMatchmakingConfiguration", @@ -130,7 +134,8 @@ {"shape":"LimitExceededException"}, {"shape":"NotFoundException"}, {"shape":"InternalServiceException"}, - {"shape":"UnsupportedRegionException"} + {"shape":"UnsupportedRegionException"}, + {"shape":"TaggingFailedException"} ], "documentation":"

Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.

To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.

There are two ways to track the progress of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; or (2) receiving notifications with Amazon Simple Notification Service (SNS). To use notifications, you first need to set up an SNS topic to receive the notifications, and provide the topic ARN in the matchmaking configuration. Since notifications promise only \"best effort\" delivery, we recommend calling DescribeMatchmaking if no notifications are received within 30 seconds.

Learn more

Design a FlexMatch Matchmaker

Setting up Notifications for Matchmaking

Related operations

" }, @@ -145,9 +150,10 @@ "errors":[ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"}, - {"shape":"UnsupportedRegionException"} + {"shape":"UnsupportedRegionException"}, + {"shape":"TaggingFailedException"} ], - "documentation":"

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams, and sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same region as the matchmaking configuration they are used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

" + "documentation":"

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide a unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same Region as the matchmaking configuration they are used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

" }, "CreatePlayerSession":{ "name":"CreatePlayerSession", @@ -199,6 +205,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"}, {"shape":"ConflictException"}, + {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], "documentation":"

Creates a new script record for your Realtime Servers script. Realtime scripts are JavaScript that provide configuration settings and optional custom game logic for your game. The script is deployed when you create a Realtime Servers fleet to host your game sessions. Script logic is executed during an active game session.

To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations:

  • A locally available directory. Use the ZipFile parameter for this option.

  • An Amazon Simple Storage Service (Amazon S3) bucket under your AWS account. Use the StorageLocation parameter for this option. You'll need to have an Identity and Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket.

If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3 bucket and the script record's storage location reflects this location. If the script file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as needed for deployment.

Learn more

Amazon GameLift Realtime Servers

Set Up a Role for Amazon GameLift Access

Related operations

" @@ -217,7 +224,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Requests authorization to create or delete a peer connection between the VPC for your Amazon GameLift fleet and a virtual private cloud (VPC) in your AWS account. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. Once you've received authorization, call CreateVpcPeeringConnection to establish the peering connection. For more information, see VPC Peering with Amazon GameLift Fleets.

You can peer with VPCs that are owned by any AWS account you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different regions.

To request authorization to create a connection, call this operation from the AWS account with the VPC that you want to peer to your Amazon GameLift fleet. For example, to enable your game servers to retrieve data from a DynamoDB table, use the account that manages that DynamoDB resource. Identify the following values: (1) The ID of the VPC that you want to peer with, and (2) the ID of the AWS account that you use to manage Amazon GameLift. If successful, VPC peering is authorized for the specified VPC.

To request authorization to delete a connection, call this operation from the AWS account with the VPC that is peered with your Amazon GameLift fleet. Identify the following values: (1) VPC ID that you want to delete the peering connection for, and (2) ID of the AWS account that you use to manage Amazon GameLift.

The authorization remains valid for 24 hours unless it is canceled by a call to DeleteVpcPeeringAuthorization. You must create or delete the peering connection while the authorization is valid.

" + "documentation":"

Requests authorization to create or delete a peer connection between the VPC for your Amazon GameLift fleet and a virtual private cloud (VPC) in your AWS account. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. Once you've received authorization, call CreateVpcPeeringConnection to establish the peering connection. For more information, see VPC Peering with Amazon GameLift Fleets.

You can peer with VPCs that are owned by any AWS account you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different Regions.

To request authorization to create a connection, call this operation from the AWS account with the VPC that you want to peer to your Amazon GameLift fleet. For example, to enable your game servers to retrieve data from a DynamoDB table, use the account that manages that DynamoDB resource. Identify the following values: (1) The ID of the VPC that you want to peer with, and (2) the ID of the AWS account that you use to manage Amazon GameLift. If successful, VPC peering is authorized for the specified VPC.

To request authorization to delete a connection, call this operation from the AWS account with the VPC that is peered with your Amazon GameLift fleet. Identify the following values: (1) VPC ID that you want to delete the peering connection for, and (2) ID of the AWS account that you use to manage Amazon GameLift.

The authorization remains valid for 24 hours unless it is canceled by a call to DeleteVpcPeeringAuthorization. You must create or delete the peering connection while the authorization is valid.

" }, "CreateVpcPeeringConnection":{ "name":"CreateVpcPeeringConnection", @@ -233,7 +240,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Establishes a VPC peering connection between a virtual private cloud (VPC) in an AWS account with the VPC for your Amazon GameLift fleet. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. You can peer with VPCs in any AWS account that you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different regions. For more information, see VPC Peering with Amazon GameLift Fleets.

Before calling this operation to establish the peering connection, you first need to call CreateVpcPeeringAuthorization and identify the VPC you want to peer with. Once the authorization for the specified VPC is issued, you have 24 hours to establish the connection. These two operations handle all tasks necessary to peer the two VPCs, including acceptance, updating routing tables, etc.

To establish the connection, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the following values: (1) The ID of the fleet you want to be enable a VPC peering connection for; (2) The AWS account with the VPC that you want to peer with; and (3) The ID of the VPC you want to peer with. This operation is asynchronous. If successful, a VpcPeeringConnection request is created. You can use continuous polling to track the request's status using DescribeVpcPeeringConnections, or by monitoring fleet events for success or failure using DescribeFleetEvents.

" + "documentation":"

Establishes a VPC peering connection between a virtual private cloud (VPC) in an AWS account and the VPC for your Amazon GameLift fleet. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. You can peer with VPCs in any AWS account that you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different Regions. For more information, see VPC Peering with Amazon GameLift Fleets.

Before calling this operation to establish the peering connection, you first need to call CreateVpcPeeringAuthorization and identify the VPC you want to peer with. Once the authorization for the specified VPC is issued, you have 24 hours to establish the connection. These two operations handle all tasks necessary to peer the two VPCs, including acceptance, updating routing tables, etc.

To establish the connection, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the following values: (1) The ID of the fleet you want to enable a VPC peering connection for; (2) The AWS account with the VPC that you want to peer with; and (3) The ID of the VPC you want to peer with. This operation is asynchronous. If successful, a VpcPeeringConnection request is created. You can use continuous polling to track the request's status using DescribeVpcPeeringConnections, or by monitoring fleet events for success or failure using DescribeFleetEvents.

" }, "DeleteAlias":{ "name":"DeleteAlias", @@ -246,6 +253,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"}, {"shape":"InvalidRequestException"}, + {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], "documentation":"

Deletes an alias. This action removes all record of the alias. Game clients attempting to access a server process using the deleted alias receive an error. To delete an alias, specify the alias ID to be deleted.

" @@ -261,6 +269,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"}, {"shape":"InternalServiceException"}, + {"shape":"TaggingFailedException"}, {"shape":"InvalidRequestException"} ], "documentation":"

Deletes a build. This action permanently deletes the build record and any uploaded build files.

To delete a build, specify its ID. Deleting a build does not affect the status of any active fleets using the build, but you can no longer create new fleets with the deleted build.

Learn more

Working with Builds

Related operations

" @@ -277,9 +286,10 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidFleetStatusException"}, {"shape":"UnauthorizedException"}, - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"TaggingFailedException"} ], - "documentation":"

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

If the fleet being deleted has a VPC peering connection, you first need to get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. You do not need to explicitly delete the VPC peering connection--this is done as part of the delete fleet process.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

If the fleet being deleted has a VPC peering connection, you first need to get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. You do not need to explicitly delete the VPC peering connection--this is done as part of the delete fleet process.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

Learn more

Working with Fleets.

Related operations

" }, "DeleteGameSessionQueue":{ "name":"DeleteGameSessionQueue", @@ -293,7 +303,8 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"}, {"shape":"NotFoundException"}, - {"shape":"UnauthorizedException"} + {"shape":"UnauthorizedException"}, + {"shape":"TaggingFailedException"} ], "documentation":"

Deletes a game session queue. This action means that any StartGameSessionPlacement requests that reference this queue will fail. To delete a queue, specify the queue name.

" }, @@ -309,7 +320,8 @@ {"shape":"InvalidRequestException"}, {"shape":"NotFoundException"}, {"shape":"InternalServiceException"}, - {"shape":"UnsupportedRegionException"} + {"shape":"UnsupportedRegionException"}, + {"shape":"TaggingFailedException"} ], "documentation":"

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

Related operations

" }, @@ -325,7 +337,8 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"}, - {"shape":"NotFoundException"} + {"shape":"NotFoundException"}, + {"shape":"TaggingFailedException"} ], "documentation":"

Deletes an existing matchmaking rule set. To delete the rule set, provide the rule set name. Rule sets cannot be deleted if they are currently being used by a matchmaking configuration.

Learn more

Related operations

" }, @@ -355,6 +368,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"}, + {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], "documentation":"

Deletes a Realtime script. This action permanently deletes the script record. If script files were uploaded, they are also deleted (files stored in an S3 bucket are not deleted).

To delete a script, specify the script ID. Before deleting a script, be sure to terminate all fleets that are deployed with the script being deleted. Fleet instances periodically check for script updates, and if the script record no longer exists, the instance will go into an error state and be unable to host game sessions.

Learn more

Amazon GameLift Realtime Servers

Related operations

" @@ -436,7 +450,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the following information for the specified EC2 instance type:

  • maximum number of instances allowed per AWS account (service limit)

  • current usage level for the AWS account

Service limits vary depending on region. Available regions for Amazon GameLift can be found in the AWS Management Console for Amazon GameLift (see the drop-down list in the upper right corner).

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves the following information for the specified EC2 instance type:

  • maximum number of instances allowed per AWS account (service limit)

  • current usage level for the AWS account

Service limits vary depending on Region. Available Regions for Amazon GameLift can be found in the AWS Management Console for Amazon GameLift (see the drop-down list in the upper right corner).

Learn more

Working with Fleets.

Related operations

" }, "DescribeFleetAttributes":{ "name":"DescribeFleetAttributes", @@ -452,7 +466,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves fleet properties, including metadata, status, and configuration, for one or more fleets. You can request attributes for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves fleet properties, including metadata, status, and configuration, for one or more fleets. You can request attributes for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Working with Fleets.

Related operations

" }, "DescribeFleetCapacity":{ "name":"DescribeFleetCapacity", @@ -468,7 +482,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the current status of fleet capacity for one or more fleets. This information includes the number of instances that have been requested for the fleet and the number currently active. You can request capacity for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves the current status of fleet capacity for one or more fleets. This information includes the number of instances that have been requested for the fleet and the number currently active. You can request capacity for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Working with Fleets.

Related operations

" }, "DescribeFleetEvents":{ "name":"DescribeFleetEvents", @@ -484,7 +498,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves entries from the specified fleet's event log. You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of event log entries matching the request are returned.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves entries from the specified fleet's event log. You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of event log entries matching the request is returned.

Learn more

Working with Fleets.

Related operations

" }, "DescribeFleetPortSettings":{ "name":"DescribeFleetPortSettings", @@ -500,7 +514,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the inbound connection permissions for a fleet. Connection permissions include a range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. To get a fleet's inbound connection permissions, specify a fleet ID. If successful, a collection of IpPermission objects is returned for the requested fleet ID. If the requested fleet has been deleted, the result set is empty.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves the inbound connection permissions for a fleet. Connection permissions include a range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. To get a fleet's inbound connection permissions, specify a fleet ID. If successful, a collection of IpPermission objects is returned for the requested fleet ID. If the requested fleet has been deleted, the result set is empty.

Learn more

Working with Fleets.

Related operations

" }, "DescribeFleetUtilization":{ "name":"DescribeFleetUtilization", @@ -516,7 +530,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves utilization statistics for one or more fleets. You can request utilization data for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetUtilization object is returned for each requested fleet ID. When specifying a list of fleet IDs, utilization objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves utilization statistics for one or more fleets. You can request utilization data for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetUtilization object is returned for each requested fleet ID. When specifying a list of fleet IDs, utilization objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Working with Fleets.

Related operations

" }, "DescribeGameSessionDetails":{ "name":"DescribeGameSessionDetails", @@ -565,7 +579,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the properties for one or more game session queues. When requesting multiple queues, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionQueue object is returned for each requested queue. When specifying a list of queues, objects are returned only for queues that currently exist in the region.

" + "documentation":"

Retrieves the properties for one or more game session queues. When requesting multiple queues, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionQueue object is returned for each requested queue. When specifying a list of queues, objects are returned only for queues that currently exist in the Region.

" }, "DescribeGameSessions":{ "name":"DescribeGameSessions", @@ -613,7 +627,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

Related operations

" + "documentation":"

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

Related operations

" }, "DescribeMatchmakingConfigurations":{ "name":"DescribeMatchmakingConfigurations", @@ -644,7 +658,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Learn more

Related operations

" + "documentation":"

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the Region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Learn more

Related operations

" }, "DescribePlayerSessions":{ "name":"DescribePlayerSessions", @@ -676,7 +690,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves the current run-time configuration for the specified fleet. The run-time configuration tells Amazon GameLift how to launch server processes on instances in the fleet.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves the current runtime configuration for the specified fleet. The runtime configuration tells Amazon GameLift how to launch server processes on instances in the fleet.

Learn more

Working with Fleets.

Related operations

" }, "DescribeScalingPolicies":{ "name":"DescribeScalingPolicies", @@ -817,7 +831,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves a collection of fleet records for this AWS account. You can filter the result set to find only those fleets that are deployed with a specific build or script. Use the pagination parameters to retrieve results in sequential pages.

Fleet records are not listed in a particular order.

Learn more

Set Up Fleets.

Related operations

" + "documentation":"

Retrieves a collection of fleet records for this AWS account. You can filter the result set to find only those fleets that are deployed with a specific build or script. Use the pagination parameters to retrieve results in sequential pages.

Fleet records are not listed in a particular order.

Learn more

Set Up Fleets.

Related operations

" }, "ListScripts":{ "name":"ListScripts", @@ -834,6 +848,22 @@ ], "documentation":"

Retrieves script records for all Realtime scripts that are associated with the AWS account in use.

Learn more

Amazon GameLift Realtime Servers

Related operations

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"TaggingFailedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieves all tags that are assigned to a GameLift resource. Resource tags are used to organize AWS resources for a range of purposes. This action handles the permissions necessary to manage tags for the following GameLift resource types:

  • Build

  • Script

  • Fleet

  • Alias

  • GameSessionQueue

  • MatchmakingConfiguration

  • MatchmakingRuleSet

To list tags for a resource, specify the unique ARN value for the resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

" + }, "PutScalingPolicy":{ "name":"PutScalingPolicy", "http":{ @@ -881,7 +911,7 @@ {"shape":"TerminalRoutingStrategyException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves the fleet ID that a specified alias is currently pointing to.

" + "documentation":"

Retrieves the fleet ID that an alias is currently pointing to.

" }, "SearchGameSessions":{ "name":"SearchGameSessions", @@ -898,7 +928,7 @@ {"shape":"UnauthorizedException"}, {"shape":"TerminalRoutingStrategyException"} ], - "documentation":"

Retrieves all active game sessions that match a set of search criteria and sorts them in a specified order. You can search or sort by the following game session attributes:

  • gameSessionId -- Unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value.

  • gameSessionName -- Name assigned to a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession. Game session names do not need to be unique to a game session.

  • gameSessionProperties -- Custom data defined in a game session's GameProperty parameter. GameProperty values are stored as key:value pairs; the filter expression must indicate the key and a string to search the data values for. For example, to search for game sessions with custom data containing the key:value pair \"gameMode:brawl\", specify the following: gameSessionProperties.gameMode = \"brawl\". All custom data values are searched as strings.

  • maximumSessions -- Maximum number of player sessions allowed for a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession.

  • creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds.

  • playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out.

  • hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join.

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. If successful, a collection of GameSession objects matching the request is returned. Use the pagination parameters to retrieve results as a set of sequential pages.

You can search for game sessions one fleet at a time only. To find game sessions across multiple fleets, you must search each fleet separately and combine the results. This search feature finds only game sessions that are in ACTIVE status. To locate games in statuses other than active, use DescribeGameSessionDetails.

" + "documentation":"

Retrieves all active game sessions that match a set of search criteria and sorts them in a specified order. You can search or sort by the following game session attributes:

  • gameSessionId -- A unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value.

  • gameSessionName -- Name assigned to a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession. Game session names do not need to be unique to a game session.

  • gameSessionProperties -- Custom data defined in a game session's GameProperty parameter. GameProperty values are stored as key:value pairs; the filter expression must indicate the key and a string to search the data values for. For example, to search for game sessions with custom data containing the key:value pair \"gameMode:brawl\", specify the following: gameSessionProperties.gameMode = \"brawl\". All custom data values are searched as strings.

  • maximumSessions -- Maximum number of player sessions allowed for a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession.

  • creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds.

  • playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out.

  • hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join.

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. If successful, a collection of GameSession objects matching the request is returned. Use the pagination parameters to retrieve results as a set of sequential pages.

You can search for game sessions one fleet at a time only. To find game sessions across multiple fleets, you must search each fleet separately and combine the results. This search feature finds only game sessions that are in ACTIVE status. To locate games in statuses other than active, use DescribeGameSessionDetails.

" }, "StartFleetActions":{ "name":"StartFleetActions", @@ -930,7 +960,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

  • The queue name and a set of game session properties and settings

  • A unique ID (such as a UUID) for the placement. You use this ID to track the status of the placement request

  • (Optional) A set of player data and a unique player ID for each player that you are joining to the new game session (player data is optional, but if you include it, you must also provide a unique ID for each player)

  • Latency data for all players (if you want to optimize game play for the players)

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.

" + "documentation":"

Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

  • The queue name and a set of game session properties and settings

  • A unique ID (such as a UUID) for the placement. You use this ID to track the status of the placement request

  • (Optional) A set of player data and a unique player ID for each player that you are joining to the new game session (player data is optional, but if you include it, you must also provide a unique ID for each player)

  • Latency data for all players (if you want to optimize game play for the players)

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.

" }, "StartMatchBackfill":{ "name":"StartMatchBackfill", @@ -1012,6 +1042,38 @@ ], "documentation":"

Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.

If the action is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).

Learn more

Add FlexMatch to a Game Client

Related operations

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"TaggingFailedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Assigns a tag to a GameLift resource. AWS resource tags provide an additional management tool set. You can use tags to organize resources, create IAM permissions policies to manage access to groups of resources, customize AWS cost breakdowns, etc. This action handles the permissions necessary to manage tags for the following GameLift resource types:

  • Build

  • Script

  • Fleet

  • Alias

  • GameSessionQueue

  • MatchmakingConfiguration

  • MatchmakingRuleSet

To add a tag to a resource, specify the unique ARN value for the resource and provide a tag list containing one or more tags. The operation succeeds even if the list includes tags that are already assigned to the specified resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"TaggingFailedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Removes a tag that is assigned to a GameLift resource. Resource tags are used to organize AWS resources for a range of purposes. This action handles the permissions necessary to manage tags for the following GameLift resource types:

  • Build

  • Script

  • Fleet

  • Alias

  • GameSessionQueue

  • MatchmakingConfiguration

  • MatchmakingRuleSet

To remove a tag from a resource, specify the unique ARN value for the resource and provide a string list containing one or more tags to be removed. This action succeeds even if the list includes tags that are not currently assigned to the specified resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

" + }, "UpdateAlias":{ "name":"UpdateAlias", "http":{ @@ -1061,7 +1123,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates fleet properties, including name and description, for a fleet. To update metadata, specify the fleet ID and the property values that you want to change. If successful, the fleet ID for the updated fleet is returned.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Updates fleet properties, including name and description, for a fleet. To update metadata, specify the fleet ID and the property values that you want to change. If successful, the fleet ID for the updated fleet is returned.

Learn more

Working with Fleets.

Related operations
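A minimal sketch of an UpdateFleetAttributes call via botocore; the fleet ID, name, and description are placeholder assumptions:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    # Placeholder fleet ID; only the properties passed here are changed.
    response = client.update_fleet_attributes(
        FleetId='fleet-EXAMPLE',
        Name='my-fleet',
        Description='Updated fleet description')
    print(response['FleetId'])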

" }, "UpdateFleetCapacity":{ "name":"UpdateFleetCapacity", @@ -1080,7 +1142,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates capacity settings for a fleet. Use this action to specify the number of EC2 instances (hosts) that you want this fleet to contain. Before calling this action, you may want to call DescribeEC2InstanceLimits to get the maximum capacity based on the fleet's EC2 instance type.

Specify minimum and maximum number of instances. Amazon GameLift will not change fleet capacity to values fall outside of this range. This is particularly important when using auto-scaling (see PutScalingPolicy) to allow capacity to adjust based on player demand while imposing limits on automatic adjustments.

To update fleet capacity, specify the fleet ID and the number of instances you want the fleet to host. If successful, Amazon GameLift starts or terminates instances so that the fleet's active instance count matches the desired instance count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. If the desired instance count is higher than the instance type's limit, the \"Limit Exceeded\" exception occurs.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Updates capacity settings for a fleet. Use this action to specify the number of EC2 instances (hosts) that you want this fleet to contain. Before calling this action, you may want to call DescribeEC2InstanceLimits to get the maximum capacity based on the fleet's EC2 instance type.

Specify the minimum and maximum number of instances. Amazon GameLift will not change fleet capacity to values that fall outside of this range. This is particularly important when using auto-scaling (see PutScalingPolicy) to allow capacity to adjust based on player demand while imposing limits on automatic adjustments.

To update fleet capacity, specify the fleet ID and the number of instances you want the fleet to host. If successful, Amazon GameLift starts or terminates instances so that the fleet's active instance count matches the desired instance count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. If the desired instance count is higher than the instance type's limit, the \"Limit Exceeded\" exception occurs.

Learn more

Working with Fleets.

Related operations
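A hedged sketch of an UpdateFleetCapacity request via botocore; the fleet ID and instance counts are placeholders:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    # Placeholder values; the desired count must fall within [MinSize, MaxSize].
    client.update_fleet_capacity(
        FleetId='fleet-EXAMPLE',
        DesiredInstances=2,
        MinSize=1,
        MaxSize=5)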

" }, "UpdateFleetPortSettings":{ "name":"UpdateFleetPortSettings", @@ -1099,7 +1161,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates port settings for a fleet. To update settings, specify the fleet ID to be updated and list the permissions you want to update. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Updates port settings for a fleet. To update settings, specify the fleet ID to be updated and list the permissions you want to update. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.

Learn more

Working with Fleets.

Related operations
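A minimal sketch of an UpdateFleetPortSettings request via botocore; the fleet ID, port range, and CIDR block are placeholder assumptions:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    # Authorizations add inbound permissions; revocations (not shown) remove existing ones.
    client.update_fleet_port_settings(
        FleetId='fleet-EXAMPLE',
        InboundPermissionAuthorizations=[
            {'FromPort': 33430, 'ToPort': 33440, 'IpRange': '0.0.0.0/0', 'Protocol': 'UDP'}
        ])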

" }, "UpdateGameSession":{ "name":"UpdateGameSession", @@ -1166,7 +1228,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidFleetStatusException"} ], - "documentation":"

Updates the current run-time configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's run-time configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update run-time configuration, specify the fleet ID and provide a RuntimeConfiguration object with an updated set of server process configurations.

Each instance in a Amazon GameLift fleet checks regularly for an updated run-time configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; run-time configuration changes are applied gradually as existing processes shut down and new processes are launched during Amazon GameLift's normal process recycling activity.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Updates the current runtime configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration object with an updated set of server process configurations.

Each instance in an Amazon GameLift fleet checks regularly for an updated runtime configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; runtime configuration changes are applied gradually as existing processes shut down and new processes are launched during Amazon GameLift's normal process recycling activity.

Learn more

Working with Fleets.

Related operations
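A hedged sketch of an UpdateRuntimeConfiguration request via botocore; the fleet ID, launch path, and parameters are placeholder assumptions:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    # The runtime configuration lists the server processes to run on each instance.
    client.update_runtime_configuration(
        FleetId='fleet-EXAMPLE',
        RuntimeConfiguration={
            'ServerProcesses': [
                {'LaunchPath': '/local/game/myserver', 'Parameters': '+port 33435', 'ConcurrentExecutions': 1}
            ],
            'MaxConcurrentGameSessionActivations': 2,
            'GameSessionActivationTimeoutSeconds': 300
        })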

" }, "UpdateScript":{ "name":"UpdateScript", @@ -1211,11 +1273,11 @@ "members":{ "TicketId":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking ticket. The ticket must be in status REQUIRES_ACCEPTANCE; otherwise this request will fail.

" + "documentation":"

A unique identifier for a matchmaking ticket. The ticket must be in status REQUIRES_ACCEPTANCE; otherwise this request will fail.

" }, "PlayerIds":{ "shape":"StringList", - "documentation":"

Unique identifier for a player delivering the response. This parameter can include one or multiple player IDs.

" + "documentation":"

A unique identifier for a player delivering the response. This parameter can include one or multiple player IDs.

" }, "AcceptanceType":{ "shape":"AcceptanceType", @@ -1241,43 +1303,48 @@ "members":{ "AliasId":{ "shape":"AliasId", - "documentation":"

Unique identifier for an alias; alias IDs are unique within a region.

" + "documentation":"

A unique identifier for an alias. Alias IDs are unique within a Region.

" }, "Name":{ "shape":"NonBlankAndLengthConstraintString", - "documentation":"

Descriptive label that is associated with an alias. Alias names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with an alias. Alias names do not need to be unique.

" }, "AliasArn":{ "shape":"ArnStringModel", - "documentation":"

Unique identifier for an alias; alias ARNs are unique across all regions.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift alias resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift alias ARN, the resource ID matches the alias ID value.

" }, "Description":{ "shape":"FreeText", - "documentation":"

Human-readable description of an alias.

" + "documentation":"

A human-readable description of an alias.

" }, "RoutingStrategy":{ "shape":"RoutingStrategy", - "documentation":"

Alias configuration for the alias, including routing type and settings.

" + "documentation":"

The routing configuration, including routing type and fleet target, for the alias.

" }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "LastUpdatedTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was last modified. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

The time that this data object was last modified. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Properties describing a fleet alias.

" + "documentation":"

Properties that describe an alias resource.

" }, "AliasId":{ "type":"string", - "pattern":"^alias-\\S+" + "pattern":"^alias-\\S+|^arn:.*:alias\\/alias-\\S+" }, "AliasList":{ "type":"list", "member":{"shape":"Alias"} }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, "ArnStringModel":{ "type":"string", "max":256, @@ -1304,7 +1371,7 @@ "documentation":"

For a map of up to 10 data type:value pairs. Maximum length for each string value is 100 characters.

" } }, - "documentation":"

Values for use in Player attribute key:value pairs. This object lets you specify an attribute value using any of the valid data types: string, number, string array, or data map. Each AttributeValue object can use only one of the available properties.

" + "documentation":"

Values for use in Player attribute key-value pairs. This object lets you specify an attribute value using any of the valid data types: string, number, string array, or data map. Each AttributeValue object can use only one of the available properties.

" }, "AwsCredentials":{ "type":"structure", @@ -1338,15 +1405,19 @@ "members":{ "BuildId":{ "shape":"BuildId", - "documentation":"

Unique identifier for a build.

" + "documentation":"

A unique identifier for a build.

" + }, + "BuildArn":{ + "shape":"BuildArn", + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift build resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift build ARN, the resource ID matches the BuildId value.

" }, "Name":{ "shape":"FreeText", - "documentation":"

Descriptive label that is associated with a build. Build names do not need to be unique. It can be set using CreateBuild or UpdateBuild.

" + "documentation":"

A descriptive label that is associated with a build. Build names do not need to be unique. It can be set using CreateBuild or UpdateBuild.

" }, "Version":{ "shape":"FreeText", - "documentation":"

Version that is associated with a build or script. Version strings do not need to be unique. This value can be set using CreateBuild or UpdateBuild.

" + "documentation":"

Version information that is associated with a build or script. Version strings do not need to be unique. This value can be set using CreateBuild or UpdateBuild.

" }, "Status":{ "shape":"BuildStatus", @@ -1367,9 +1438,13 @@ }, "documentation":"

Properties describing a custom game build.

Related operations

" }, + "BuildArn":{ + "type":"string", + "pattern":"^arn:.*:build\\/build-\\S+" + }, "BuildId":{ "type":"string", - "pattern":"^build-\\S+" + "pattern":"^build-\\S+|^arn:.*:build\\/build-\\S+" }, "BuildList":{ "type":"list", @@ -1387,8 +1462,12 @@ "type":"structure", "required":["CertificateType"], "members":{ - "CertificateType":{"shape":"CertificateType"} - } + "CertificateType":{ + "shape":"CertificateType", + "documentation":"

Indicates whether a TLS/SSL certificate was generated for a fleet.

" + } + }, + "documentation":"

Information about the use of a TLS/SSL certificate for a fleet. TLS certificate generation is enabled at the fleet level, with one certificate generated for the fleet. When this feature is enabled, the certificate can be retrieved using the GameLift Server SDK call GetInstanceCertificate. All instances in a fleet share the same certificate.

" }, "CertificateType":{ "type":"string", @@ -1423,15 +1502,19 @@ "members":{ "Name":{ "shape":"NonBlankAndLengthConstraintString", - "documentation":"

Descriptive label that is associated with an alias. Alias names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with an alias. Alias names do not need to be unique.

" }, "Description":{ "shape":"NonZeroAndMaxString", - "documentation":"

Human-readable description of an alias.

" + "documentation":"

A human-readable description of the alias.

" }, "RoutingStrategy":{ "shape":"RoutingStrategy", - "documentation":"

Object that specifies the fleet and routing type to use for the alias.

" + "documentation":"

The routing configuration, including routing type and fleet target, for the alias.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of labels to assign to the new alias resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.

" } }, "documentation":"

Represents the input for a request action.
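For illustration, a minimal botocore sketch of building this CreateAlias request; the fleet ID and tag values are placeholder assumptions:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    # A SIMPLE routing strategy points the alias at a single fleet (placeholder ID).
    response = client.create_alias(
        Name='my-alias',
        Description='Alias for the live fleet',
        RoutingStrategy={'Type': 'SIMPLE', 'FleetId': 'fleet-EXAMPLE'},
        Tags=[{'Key': 'project', 'Value': 'my-game'}])
    print(response['Alias']['AliasArn'])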

" @@ -1441,7 +1524,7 @@ "members":{ "Alias":{ "shape":"Alias", - "documentation":"

Object that describes the newly created alias record.

" + "documentation":"

The newly created alias resource.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -1451,19 +1534,23 @@ "members":{ "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a build. Build names do not need to be unique. You can use UpdateBuild to change this value later.

" + "documentation":"

A descriptive label that is associated with a build. Build names do not need to be unique. You can use UpdateBuild to change this value later.

" }, "Version":{ "shape":"NonZeroAndMaxString", - "documentation":"

Version that is associated with a build or script. Version strings do not need to be unique. You can use UpdateBuild to change this value later.

" + "documentation":"

Version information that is associated with a build or script. Version strings do not need to be unique. You can use UpdateBuild to change this value later.

" }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an Amazon S3 bucket that you own. The storage location must specify an Amazon S3 bucket name and key, as well as a the ARN for a role that you set up to allow Amazon GameLift to access your Amazon S3 bucket. The S3 bucket must be in the same region that you want to create a new build in.

" + "documentation":"

Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an Amazon S3 bucket that you own. The storage location must specify an Amazon S3 bucket name and key. The location must also specify a role ARN that you set up to allow Amazon GameLift to access your Amazon S3 bucket. The S3 bucket and your new build must be in the same Region.

" }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

Operating system that the game server binaries are built to run on. This value determines the type of fleet resources that you can use for this build. If your game build contains multiple executables, they all must run on the same operating system. If an operating system is not specified when creating a build, Amazon GameLift uses the default value (WINDOWS_2012). This value cannot be changed later.

" + "documentation":"

The operating system that the game server binaries are built to run on. This value determines the type of fleet resources that you can use for this build. If your game build contains multiple executables, they all must run on the same operating system. If an operating system is not specified when creating a build, Amazon GameLift uses the default value (WINDOWS_2012). This value cannot be changed later.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of labels to assign to the new build resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.

" } }, "documentation":"

Represents the input for a request action.
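A hedged sketch of a CreateBuild request via botocore, using an S3 storage location; the bucket, key, and role ARN are placeholder assumptions and must be in the same Region as the build:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    response = client.create_build(
        Name='my-game-server',
        Version='1.0.0',
        StorageLocation={
            'Bucket': 'my-build-bucket',
            'Key': 'builds/server-1.0.0.zip',
            'RoleArn': 'arn:aws:iam::111122223333:role/gamelift-s3-access'
        },
        OperatingSystem='AMAZON_LINUX_2')
    print(response['Build']['BuildId'])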

" @@ -1473,7 +1560,7 @@ "members":{ "Build":{ "shape":"Build", - "documentation":"

The newly created build record, including a unique build ID and status.

" + "documentation":"

The newly created build record, including a unique build ID and status.

" }, "UploadCredentials":{ "shape":"AwsCredentials", @@ -1495,27 +1582,27 @@ "members":{ "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a fleet. Fleet names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a fleet. Fleet names do not need to be unique.

" }, "Description":{ "shape":"NonZeroAndMaxString", - "documentation":"

Human-readable description of a fleet.

" + "documentation":"

A human-readable description of a fleet.

" }, "BuildId":{ "shape":"BuildId", - "documentation":"

Unique identifier for a build to be deployed on the new fleet. The custom game server build must have been successfully uploaded to Amazon GameLift and be in a READY status. This fleet setting cannot be changed once the fleet is created.

" + "documentation":"

A unique identifier for a build to be deployed on the new fleet. You can use either the build ID or ARN value. The custom game server build must have been successfully uploaded to Amazon GameLift and be in a READY status. This fleet setting cannot be changed once the fleet is created.

" }, "ScriptId":{ "shape":"ScriptId", - "documentation":"

Unique identifier for a Realtime script to be deployed on the new fleet. The Realtime script must have been successfully uploaded to Amazon GameLift. This fleet setting cannot be changed once the fleet is created.

" + "documentation":"

A unique identifier for a Realtime script to be deployed on the new fleet. You can use either the script ID or ARN value. The Realtime script must have been successfully uploaded to Amazon GameLift. This fleet setting cannot be changed once the fleet is created.

" }, "ServerLaunchPath":{ "shape":"NonZeroAndMaxString", - "documentation":"

This parameter is no longer used. Instead, specify a server launch path using the RuntimeConfiguration parameter. (Requests that specify a server launch path and launch parameters instead of a run-time configuration will continue to work.)

" + "documentation":"

This parameter is no longer used. Instead, specify a server launch path using the RuntimeConfiguration parameter. Requests that specify a server launch path and launch parameters instead of a runtime configuration will continue to work.

" }, "ServerLaunchParameters":{ "shape":"NonZeroAndMaxString", - "documentation":"

This parameter is no longer used. Instead, specify server launch parameters in the RuntimeConfiguration parameter. (Requests that specify a server launch path and launch parameters instead of a run-time configuration will continue to work.)

" + "documentation":"

This parameter is no longer used. Instead, specify server launch parameters in the RuntimeConfiguration parameter. (Requests that specify a server launch path and launch parameters instead of a runtime configuration will continue to work.)

" }, "LogPaths":{ "shape":"StringList", @@ -1523,45 +1610,52 @@ }, "EC2InstanceType":{ "shape":"EC2InstanceType", - "documentation":"

Name of an EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift supports the following EC2 instance types. See Amazon EC2 Instance Types for detailed descriptions.

" + "documentation":"

The name of an EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift supports the following EC2 instance types. See Amazon EC2 Instance Types for detailed descriptions.

" }, "EC2InboundPermissions":{ "shape":"IpPermissionsList", - "documentation":"

Range of IP addresses and port settings that permit inbound traffic to access game sessions that running on the fleet. For fleets using a custom game build, this parameter is required before game sessions running on the fleet can accept connections. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges for use by the Realtime servers. You can specify multiple permission settings or add more by updating the fleet.

" + "documentation":"

Range of IP addresses and port settings that permit inbound traffic to access game sessions that are running on the fleet. For fleets using a custom game build, this parameter is required before game sessions running on the fleet can accept connections. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges for use by the Realtime servers. You can specify multiple permission settings or add more by updating the fleet.

" }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", - "documentation":"

Game session protection policy to apply to all instances in this fleet. If this parameter is not set, instances in this fleet default to no protection. You can change a fleet's protection policy using UpdateFleetAttributes, but this change will only affect sessions created after the policy change. You can also set protection for individual instances using UpdateGameSession.

  • NoProtection -- The game session can be terminated during a scale-down event.

  • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

" + "documentation":"

A game session protection policy to apply to all instances in this fleet. If this parameter is not set, instances in this fleet default to no protection. You can change a fleet's protection policy using UpdateFleetAttributes, but this change will only affect sessions created after the policy change. You can also set protection for individual instances using UpdateGameSession.

  • NoProtection - The game session can be terminated during a scale-down event.

  • FullProtection - If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

" }, "RuntimeConfiguration":{ "shape":"RuntimeConfiguration", - "documentation":"

Instructions for launching server processes on each instance in the fleet. Server processes run either a custom game build executable or a Realtime Servers script. The run-time configuration lists the types of server processes to run on an instance and includes the following configuration settings: the server executable or launch script file, launch parameters, and the number of processes to run concurrently on each instance. A CreateFleet request must include a run-time configuration with at least one server process configuration.

" + "documentation":"

Instructions for launching server processes on each instance in the fleet. Server processes run either a custom game build executable or a Realtime script. The runtime configuration defines the server executables or launch script file, launch parameters, and the number of processes to run concurrently on each instance. When creating a fleet, the runtime configuration must have at least one server process configuration; otherwise the request fails with an invalid request exception. This parameter replaces the deprecated parameters ServerLaunchPath and ServerLaunchParameters, and it is required unless those parameters are defined. Requests that specify a server launch path and launch parameters instead of a runtime configuration will continue to work.

" }, "ResourceCreationLimitPolicy":{ "shape":"ResourceCreationLimitPolicy", - "documentation":"

Policy that limits the number of game sessions an individual player can create over a span of time for this fleet.

" + "documentation":"

A policy that limits the number of game sessions an individual player can create over a span of time for this fleet.

" }, "MetricGroups":{ "shape":"MetricGroupList", - "documentation":"

Name of an Amazon CloudWatch metric group to add this fleet to. A metric group aggregates the metrics for all fleets in the group. Specify an existing metric group name, or provide a new name to create a new metric group. A fleet can only be included in one metric group at a time.

" + "documentation":"

The name of an Amazon CloudWatch metric group to add this fleet to. A metric group aggregates the metrics for all fleets in the group. Specify an existing metric group name, or provide a new name to create a new metric group. A fleet can only be included in one metric group at a time.

" }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the AWS account with the VPC that you want to peer your Amazon GameLift fleet with. You can find your Account ID in the AWS Management Console under account settings.

" + "documentation":"

A unique identifier for the AWS account with the VPC that you want to peer your Amazon GameLift fleet with. You can find your account ID in the AWS Management Console under account settings.

" }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" }, "FleetType":{ "shape":"FleetType", - "documentation":"

Indicates whether to use on-demand instances or spot instances for this fleet. If empty, the default is ON_DEMAND. Both categories of instances use identical hardware and configurations based on the instance type selected for this fleet. Learn more about On-Demand versus Spot Instances.

" + "documentation":"

Indicates whether to use On-Demand instances or Spot instances for this fleet. If empty, the default is ON_DEMAND. Both categories of instances use identical hardware and configurations based on the instance type selected for this fleet. Learn more about On-Demand versus Spot Instances.

" }, "InstanceRoleArn":{ "shape":"NonEmptyString", - "documentation":"

Unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, daemons (background processes). Create a role or look up a role's ARN using the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

" + "documentation":"

A unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN from the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

" }, - "CertificateConfiguration":{"shape":"CertificateConfiguration"} + "CertificateConfiguration":{ + "shape":"CertificateConfiguration", + "documentation":"

Indicates whether to generate a TLS/SSL certificate for the new fleet. TLS certificates are used for encrypting traffic between game clients and game servers running on GameLift. If this parameter is not specified, the default value, DISABLED, is used. This fleet setting cannot be changed once the fleet is created. Learn more at Securing Client/Server Communication.

Note: This feature requires the AWS Certificate Manager (ACM) service, which is available in the AWS global partition but not in all other partitions. When working in a partition that does not support this feature, a request for a new fleet with certificate generation fails with a 4xx unsupported Region error.

Valid values include:

  • GENERATED - Generate a TLS/SSL certificate for this fleet.

  • DISABLED - (default) Do not generate a TLS/SSL certificate for this fleet.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of labels to assign to the new fleet resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.

" + } }, "documentation":"

Represents the input for a request action.
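A hedged sketch of a CreateFleet request via botocore; the build ID, ports, launch path, and instance type are placeholder assumptions. A runtime configuration with at least one server process is required for build and script fleets:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    response = client.create_fleet(
        Name='my-fleet',
        BuildId='build-EXAMPLE',
        EC2InstanceType='c4.large',
        EC2InboundPermissions=[
            {'FromPort': 33430, 'ToPort': 33440, 'IpRange': '0.0.0.0/0', 'Protocol': 'UDP'}
        ],
        RuntimeConfiguration={
            'ServerProcesses': [
                {'LaunchPath': '/local/game/myserver', 'ConcurrentExecutions': 1}
            ]
        },
        NewGameSessionProtectionPolicy='NoProtection',
        CertificateConfiguration={'CertificateType': 'DISABLED'})
    print(response['FleetAttributes']['FleetId'])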

" }, @@ -1581,19 +1675,19 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to create a game session in. Each request must reference either a fleet ID or alias ID, but not both.

" + "documentation":"

A unique identifier for a fleet to create a game session in. You can use either the fleet ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "AliasId":{ "shape":"AliasId", - "documentation":"

Unique identifier for an alias associated with the fleet to create a game session in. Each request must reference either a fleet ID or alias ID, but not both.

" + "documentation":"

A unique identifier for an alias associated with the fleet to create a game session in. You can use either the alias ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", - "documentation":"

Maximum number of players that can be connected simultaneously to the game session.

" + "documentation":"

The maximum number of players that can be connected simultaneously to the game session.

" }, "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a game session. Session names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a game session. Session names do not need to be unique.

" }, "GameProperties":{ "shape":"GamePropertyList", @@ -1601,7 +1695,7 @@ }, "CreatorId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a player or entity creating the game session. This ID is used to enforce a resource protection policy (if one exists) that limits the number of concurrent active game sessions one player can have.

" + "documentation":"

A unique identifier for a player or entity creating the game session. This ID is used to enforce a resource protection policy (if one exists) that limits the number of concurrent active game sessions one player can have.

" }, "GameSessionId":{ "shape":"IdStringModel", @@ -1634,19 +1728,23 @@ "members":{ "Name":{ "shape":"GameSessionQueueName", - "documentation":"

Descriptive label that is associated with game session queue. Queue names must be unique within each region.

" + "documentation":"

A descriptive label that is associated with a game session queue. Queue names must be unique within each Region.

" }, "TimeoutInSeconds":{ "shape":"WholeNumber", - "documentation":"

Maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" + "documentation":"

The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", - "documentation":"

Collection of latency policies to apply when processing game sessions placement requests with player latency information. Multiple policies are evaluated in order of the maximum latency value, starting with the lowest latency values. With just one policy, it is enforced at the start of the game session placement for the duration period. With multiple policies, each policy is enforced consecutively for its duration period. For example, a queue might enforce a 60-second policy followed by a 120-second policy, and then no policy for the remainder of the placement. A player latency policy must set a value for MaximumIndividualPlayerLatencyMilliseconds; if none is set, this API requests will fail.

" + "documentation":"

A collection of latency policies to apply when processing game sessions placement requests with player latency information. Multiple policies are evaluated in order of the maximum latency value, starting with the lowest latency values. With just one policy, the policy is enforced at the start of the game session placement for the duration period. With multiple policies, each policy is enforced consecutively for its duration period. For example, a queue might enforce a 60-second policy followed by a 120-second policy, and then no policy for the remainder of the placement. A player latency policy must set a value for MaximumIndividualPlayerLatencyMilliseconds. If none is set, this API request fails.

" }, "Destinations":{ "shape":"GameSessionQueueDestinationList", - "documentation":"

List of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order.

" + "documentation":"

A list of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of labels to assign to the new game session queue resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.

" } }, "documentation":"

Represents the input for a request action.
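A minimal sketch of a CreateGameSessionQueue request via botocore; the fleet ARN, timeout, and latency limits are placeholder assumptions. Destinations are tried in the listed preference order:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    client.create_game_session_queue(
        Name='my-queue',
        TimeoutInSeconds=600,
        Destinations=[
            {'DestinationArn': 'arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-EXAMPLE'}
        ],
        PlayerLatencyPolicies=[
            {'MaximumIndividualPlayerLatencyMilliseconds': 100, 'PolicyDurationSeconds': 60},
            {'MaximumIndividualPlayerLatencyMilliseconds': 200}
        ])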

" @@ -1656,7 +1754,7 @@ "members":{ "GameSessionQueue":{ "shape":"GameSessionQueue", - "documentation":"

Object that describes the newly created game session queue.

" + "documentation":"

An object that describes the newly created game session queue.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -1673,39 +1771,39 @@ "members":{ "Name":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.

" + "documentation":"

A unique identifier for a matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.

" }, "Description":{ "shape":"NonZeroAndMaxString", - "documentation":"

Meaningful description of the matchmaking configuration.

" + "documentation":"

A human-readable description of the matchmaking configuration.

" }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>:<aws account>:gamesessionqueue/<queue name>. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", - "documentation":"

Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.

" + "documentation":"

The maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.

" }, "AcceptanceTimeoutSeconds":{ "shape":"MatchmakingAcceptanceTimeoutInteger", - "documentation":"

Length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" + "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" }, "AcceptanceRequired":{ "shape":"BooleanModel", - "documentation":"

Flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" + "documentation":"

A flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" }, "RuleSetName":{ - "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking rule set to use with this configuration. A matchmaking configuration can only use rule sets that are defined in the same region.

" + "shape":"MatchmakingRuleSetName", + "documentation":"

A unique identifier for a matchmaking rule set to use with this configuration. You can use either the rule set name or ARN value. A matchmaking configuration can only use rule sets that are defined in the same Region.

" }, "NotificationTarget":{ "shape":"SnsArnStringModel", - "documentation":"

SNS topic ARN that is set up to receive matchmaking notifications.

" + "documentation":"

An SNS topic ARN that is set up to receive matchmaking notifications.

" }, "AdditionalPlayerCount":{ "shape":"WholeNumber", - "documentation":"

Number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match.

" + "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.

" }, "CustomEventData":{ "shape":"CustomEventData", @@ -1713,15 +1811,19 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "BackfillMode":{ "shape":"BackfillMode", - "documentation":"

Method used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

" + "documentation":"

The method used to backfill game sessions that are created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.

" } }, "documentation":"

Represents the input for a request action.
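A hedged sketch of a CreateMatchmakingConfiguration request via botocore; the queue ARN and rule set name are placeholder assumptions, and the rule set must already exist in the same Region:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    client.create_matchmaking_configuration(
        Name='my-matchmaker',
        GameSessionQueueArns=['arn:aws:gamelift:us-west-2:111122223333:gamesessionqueue/my-queue'],
        RequestTimeoutSeconds=120,
        AcceptanceRequired=False,
        RuleSetName='my-rule-set',
        BackfillMode='MANUAL')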

" @@ -1745,11 +1847,15 @@ "members":{ "Name":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking rule set. A matchmaking configuration identifies the rule set it uses by this name value. (Note: The rule set name is different from the optional \"name\" field in the rule set body.)

" + "documentation":"

A unique identifier for a matchmaking rule set. A matchmaking configuration identifies the rule set it uses by this name value. Note that the rule set name is different from the optional name field in the rule set body.

" }, "RuleSetBody":{ "shape":"RuleSetBody", - "documentation":"

Collection of matchmaking rules, formatted as a JSON string. Comments are not allowed in JSON, but most elements support a description field.

" + "documentation":"

A collection of matchmaking rules, formatted as a JSON string. Comments are not allowed in JSON, but most elements support a description field.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of labels to assign to the new matchmaking rule set resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.

" } }, "documentation":"

Represents the input for a request action.
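For illustration, a sketch of a CreateMatchmakingRuleSet request via botocore; the single-team rule set body below is a minimal illustrative assumption rather than a production rule set:

    import json
    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    # Illustrative rule set body; real rule sets are usually much richer.
    rule_set_body = {
        'name': 'simple_rule_set',
        'ruleLanguageVersion': '1.0',
        'teams': [{'name': 'players', 'minPlayers': 2, 'maxPlayers': 4}]
    }
    client.create_matchmaking_rule_set(
        Name='my-rule-set',
        RuleSetBody=json.dumps(rule_set_body))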

" @@ -1760,7 +1866,7 @@ "members":{ "RuleSet":{ "shape":"MatchmakingRuleSet", - "documentation":"

Object that describes the newly created matchmaking rule set.

" + "documentation":"

The newly created matchmaking rule set.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -1774,11 +1880,11 @@ "members":{ "GameSessionId":{ "shape":"ArnStringModel", - "documentation":"

Unique identifier for the game session to add a player to.

" + "documentation":"

A unique identifier for the game session to add a player to.

" }, "PlayerId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a player. Player IDs are developer-defined.

" + "documentation":"

A unique identifier for a player. Player IDs are developer-defined.

" }, "PlayerData":{ "shape":"PlayerData", @@ -1806,7 +1912,7 @@ "members":{ "GameSessionId":{ "shape":"ArnStringModel", - "documentation":"

Unique identifier for the game session to add players to.

" + "documentation":"

A unique identifier for the game session to add players to.

" }, "PlayerIds":{ "shape":"PlayerIdList", @@ -1824,7 +1930,7 @@ "members":{ "PlayerSessions":{ "shape":"PlayerSessionList", - "documentation":"

Collection of player session objects created for the added players.

" + "documentation":"

A collection of player session objects created for the added players.

" } }, "documentation":"

Represents the returned data in response to a request action.
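A hedged sketch of a CreatePlayerSessions request via botocore; the game session identifier and player IDs are placeholder assumptions:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    # Placeholder game session ARN and developer-defined player IDs.
    response = client.create_player_sessions(
        GameSessionId='arn:aws:gamelift:us-west-2::gamesession/fleet-EXAMPLE/gsess-EXAMPLE',
        PlayerIds=['player-1', 'player-2'])
    for session in response['PlayerSessions']:
        print(session['PlayerSessionId'])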

" @@ -1834,19 +1940,23 @@ "members":{ "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a script. Script names do not need to be unique. You can use UpdateScript to change this value later.

" + "documentation":"

A descriptive label that is associated with a script. Script names do not need to be unique. You can use UpdateScript to change this value later.

" }, "Version":{ "shape":"NonZeroAndMaxString", - "documentation":"

Version that is associated with a build or script. Version strings do not need to be unique. You can use UpdateScript to change this value later.

" + "documentation":"

The version that is associated with a build or script. Version strings do not need to be unique. You can use UpdateScript to change this value later.

" }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

Location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

" + "documentation":"

The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

" }, "ZipFile":{ "shape":"ZipBlob", - "documentation":"

Data object containing your Realtime scripts and dependencies as a zip file. The zip file can have one or multiple files. Maximum size of a zip file is 5 MB.

When using the AWS CLI tool to create a script, this parameter is set to the zip file name. It must be prepended with the string \"fileb://\" to indicate that the file data is a binary object. For example: --zip-file fileb://myRealtimeScript.zip.

" + "documentation":"

A data object containing your Realtime scripts and dependencies as a zip file. The zip file can have one or multiple files. Maximum size of a zip file is 5 MB.

When using the AWS CLI tool to create a script, this parameter is set to the zip file name. It must be prepended with the string \"fileb://\" to indicate that the file data is a binary object. For example: --zip-file fileb://myRealtimeScript.zip.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of labels to assign to the new script resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.
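A minimal sketch of a CreateScript request via botocore that uploads a local zip file; the file path, name, and version are placeholder assumptions, and an S3 StorageLocation could be passed instead:

    import botocore.session

    client = botocore.session.get_session().create_client('gamelift', region_name='us-west-2')
    # Placeholder local zip path containing the Realtime script and its dependencies.
    with open('myRealtimeScript.zip', 'rb') as f:
        response = client.create_script(
            Name='my-realtime-script',
            Version='1.0.0',
            ZipFile=f.read())
    print(response['Script']['ScriptId'])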

" } } }, @@ -1855,7 +1965,7 @@ "members":{ "Script":{ "shape":"Script", - "documentation":"

The newly created script record with a unique script ID. The new script's storage location reflects an Amazon S3 location: (1) If the script was uploaded from an S3 bucket under your account, the storage location reflects the information that was provided in the CreateScript request; (2) If the script file was uploaded from a local zip file, the storage location reflects an S3 location controls by the Amazon GameLift service.

" + "documentation":"

The newly created script record with a unique script ID and ARN. The new script's storage location reflects an Amazon S3 location: (1) If the script was uploaded from an S3 bucket under your account, the storage location reflects the information that was provided in the CreateScript request; (2) If the script file was uploaded from a local zip file, the storage location reflects an S3 location controlled by the Amazon GameLift service.

" } } }, @@ -1868,11 +1978,11 @@ "members":{ "GameLiftAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the AWS account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" + "documentation":"

A unique identifier for the AWS account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1897,15 +2007,15 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet. This tells Amazon GameLift which GameLift VPC to peer with.

" + "documentation":"

A unique identifier for a fleet. You can use either the fleet ID or ARN value. This tells Amazon GameLift which GameLift VPC to peer with.

" }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the AWS account with the VPC that you want to peer your Amazon GameLift fleet with. You can find your Account ID in the AWS Management Console under account settings.

" + "documentation":"

A unique identifier for the AWS account with the VPC that you want to peer your Amazon GameLift fleet with. You can find your Account ID in the AWS Management Console under account settings.

" }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1926,7 +2036,7 @@ "members":{ "AliasId":{ "shape":"AliasId", - "documentation":"

Unique identifier for a fleet alias. Specify the alias you want to delete.

" + "documentation":"

A unique identifier of the alias that you want to delete. You can use either the alias ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1937,7 +2047,7 @@ "members":{ "BuildId":{ "shape":"BuildId", - "documentation":"

Unique identifier for a build to delete.

" + "documentation":"

A unique identifier for a build to delete. You can use either the build ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1948,7 +2058,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to be deleted.

" + "documentation":"

A unique identifier for a fleet to be deleted. You can use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1959,10 +2069,10 @@ "members":{ "Name":{ "shape":"GameSessionQueueName", - "documentation":"

Descriptive label that is associated with game session queue. Queue names must be unique within each region.

" + "documentation":"

A descriptive label that is associated with a game session queue. Queue names must be unique within each Region. You can use either the queue ID or ARN value.

" } }, - "documentation":"

Represents the input for a request action.

" + "documentation":"

Represents the input for a request action.

" }, "DeleteGameSessionQueueOutput":{ "type":"structure", @@ -1974,8 +2084,8 @@ "required":["Name"], "members":{ "Name":{ - "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking configuration

" + "shape":"MatchmakingConfigurationName", + "documentation":"

A unique identifier for a matchmaking configuration. You can use either the configuration name or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1990,8 +2100,8 @@ "required":["Name"], "members":{ "Name":{ - "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking rule set to be deleted. (Note: The rule set name is different from the optional \"name\" field in the rule set body.)

" + "shape":"MatchmakingRuleSetName", + "documentation":"

A unique identifier for a matchmaking rule set to be deleted. (Note: The rule set name is different from the optional \"name\" field in the rule set body.) You can use either the rule set name or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2011,11 +2121,11 @@ "members":{ "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

" }, "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to be deleted.

" + "documentation":"

A unique identifier for a fleet to be deleted. You can use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2026,7 +2136,7 @@ "members":{ "ScriptId":{ "shape":"ScriptId", - "documentation":"

Unique identifier for a Realtime script to delete.

" + "documentation":"

A unique identifier for a Realtime script to delete. You can use either the script ID or ARN value.

" } } }, @@ -2039,11 +2149,11 @@ "members":{ "GameLiftAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the AWS account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" + "documentation":"

A unique identifier for the AWS account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2062,11 +2172,11 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet. This value must match the fleet ID referenced in the VPC peering connection record.

" + "documentation":"

A unique identifier for a fleet. The fleet specified must match the fleet referenced in the VPC peering connection record. You can use either the fleet ID or ARN value.

" }, "VpcPeeringConnectionId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC peering connection. This value is included in the VpcPeeringConnection object, which can be retrieved by calling DescribeVpcPeeringConnections.

" + "documentation":"

A unique identifier for a VPC peering connection. This value is included in the VpcPeeringConnection object, which can be retrieved by calling DescribeVpcPeeringConnections.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2082,7 +2192,7 @@ "members":{ "AliasId":{ "shape":"AliasId", - "documentation":"

Unique identifier for a fleet alias. Specify the alias you want to retrieve.

" + "documentation":"

The unique identifier for the fleet alias that you want to retrieve. You can use either the alias ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2092,7 +2202,7 @@ "members":{ "Alias":{ "shape":"Alias", - "documentation":"

Object that contains the requested alias.

" + "documentation":"

The requested alias resource.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -2103,7 +2213,7 @@ "members":{ "BuildId":{ "shape":"BuildId", - "documentation":"

Unique identifier for a build to retrieve properties for.

" + "documentation":"

A unique identifier for a build to retrieve properties for. You can use either the build ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2133,7 +2243,7 @@ "members":{ "EC2InstanceLimits":{ "shape":"EC2InstanceLimitList", - "documentation":"

Object that contains the maximum number of instances for the specified instance type.

" + "documentation":"

The maximum number of instances for the specified instance type.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -2143,11 +2253,11 @@ "members":{ "FleetIds":{ "shape":"FleetIdList", - "documentation":"

Unique identifier for a fleet(s) to retrieve attributes for. To request attributes for all fleets, leave this parameter empty.

" + "documentation":"

A unique identifier for a fleet(s) to retrieve attributes for. You can use either the fleet ID or ARN value.

" }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is ignored when the request specifies one or a list of fleet IDs.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is ignored when the request specifies one or a list of fleet IDs.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2161,7 +2271,7 @@ "members":{ "FleetAttributes":{ "shape":"FleetAttributesList", - "documentation":"

Collection of objects containing attribute metadata for each requested fleet ID.

" + "documentation":"

A collection of objects containing attribute metadata for each requested fleet ID.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2175,11 +2285,11 @@ "members":{ "FleetIds":{ "shape":"FleetIdList", - "documentation":"

Unique identifier for a fleet(s) to retrieve capacity information for. To request capacity information for all fleets, leave this parameter empty.

" + "documentation":"

A unique identifier for a fleet(s) to retrieve capacity information for. You can use either the fleet ID or ARN value.

" }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is ignored when the request specifies one or a list of fleet IDs.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is ignored when the request specifies one or a list of fleet IDs.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2193,7 +2303,7 @@ "members":{ "FleetCapacity":{ "shape":"FleetCapacityList", - "documentation":"

Collection of objects containing capacity information for each requested fleet ID. Leave this parameter empty to retrieve capacity information for all fleets.

" + "documentation":"

A collection of objects containing capacity information for each requested fleet ID. Leave this parameter empty to retrieve capacity information for all fleets.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2208,7 +2318,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to get event logs for.

" + "documentation":"

A unique identifier for a fleet to get event logs for. You can use either the fleet ID or ARN value.

" }, "StartTime":{ "shape":"Timestamp", @@ -2220,7 +2330,7 @@ }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2234,7 +2344,7 @@ "members":{ "Events":{ "shape":"EventList", - "documentation":"

Collection of objects containing event log entries for the specified fleet.

" + "documentation":"

A collection of objects containing event log entries for the specified fleet.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2249,7 +2359,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to retrieve port settings for.

" + "documentation":"

A unique identifier for a fleet to retrieve port settings for. You can use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2259,7 +2369,7 @@ "members":{ "InboundPermissions":{ "shape":"IpPermissionsList", - "documentation":"

Object that contains port settings for the requested fleet ID.

" + "documentation":"

The port settings for the requested fleet ID.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -2269,11 +2379,11 @@ "members":{ "FleetIds":{ "shape":"FleetIdList", - "documentation":"

Unique identifier for a fleet(s) to retrieve utilization data for. To request utilization data for all fleets, leave this parameter empty.

" + "documentation":"

A unique identifier for a fleet(s) to retrieve utilization data for. You can use either the fleet ID or ARN value.

" }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is ignored when the request specifies one or a list of fleet IDs.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is ignored when the request specifies one or a list of fleet IDs.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2287,7 +2397,7 @@ "members":{ "FleetUtilization":{ "shape":"FleetUtilizationList", - "documentation":"

Collection of objects containing utilization information for each requested fleet ID.

" + "documentation":"

A collection of objects containing utilization information for each requested fleet ID.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2301,15 +2411,15 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to retrieve all game sessions active on the fleet.

" + "documentation":"

A unique identifier for a fleet to retrieve all game sessions active on the fleet. You can use either the fleet ID or ARN value.

" }, "GameSessionId":{ "shape":"ArnStringModel", - "documentation":"

Unique identifier for the game session to retrieve.

" + "documentation":"

A unique identifier for the game session to retrieve.

" }, "AliasId":{ "shape":"AliasId", - "documentation":"

Unique identifier for an alias associated with the fleet to retrieve all game sessions for.

" + "documentation":"

A unique identifier for an alias associated with the fleet to retrieve all game sessions for. You can use either the alias ID or ARN value.

" }, "StatusFilter":{ "shape":"NonZeroAndMaxString", @@ -2317,7 +2427,7 @@ }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2331,7 +2441,7 @@ "members":{ "GameSessionDetails":{ "shape":"GameSessionDetailList", - "documentation":"

Collection of objects containing game session properties and the protection policy currently in force for each session matching the request.

" + "documentation":"

A collection of objects containing game session properties and the protection policy currently in force for each session matching the request.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2346,7 +2456,7 @@ "members":{ "PlacementId":{ "shape":"IdStringModel", - "documentation":"

Unique identifier for a game session placement to retrieve.

" + "documentation":"

A unique identifier for a game session placement to retrieve.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2366,15 +2476,15 @@ "members":{ "Names":{ "shape":"GameSessionQueueNameList", - "documentation":"

List of queue names to retrieve information for. To request settings for all queues, leave this parameter empty.

" + "documentation":"

A list of queue names to retrieve information for. You can use either the queue ID or ARN value. To request settings for all queues, leave this parameter empty.

" }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2384,11 +2494,11 @@ "members":{ "GameSessionQueues":{ "shape":"GameSessionQueueList", - "documentation":"

Collection of objects that describes the requested game session queues.

" + "documentation":"

A collection of objects that describe the requested game session queues.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -2398,15 +2508,15 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to retrieve all game sessions for.

" + "documentation":"

A unique identifier for a fleet to retrieve all game sessions for. You can use either the fleet ID or ARN value.

" }, "GameSessionId":{ "shape":"ArnStringModel", - "documentation":"

Unique identifier for the game session to retrieve. You can use either a GameSessionId or GameSessionArn value.

" + "documentation":"

A unique identifier for the game session to retrieve.

" }, "AliasId":{ "shape":"AliasId", - "documentation":"

Unique identifier for an alias associated with the fleet to retrieve all game sessions for.

" + "documentation":"

A unique identifier for an alias associated with the fleet to retrieve all game sessions for. You can use either the alias ID or ARN value.

" }, "StatusFilter":{ "shape":"NonZeroAndMaxString", @@ -2414,7 +2524,7 @@ }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2428,7 +2538,7 @@ "members":{ "GameSessions":{ "shape":"GameSessionList", - "documentation":"

Collection of objects containing game session properties for each session matching the request.

" + "documentation":"

A collection of objects containing game session properties for each session matching the request.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2443,15 +2553,15 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to retrieve instance information for.

" + "documentation":"

A unique identifier for a fleet to retrieve instance information for. You can use either the fleet ID or ARN value.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

Unique identifier for an instance to retrieve. Specify an instance ID or leave blank to retrieve all instances in the fleet.

" + "documentation":"

A unique identifier for an instance to retrieve. Specify an instance ID or leave blank to retrieve all instances in the fleet.

" }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2465,7 +2575,7 @@ "members":{ "Instances":{ "shape":"InstanceList", - "documentation":"

Collection of objects containing properties for each instance returned.

" + "documentation":"

A collection of objects containing properties for each instance returned.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2478,20 +2588,20 @@ "type":"structure", "members":{ "Names":{ - "shape":"MatchmakingIdList", - "documentation":"

Unique identifier for a matchmaking configuration(s) to retrieve. To request all existing configurations, leave this parameter empty.

" + "shape":"MatchmakingConfigurationNameList", + "documentation":"

A unique identifier for a matchmaking configuration(s) to retrieve. You can use either the configuration name or ARN value. To request all existing configurations, leave this parameter empty.

" }, "RuleSetName":{ - "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking rule set. Use this parameter to retrieve all matchmaking configurations that use this rule set.

" + "shape":"MatchmakingRuleSetName", + "documentation":"

A unique identifier for a matchmaking rule set. You can use either the rule set name or ARN value. Use this parameter to retrieve all matchmaking configurations that use this rule set.

" }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is limited to 10.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is limited to 10.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2501,11 +2611,11 @@ "members":{ "Configurations":{ "shape":"MatchmakingConfigurationList", - "documentation":"

Collection of requested matchmaking configuration objects.

" + "documentation":"

A collection of requested matchmaking configurations.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -2516,7 +2626,7 @@ "members":{ "TicketIds":{ "shape":"MatchmakingIdList", - "documentation":"

Unique identifier for a matchmaking ticket. You can include up to 10 ID values.

" + "documentation":"

A unique identifier for a matchmaking ticket. You can include up to 10 ID values.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2526,7 +2636,7 @@ "members":{ "TicketList":{ "shape":"MatchmakingTicketList", - "documentation":"

Collection of existing matchmaking ticket objects matching the request.

" + "documentation":"

A collection of existing matchmaking ticket objects matching the request.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -2536,15 +2646,15 @@ "members":{ "Names":{ "shape":"MatchmakingRuleSetNameList", - "documentation":"

List of one or more matchmaking rule set names to retrieve details for. (Note: The rule set name is different from the optional \"name\" field in the rule set body.)

" + "documentation":"

A list of one or more matchmaking rule set names to retrieve details for. (Note: The rule set name is different from the optional \"name\" field in the rule set body.) You can use either the rule set name or ARN value.

" }, "Limit":{ "shape":"RuleSetLimit", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2555,11 +2665,11 @@ "members":{ "RuleSets":{ "shape":"MatchmakingRuleSetList", - "documentation":"

Collection of requested matchmaking rule set objects.

" + "documentation":"

A collection of requested matchmaking rule set objects.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -2569,15 +2679,15 @@ "members":{ "GameSessionId":{ "shape":"ArnStringModel", - "documentation":"

Unique identifier for the game session to retrieve player sessions for.

" + "documentation":"

A unique identifier for the game session to retrieve player sessions for.

" }, "PlayerId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a player to retrieve player sessions for.

" + "documentation":"

A unique identifier for a player to retrieve player sessions for.

" }, "PlayerSessionId":{ "shape":"PlayerSessionId", - "documentation":"

Unique identifier for a player session to retrieve.

" + "documentation":"

A unique identifier for a player session to retrieve.

" }, "PlayerSessionStatusFilter":{ "shape":"NonZeroAndMaxString", @@ -2585,7 +2695,7 @@ }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. If a player session ID is specified, this parameter is ignored.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. If a player session ID is specified, this parameter is ignored.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2599,7 +2709,7 @@ "members":{ "PlayerSessions":{ "shape":"PlayerSessionList", - "documentation":"

Collection of objects containing properties for each player session that matches the request.

" + "documentation":"

A collection of objects containing properties for each player session that matches the request.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2614,7 +2724,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to get the run-time configuration for.

" + "documentation":"

A unique identifier for a fleet to get the runtime configuration for. You can use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2635,7 +2745,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to retrieve scaling policies for.

" + "documentation":"

A unique identifier for a fleet to retrieve scaling policies for. You can use either the fleet ID or ARN value.

" }, "StatusFilter":{ "shape":"ScalingStatusType", @@ -2643,7 +2753,7 @@ }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2657,7 +2767,7 @@ "members":{ "ScalingPolicies":{ "shape":"ScalingPolicyList", - "documentation":"

Collection of objects containing the scaling policies matching the request.

" + "documentation":"

A collection of objects containing the scaling policies matching the request.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2672,7 +2782,7 @@ "members":{ "ScriptId":{ "shape":"ScriptId", - "documentation":"

Unique identifier for a Realtime script to retrieve properties for.

" + "documentation":"

A unique identifier for a Realtime script to retrieve properties for. You can use either the script ID or ARN value.

" } } }, @@ -2681,7 +2791,7 @@ "members":{ "Script":{ "shape":"Script", - "documentation":"

Set of properties describing the requested script.

" + "documentation":"

A set of properties describing the requested script.

" } } }, @@ -2695,7 +2805,7 @@ "members":{ "VpcPeeringAuthorizations":{ "shape":"VpcPeeringAuthorizationList", - "documentation":"

Collection of objects that describe all valid VPC peering operations for the current AWS account.

" + "documentation":"

A collection of objects that describe all valid VPC peering operations for the current AWS account.

" } } }, @@ -2704,7 +2814,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet.

" + "documentation":"

A unique identifier for a fleet. You can use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2714,7 +2824,7 @@ "members":{ "VpcPeeringConnections":{ "shape":"VpcPeeringConnectionList", - "documentation":"

Collection of VPC peering connection records that match the request.

" + "documentation":"

A collection of VPC peering connection records that match the request.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -2724,7 +2834,7 @@ "members":{ "PlayerId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a player to associate with the player session.

" + "documentation":"

A unique identifier for a player to associate with the player session.

" }, "PlayerData":{ "shape":"PlayerData", @@ -2749,11 +2859,11 @@ }, "MINIMUM":{ "shape":"WholeNumber", - "documentation":"

Minimum value allowed for the fleet's instance count.

" + "documentation":"

The minimum value allowed for the fleet's instance count.

" }, "MAXIMUM":{ "shape":"WholeNumber", - "documentation":"

Maximum value allowed for the fleet's instance count.

" + "documentation":"

The maximum value allowed for the fleet's instance count.

" }, "PENDING":{ "shape":"WholeNumber", @@ -2772,7 +2882,7 @@ "documentation":"

Number of instances in the fleet that are no longer active but haven't yet been terminated.

" } }, - "documentation":"

Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.

" + "documentation":"

Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.

" }, "EC2InstanceLimit":{ "type":"structure", @@ -2790,7 +2900,7 @@ "documentation":"

Number of instances allowed.

" } }, - "documentation":"

Maximum number of instances allowed based on the Amazon Elastic Compute Cloud (Amazon EC2) instance type. Instance limits can be retrieved by calling DescribeEC2InstanceLimits.

" + "documentation":"

The maximum number of instances allowed based on the Amazon Elastic Compute Cloud (Amazon EC2) instance type. Instance limits can be retrieved by calling DescribeEC2InstanceLimits.

" }, "EC2InstanceLimitList":{ "type":"list", @@ -2864,15 +2974,15 @@ "members":{ "EventId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a fleet event.

" + "documentation":"

A unique identifier for a fleet event.

" }, "ResourceId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for an event resource, such as a fleet ID.

" + "documentation":"

A unique identifier for an event resource, such as a fleet ID.

" }, "EventCode":{ "shape":"EventCode", - "documentation":"

Type of event being logged. The following events are currently in use:

Fleet creation events:

  • FLEET_CREATED -- A fleet record was successfully created with a status of NEW. Event messaging includes the fleet ID.

  • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.

  • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

  • FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_RUNNING_INSTALLER – The game server build files were successfully extracted, and the Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and the Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's run-time configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the run-time configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING.

  • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the run-time configuration failed because the executable specified in a launch path does not exist on the instance.

  • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING.

  • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the run-time configuration failed because the executable specified in a launch path failed to run on the fleet instance.

  • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING.

  • FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. A possible reason for failure is that the game server is not reporting \"process ready\" to the Amazon GameLift service.

  • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

VPC peering events:

  • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your AWS account.

  • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information (see DescribeVpcPeeringConnections) provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your AWS account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

  • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

Spot instance events:

  • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

Other fleet events:

  • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

  • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

  • FLEET_DELETED -- A request to delete a fleet was initiated.

  • GENERIC_EVENT -- An unspecified event has occurred.

" + "documentation":"

The type of event being logged.

Fleet creation events (ordered by fleet creation activity):

  • FLEET_CREATED -- A fleet record was successfully created with a status of NEW. Event messaging includes the fleet ID.

  • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.

  • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

  • FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_RUNNING_INSTALLER – The game server build files were successfully extracted, and Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING.

  • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.

  • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING.

  • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.

  • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING.

  • FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. Learn more at Debug Fleet Creation Issues

  • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

VPC peering events:

  • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your AWS account.

  • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information (see DescribeVpcPeeringConnections) provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your AWS account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

  • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

Spot instance events:

  • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

Other fleet events:

  • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

  • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

  • FLEET_DELETED -- A request to delete a fleet was initiated.

  • GENERIC_EVENT -- An unspecified event has occurred.

" }, "Message":{ "shape":"NonEmptyString", @@ -2946,11 +3056,11 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet.

" + "documentation":"

A unique identifier for a fleet.

" }, "FleetArn":{ "shape":"ArnStringModel", - "documentation":"

Identifier for a fleet that is unique across all regions.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift fleet ARN, the resource ID matches the FleetId value.

" }, "FleetType":{ "shape":"FleetType", @@ -2966,7 +3076,7 @@ }, "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a fleet. Fleet names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a fleet. Fleet names do not need to be unique.

" }, "CreationTime":{ "shape":"Timestamp", @@ -2982,11 +3092,19 @@ }, "BuildId":{ "shape":"BuildId", - "documentation":"

Unique identifier for a build.

" + "documentation":"

A unique identifier for a build.

" + }, + "BuildArn":{ + "shape":"BuildArn", + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift build resource that is deployed on instances in this fleet. In a GameLift build ARN, the resource ID matches the BuildId value.

" }, "ScriptId":{ "shape":"ScriptId", - "documentation":"

Unique identifier for a Realtime script.

" + "documentation":"

A unique identifier for a Realtime script.

" + }, + "ScriptArn":{ + "shape":"ScriptArn", + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift script resource that is deployed on instances in this fleet. In a GameLift script ARN, the resource ID matches the ScriptId value.

" }, "ServerLaunchPath":{ "shape":"NonZeroAndMaxString", @@ -3002,7 +3120,7 @@ }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", - "documentation":"

Type of game session protection to set for all new instances started in the fleet.

  • NoProtection -- The game session can be terminated during a scale-down event.

  • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

" + "documentation":"

The type of game session protection to set for all new instances started in the fleet.

  • NoProtection -- The game session can be terminated during a scale-down event.

  • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

" }, "OperatingSystem":{ "shape":"OperatingSystem", @@ -3022,11 +3140,14 @@ }, "InstanceRoleArn":{ "shape":"NonEmptyString", - "documentation":"

Unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, daemons (background processes). Create a role or look up a role's ARN using the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

" + "documentation":"

A unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN from the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

" }, - "CertificateConfiguration":{"shape":"CertificateConfiguration"} + "CertificateConfiguration":{ + "shape":"CertificateConfiguration", + "documentation":"

Indicates whether a TLS/SSL certificate was generated for the fleet.

" + } }, - "documentation":"

General properties describing a fleet.

" + "documentation":"

General properties describing a fleet.

" }, "FleetAttributesList":{ "type":"list", @@ -3037,7 +3158,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet.

" + "documentation":"

A unique identifier for a fleet.

" }, "InstanceType":{ "shape":"EC2InstanceType", @@ -3048,7 +3169,7 @@ "documentation":"

Current status of fleet capacity.

" } }, - "documentation":"

Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.

" + "documentation":"

Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.

" }, "FleetCapacityExceededException":{ "type":"structure", @@ -3064,7 +3185,7 @@ }, "FleetId":{ "type":"string", - "pattern":"^fleet-\\S+" + "pattern":"^fleet-\\S+|^arn:.*:fleet\\/fleet-\\S+" }, "FleetIdList":{ "type":"list", @@ -3097,7 +3218,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet.

" + "documentation":"

A unique identifier for a fleet.

" }, "ActiveServerProcessCount":{ "shape":"WholeNumber", @@ -3113,10 +3234,10 @@ }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", - "documentation":"

Maximum players allowed across all game sessions currently being hosted on all instances in the fleet.

" + "documentation":"

The maximum number of players allowed across all game sessions currently being hosted on all instances in the fleet.

" } }, - "documentation":"

Current status of fleet utilization, including the number of game and player sessions being hosted.

" + "documentation":"

Current status of fleet utilization, including the number of game and player sessions being hosted.

" }, "FleetUtilizationList":{ "type":"list", @@ -3133,14 +3254,14 @@ "members":{ "Key":{ "shape":"GamePropertyKey", - "documentation":"

Game property identifier.

" + "documentation":"

The game property identifier.

" }, "Value":{ "shape":"GamePropertyValue", - "documentation":"

Game property value.

" + "documentation":"

The game property value.

" } }, - "documentation":"

Set of key-value pairs that contain information about a game session. When included in a game session request, these properties communicate details to be used when setting up the new game session, such as to specify a game mode, level, or map. Game properties are passed to the game server process when initiating a new game session; the server process uses the properties as appropriate. For more information, see the Amazon GameLift Developer Guide.

" + "documentation":"

Set of key-value pairs that contain information about a game session. When included in a game session request, these properties communicate details to be used when setting up the new game session. For example, a game property might specify a game mode, level, or map. Game properties are passed to the game server process when initiating a new game session. For more information, see the Amazon GameLift Developer Guide.

" }, "GamePropertyKey":{ "type":"string", @@ -3160,15 +3281,19 @@ "members":{ "GameSessionId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the game session. A game session ARN has the following format: arn:aws:gamelift:<region>::gamesession/<fleet ID>/<custom ID string or idempotency token>.

" + "documentation":"

A unique identifier for the game session. A game session ARN has the following format: arn:aws:gamelift:<region>::gamesession/<fleet ID>/<custom ID string or idempotency token>.

" }, "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a game session. Session names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a game session. Session names do not need to be unique.

" }, "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet that the game session is running on.

" + "documentation":"

A unique identifier for a fleet that the game session is running on.

" + }, + "FleetArn":{ + "shape":"ArnStringModel", + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet that this game session is running on.

" }, "CreationTime":{ "shape":"Timestamp", @@ -3184,7 +3309,7 @@ }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", - "documentation":"

Maximum number of players that can be connected simultaneously to the game session.

" + "documentation":"

The maximum number of players that can be connected simultaneously to the game session.

" }, "Status":{ "shape":"GameSessionStatus", @@ -3200,9 +3325,12 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number.

" + "documentation":"

IP address of the instance that is running the game session. When connecting to an Amazon GameLift game server, a client needs to reference an IP address (or DNS name) and port number.

" + }, + "DnsName":{ + "shape":"DnsName", + "documentation":"

DNS identifier assigned to the instance that is running the game session. Values have the following format:

  • TLS-enabled fleets: <unique identifier>.<region identifier>.amazongamelift.com.

  • Non-TLS-enabled fleets: ec2-<unique identifier>.compute.amazonaws.com. (See Amazon EC2 Instance IP Addressing.)

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" }, - "DnsName":{"shape":"DnsName"}, "Port":{ "shape":"PortNumber", "documentation":"

Port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number.

" @@ -3213,7 +3341,7 @@ }, "CreatorId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a player. This ID is used to enforce a resource protection policy (if one exists), that limits the number of game sessions a player can create.

" + "documentation":"

A unique identifier for a player. This ID is used to enforce a resource protection policy (if one exists) that limits the number of game sessions a player can create.

" }, "GameSessionData":{ "shape":"GameSessionData", @@ -3236,20 +3364,23 @@ "members":{ "GameSessionArn":{ "shape":"ArnStringModel", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.

" }, "IpAddress":{ "shape":"StringModel", - "documentation":"

IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number.

" + "documentation":"

IP address of the instance that is running the game session. When connecting to an Amazon GameLift game server, a client needs to reference an IP address (or DNS name) and port number.

" + }, + "DnsName":{ + "shape":"DnsName", + "documentation":"

DNS identifier assigned to the instance that is running the game session. Values have the following format:

  • TLS-enabled fleets: <unique identifier>.<region identifier>.amazongamelift.com.

  • Non-TLS-enabled fleets: ec2-<unique identifier>.compute.amazonaws.com. (See Amazon EC2 Instance IP Addressing.)

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" }, - "DnsName":{"shape":"DnsName"}, "Port":{ "shape":"PositiveInteger", "documentation":"

Port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number.

" }, "MatchedPlayerSessions":{ "shape":"MatchedPlayerSessionList", - "documentation":"

Collection of player session IDs, one for each player ID that was included in the original matchmaking request.

" + "documentation":"

A collection of player session IDs, one for each player ID that was included in the original matchmaking request.

" } }, "documentation":"

Connection information for the new game session that is created with matchmaking. (with StartMatchmaking). Once a match is set, the FlexMatch engine places the match and creates a new game session for it. This information, including the game session endpoint and player sessions for each player in the original matchmaking request, is added to the MatchmakingTicket, which can be retrieved by calling DescribeMatchmaking.

" @@ -3294,15 +3425,15 @@ "members":{ "PlacementId":{ "shape":"IdStringModel", - "documentation":"

Unique identifier for a game session placement.

" + "documentation":"

A unique identifier for a game session placement.

" }, "GameSessionQueueName":{ "shape":"GameSessionQueueName", - "documentation":"

Descriptive label that is associated with game session queue. Queue names must be unique within each region.

" + "documentation":"

A descriptive label that is associated with a game session queue. Queue names must be unique within each Region.

" }, "Status":{ "shape":"GameSessionPlacementState", - "documentation":"

Current status of the game session placement request.

  • PENDING -- The placement request is currently in the queue waiting to be processed.

  • FULFILLED -- A new game session and player sessions (if requested) have been successfully created. Values for GameSessionArn and GameSessionRegion are available.

  • CANCELLED -- The placement request was canceled with a call to StopGameSessionPlacement.

  • TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit the placement request as needed.

" + "documentation":"

Current status of the game session placement request.

  • PENDING -- The placement request is currently in the queue waiting to be processed.

  • FULFILLED -- A new game session and player sessions (if requested) have been successfully created. Values for GameSessionArn and GameSessionRegion are available.

  • CANCELLED -- The placement request was canceled with a call to StopGameSessionPlacement.

  • TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit the placement request as needed.

  • FAILED -- GameLift is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error.

" }, "GameProperties":{ "shape":"GamePropertyList", @@ -3310,27 +3441,27 @@ }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", - "documentation":"

Maximum number of players that can be connected simultaneously to the game session.

" + "documentation":"

The maximum number of players that can be connected simultaneously to the game session.

" }, "GameSessionName":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a game session. Session names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a game session. Session names do not need to be unique.

" }, "GameSessionId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the game session. This value is set once the new game session is placed (placement status is FULFILLED).

" + "documentation":"

A unique identifier for the game session. This value is set once the new game session is placed (placement status is FULFILLED).

" }, "GameSessionArn":{ "shape":"NonZeroAndMaxString", - "documentation":"

Identifier for the game session created by this placement request. This value is set once the new game session is placed (placement status is FULFILLED). This identifier is unique across all regions. You can use this value as a GameSessionId value as needed.

" + "documentation":"

Identifier for the game session created by this placement request. This value is set once the new game session is placed (placement status is FULFILLED). This identifier is unique across all Regions. You can use this value as a GameSessionId value as needed.

" }, "GameSessionRegion":{ "shape":"NonZeroAndMaxString", - "documentation":"

Name of the region where the game session created by this placement request is running. This value is set once the new game session is placed (placement status is FULFILLED).

" + "documentation":"

Name of the Region where the game session created by this placement request is running. This value is set once the new game session is placed (placement status is FULFILLED).

" }, "PlayerLatencies":{ "shape":"PlayerLatencyList", - "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS regions.

" + "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS Regions.

" }, "StartTime":{ "shape":"Timestamp", @@ -3342,16 +3473,19 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" + "documentation":"

IP address of the instance that is running the game session. When connecting to an Amazon GameLift game server, a client needs to reference an IP address (or DNS name) and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" + }, + "DnsName":{ + "shape":"DnsName", + "documentation":"

DNS identifier assigned to the instance that is running the game session. Values have the following format:

  • TLS-enabled fleets: <unique identifier>.<region identifier>.amazongamelift.com.

  • Non-TLS-enabled fleets: ec2-<unique identifier>.compute.amazonaws.com. (See Amazon EC2 Instance IP Addressing.)

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" }, - "DnsName":{"shape":"DnsName"}, "Port":{ "shape":"PortNumber", "documentation":"

Port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" }, "PlacedPlayerSessions":{ "shape":"PlacedPlayerSessionList", - "documentation":"

Collection of information on player sessions created in response to the game session placement request. These player sessions are created only once a new game session is successfully placed (placement status is FULFILLED). This information includes the player ID (as provided in the placement request) and the corresponding player session ID. Retrieve full player sessions by calling DescribePlayerSessions with the player session ID.

" + "documentation":"

A collection of information on player sessions created in response to the game session placement request. These player sessions are created only once a new game session is successfully placed (placement status is FULFILLED). This information includes the player ID (as provided in the placement request) and the corresponding player session ID. Retrieve full player sessions by calling DescribePlayerSessions with the player session ID.

" }, "GameSessionData":{ "shape":"GameSessionData", @@ -3379,23 +3513,23 @@ "members":{ "Name":{ "shape":"GameSessionQueueName", - "documentation":"

Descriptive label that is associated with game session queue. Queue names must be unique within each region.

" + "documentation":"

A descriptive label that is associated with a game session queue. Queue names must be unique within each Region.

" }, "GameSessionQueueArn":{ "shape":"ArnStringModel", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>:<aws account>:gamesessionqueue/<queue name>.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift game session queue ARN, the resource ID matches the Name value.

" }, "TimeoutInSeconds":{ "shape":"WholeNumber", - "documentation":"

Maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" + "documentation":"

The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", - "documentation":"

Collection of latency policies to apply when processing game sessions placement requests with player latency information. Multiple policies are evaluated in order of the maximum latency value, starting with the lowest latency values. With just one policy, it is enforced at the start of the game session placement for the duration period. With multiple policies, each policy is enforced consecutively for its duration period. For example, a queue might enforce a 60-second policy followed by a 120-second policy, and then no policy for the remainder of the placement.

" + "documentation":"

A collection of latency policies to apply when processing game sessions placement requests with player latency information. Multiple policies are evaluated in order of the maximum latency value, starting with the lowest latency values. With just one policy, the policy is enforced at the start of the game session placement for the duration period. With multiple policies, each policy is enforced consecutively for its duration period. For example, a queue might enforce a 60-second policy followed by a 120-second policy, and then no policy for the remainder of the placement.

" }, "Destinations":{ "shape":"GameSessionQueueDestinationList", - "documentation":"

List of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order.

" + "documentation":"

A list of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order.

" } }, "documentation":"

Configuration of a queue that is used to process game session placement requests. The queue configuration identifies several game features:

  • The destinations where a new game session can potentially be hosted. Amazon GameLift tries these destinations in an order based on either the queue's default order or player latency information, if provided in a placement request. With latency information, Amazon GameLift can place game sessions where the majority of players are reporting the lowest possible latency.

  • The length of time that placement requests can wait in the queue before timing out.

  • A set of optional latency policies that protect individual players from high latencies, preventing game sessions from being placed where any individual player is reporting latency higher than a policy's maximum.
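
A hedged sketch of creating a queue with these features using boto3; the queue name, fleet ARN, and latency values are placeholders, and the member names follow the shapes in this model:

import boto3

gamelift = boto3.client("gamelift")

gamelift.create_game_session_queue(
    Name="example-queue",
    TimeoutInSeconds=600,        # how long a placement request may wait
    Destinations=[               # tried in default preference order
        {"DestinationArn": "arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-example"},
    ],
    PlayerLatencyPolicies=[      # enforced consecutively: 60s cap, then a looser cap
        {"MaximumIndividualPlayerLatencyMilliseconds": 100, "PolicyDurationSeconds": 60},
        {"MaximumIndividualPlayerLatencyMilliseconds": 200},
    ],
)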

" @@ -3405,10 +3539,10 @@ "members":{ "DestinationArn":{ "shape":"ArnStringModel", - "documentation":"

Amazon Resource Name (ARN) assigned to fleet or fleet alias. ARNs, which include a fleet ID or alias ID and a region name, provide a unique identifier across all regions.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to fleet or fleet alias. ARNs, which include a fleet ID or alias ID and a Region name, provide a unique identifier across all Regions.

" } }, - "documentation":"

Fleet designated in a game session queue. Requests for new game sessions in the queue are fulfilled by starting a new game session on any destination configured for a queue.

" + "documentation":"

Fleet designated in a game session queue. Requests for new game sessions in the queue are fulfilled by starting a new game session on any destination that is configured for a queue.

" }, "GameSessionQueueDestinationList":{ "type":"list", @@ -3420,9 +3554,9 @@ }, "GameSessionQueueName":{ "type":"string", - "max":128, + "max":256, "min":1, - "pattern":"[a-zA-Z0-9-]+" + "pattern":"[a-zA-Z0-9-]+|^arn:.*:gamesessionqueue\\/[a-zA-Z0-9-]+" }, "GameSessionQueueNameList":{ "type":"list", @@ -3448,7 +3582,7 @@ "members":{ "GameSessionId":{ "shape":"ArnStringModel", - "documentation":"

Unique identifier for the game session to get logs for.

" + "documentation":"

A unique identifier for the game session to get logs for.

" } }, "documentation":"

Represents the input for a request action.

" @@ -3472,11 +3606,11 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet that contains the instance you want access to. The fleet can be in any of the following statuses: ACTIVATING, ACTIVE, or ERROR. Fleets with an ERROR status may be accessible for a short time before they are deleted.

" + "documentation":"

A unique identifier for a fleet that contains the instance you want access to. You can use either the fleet ID or ARN value. The fleet can be in any of the following statuses: ACTIVATING, ACTIVE, or ERROR. Fleets with an ERROR status may be accessible for a short time before they are deleted.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

Unique identifier for an instance you want to get access to. You can access an instance in any status.

" + "documentation":"

A unique identifier for an instance you want to get access to. You can access an instance in any status.

" } }, "documentation":"

Represents the input for a request action.

" @@ -3486,7 +3620,7 @@ "members":{ "InstanceAccess":{ "shape":"InstanceAccess", - "documentation":"

Object that contains connection information for a fleet instance, including IP address and access credentials.

" + "documentation":"

The connection information for a fleet instance, including IP address and access credentials.

" } }, "documentation":"

Represents the returned data in response to a request action.
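
A brief sketch of the corresponding call with boto3; the fleet and instance IDs are placeholders that would normally come from a prior DescribeInstances call:

import boto3

gamelift = boto3.client("gamelift")

access = gamelift.get_instance_access(
    FleetId="fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",   # placeholder
    InstanceId="i-0123456789abcdef0",                       # placeholder
)["InstanceAccess"]

# Credentials hold the user name and secret (password or private key)
# needed to open an RDP or SSH session to the instance.
print(access["IpAddress"], access["OperatingSystem"])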

" @@ -3510,17 +3644,20 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet that the instance is in.

" + "documentation":"

A unique identifier for a fleet that the instance is in.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

Unique identifier for an instance.

" + "documentation":"

A unique identifier for an instance.

" }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

IP address assigned to the instance.

" + "documentation":"

IP address that is assigned to the instance.

" + }, + "DnsName":{ + "shape":"DnsName", + "documentation":"

DNS identifier assigned to the instance that is running the game session. Values have the following format:

  • TLS-enabled fleets: <unique identifier>.<region identifier>.amazongamelift.com.

  • Non-TLS-enabled fleets: ec2-<unique identifier>.compute.amazonaws.com. (See Amazon EC2 Instance IP Addressing.)

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" }, - "DnsName":{"shape":"DnsName"}, "OperatingSystem":{ "shape":"OperatingSystem", "documentation":"

Operating system that is running on this instance.

" @@ -3545,15 +3682,15 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet containing the instance being accessed.

" + "documentation":"

A unique identifier for a fleet containing the instance being accessed.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

Unique identifier for an instance being accessed.

" + "documentation":"

A unique identifier for an instance being accessed.

" }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

IP address assigned to the instance.

" + "documentation":"

IP address that is assigned to the instance.

" }, "OperatingSystem":{ "shape":"OperatingSystem", @@ -3643,22 +3780,22 @@ "members":{ "FromPort":{ "shape":"PortNumber", - "documentation":"

Starting value for a range of allowed port numbers.

" + "documentation":"

A starting value for a range of allowed port numbers.

" }, "ToPort":{ "shape":"PortNumber", - "documentation":"

Ending value for a range of allowed port numbers. Port numbers are end-inclusive. This value must be higher than FromPort.

" + "documentation":"

An ending value for a range of allowed port numbers. Port numbers are end-inclusive. This value must be higher than FromPort.

" }, "IpRange":{ "shape":"NonBlankString", - "documentation":"

Range of allowed IP addresses. This value must be expressed in CIDR notation. Example: \"000.000.000.000/[subnet mask]\" or optionally the shortened version \"0.0.0.0/[subnet mask]\".

" + "documentation":"

A range of allowed IP addresses. This value must be expressed in CIDR notation. Example: \"000.000.000.000/[subnet mask]\" or optionally the shortened version \"0.0.0.0/[subnet mask]\".

" }, "Protocol":{ "shape":"IpProtocol", - "documentation":"

Network communication protocol used by the fleet.

" + "documentation":"

The network communication protocol used by the fleet.

" } }, - "documentation":"

A range of IP addresses and port settings that allow inbound traffic to connect to server processes on an Amazon GameLift. New game sessions that are started on the fleet are assigned an IP address/port number combination, which must fall into the fleet's allowed ranges. For fleets created with a custom game server, the ranges reflect the server's game session assignments. For Realtime Servers fleets, Amazon GameLift automatically opens two port ranges, one for TCP messaging and one for UDP for use by the Realtime servers.

" + "documentation":"

A range of IP addresses and port settings that allow inbound traffic to connect to server processes on an Amazon GameLift hosting resource. New game sessions that are started on the fleet are assigned an IP address/port number combination, which must fall into the fleet's allowed ranges. For fleets created with a custom game server, the ranges reflect the server's game session assignments. For Realtime Servers fleets, Amazon GameLift automatically opens two port ranges, one for TCP messaging and one for UDP for use by the Realtime servers.

" }, "IpPermissionsList":{ "type":"list", @@ -3690,19 +3827,19 @@ "members":{ "RoutingStrategyType":{ "shape":"RoutingStrategyType", - "documentation":"

Type of routing to filter results on. Use this parameter to retrieve only aliases of a certain type. To retrieve all aliases, leave this parameter empty.

Possible routing types include the following:

  • SIMPLE -- The alias resolves to one specific fleet. Use this type when routing to active fleets.

  • TERMINAL -- The alias does not resolve to a fleet but instead can be used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException with the RoutingStrategy message embedded.

" + "documentation":"

The routing type to filter results on. Use this parameter to retrieve only aliases with a certain routing type. To retrieve all aliases, leave this parameter empty.

Possible routing types include the following:

  • SIMPLE -- The alias resolves to one specific fleet. Use this type when routing to active fleets.

  • TERMINAL -- The alias does not resolve to a fleet but instead can be used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException with the RoutingStrategy message embedded.

" }, "Name":{ "shape":"NonEmptyString", - "documentation":"

Descriptive label that is associated with an alias. Alias names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with an alias. Alias names do not need to be unique.

" }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonEmptyString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.
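
A small sketch of driving this request with boto3, paging with Limit/NextToken exactly as described; the routing-strategy filter is illustrative:

import boto3

gamelift = boto3.client("gamelift")

aliases, token = [], None
while True:
    kwargs = {"RoutingStrategyType": "SIMPLE", "Limit": 50}
    if token:
        kwargs["NextToken"] = token
    page = gamelift.list_aliases(**kwargs)
    aliases.extend(page.get("Aliases", []))
    token = page.get("NextToken")
    if not token:          # no token means the end of the result set
        break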

" @@ -3712,11 +3849,11 @@ "members":{ "Aliases":{ "shape":"AliasList", - "documentation":"

Collection of alias records that match the list request.

" + "documentation":"

A collection of alias resources that match the request parameters.

" }, "NextToken":{ "shape":"NonEmptyString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -3730,7 +3867,7 @@ }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonEmptyString", @@ -3744,7 +3881,7 @@ "members":{ "Builds":{ "shape":"BuildList", - "documentation":"

Collection of build records that match the request.

" + "documentation":"

A collection of build records that match the request.

" }, "NextToken":{ "shape":"NonEmptyString", @@ -3758,15 +3895,15 @@ "members":{ "BuildId":{ "shape":"BuildId", - "documentation":"

Unique identifier for a build to return fleets for. Use this parameter to return only fleets using the specified build. To retrieve all fleets, leave this parameter empty.

" + "documentation":"

A unique identifier for a build to return fleets for. Use this parameter to return only fleets using the specified build. Use either the build ID or ARN value. To retrieve all fleets, leave this parameter empty.

" }, "ScriptId":{ "shape":"ScriptId", - "documentation":"

Unique identifier for a Realtime script to return fleets for. Use this parameter to return only fleets using the specified script. To retrieve all fleets, leave this parameter empty.

" + "documentation":"

A unique identifier for a Realtime script to return fleets for. Use this parameter to return only fleets using the specified script. Use either the script ID or ARN value. To retrieve all fleets, leave this parameter empty.

" }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -3794,11 +3931,11 @@ "members":{ "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonEmptyString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } } }, @@ -3807,11 +3944,30 @@ "members":{ "Scripts":{ "shape":"ScriptList", - "documentation":"

Set of properties describing the requested script.

" + "documentation":"

A set of properties describing the requested script.

" }, "NextToken":{ "shape":"NonEmptyString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to and uniquely identifies the GameLift resource that you want to retrieve tags for. GameLift resource ARNs are included in the data object for the resource, which can be retrieved by calling a List or Describe action for the resource type.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

The collection of tags that have been assigned to the specified resource.
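
A minimal sketch with boto3; the ARN is a placeholder and would normally come from a Describe or List call for the tagged resource:

import boto3

gamelift = boto3.client("gamelift")

response = gamelift.list_tags_for_resource(
    ResourceARN="arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-example"
)
for tag in response["Tags"]:
    print(tag["Key"], "=", tag["Value"])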

" } } }, @@ -3820,11 +3976,11 @@ "members":{ "PlayerId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a player

" + "documentation":"

A unique identifier for a player.

" }, "PlayerSessionId":{ "shape":"PlayerSessionId", - "documentation":"

Unique identifier for a player session

" + "documentation":"

A unique identifier for a player session.

" } }, "documentation":"

Represents a new player session that is created as a result of a successful FlexMatch match. A successful match automatically creates new player sessions for every player ID in the original matchmaking request.

When players connect to the match's game session, they must include both player ID and player session ID in order to claim their assigned player slot.

" @@ -3848,39 +4004,47 @@ "members":{ "Name":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.

" + "documentation":"

A unique identifier for a matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.

" + }, + "ConfigurationArn":{ + "shape":"MatchmakingConfigurationArn", + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift configuration ARN, the resource ID matches the Name value.

" }, "Description":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with matchmaking configuration.

" + "documentation":"

A descriptive label that is associated with a matchmaking configuration.

" }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>:<aws account>:gamesessionqueue/<queue name>. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. GameLift uses the listed queues when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", - "documentation":"

Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.

" + "documentation":"

The maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.

" }, "AcceptanceTimeoutSeconds":{ "shape":"MatchmakingAcceptanceTimeoutInteger", - "documentation":"

Length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" + "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" }, "AcceptanceRequired":{ "shape":"BooleanModel", - "documentation":"

Flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" + "documentation":"

A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" }, "RuleSetName":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking rule set to use with this configuration. A matchmaking configuration can only use rule sets that are defined in the same region.

" + "documentation":"

A unique identifier for a matchmaking rule set to use with this configuration. A matchmaking configuration can only use rule sets that are defined in the same Region.

" + }, + "RuleSetArn":{ + "shape":"MatchmakingRuleSetArn", + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift matchmaking rule set resource that this configuration uses.

" }, "NotificationTarget":{ "shape":"SnsArnStringModel", - "documentation":"

SNS topic ARN that is set up to receive matchmaking notifications.

" + "documentation":"

An SNS topic ARN that is set up to receive matchmaking notifications.

" }, "AdditionalPlayerCount":{ "shape":"WholeNumber", - "documentation":"

Number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match.

" + "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.

" }, "CustomEventData":{ "shape":"CustomEventData", @@ -3888,27 +4052,42 @@ }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

The time stamp indicating when this data object was created. The format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "BackfillMode":{ "shape":"BackfillMode", - "documentation":"

Method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

" + "documentation":"

The method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

" } }, "documentation":"

Guidelines for use with FlexMatch to match players into games. All matchmaking requests must specify a matchmaking configuration.
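
A hedged sketch of retrieving one of these configurations with boto3; the configuration name is a placeholder:

import boto3

gamelift = boto3.client("gamelift")

configs = gamelift.describe_matchmaking_configurations(
    Names=["example-config"]          # placeholder configuration name
)["Configurations"]
for cfg in configs:
    print(cfg["Name"], cfg["RuleSetName"], cfg["AcceptanceRequired"])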

" }, + "MatchmakingConfigurationArn":{ + "type":"string", + "documentation":"Data type used for Matchmaking Configuration ARN.", + "pattern":"^arn:.*:matchmakingconfiguration\\/[a-zA-Z0-9-\\.]*" + }, "MatchmakingConfigurationList":{ "type":"list", "member":{"shape":"MatchmakingConfiguration"} }, + "MatchmakingConfigurationName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9-\\.]*|^arn:.*:matchmakingconfiguration\\/[a-zA-Z0-9-\\.]*" + }, + "MatchmakingConfigurationNameList":{ + "type":"list", + "member":{"shape":"MatchmakingConfigurationName"} + }, "MatchmakingConfigurationStatus":{ "type":"string", "enum":[ @@ -3942,26 +4121,41 @@ "members":{ "RuleSetName":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking rule set

" + "documentation":"

A unique identifier for a matchmaking rule set.

" + }, + "RuleSetArn":{ + "shape":"MatchmakingRuleSetArn", + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking rule set resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift rule set ARN, the resource ID matches the RuleSetName value.

" }, "RuleSetBody":{ "shape":"RuleSetBody", - "documentation":"

Collection of matchmaking rules, formatted as a JSON string. Comments are not allowed in JSON, but most elements support a description field.

" + "documentation":"

A collection of matchmaking rules, formatted as a JSON string. Comments are not allowed in JSON, but most elements support a description field.

" }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

The time stamp indicating when this data object was created. The format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, "documentation":"

Set of rule statements, used with FlexMatch, that determine how to build your player matches. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.

  • Teams -- Required. A rule set must define one or multiple teams for the match and set minimum and maximum team sizes. For example, a rule set might describe a 4x4 match that requires all eight slots to be filled.

  • Player attributes -- Optional. These attributes specify a set of player characteristics to evaluate when looking for a match. Matchmaking requests that use a rule set with player attributes must provide the corresponding attribute values. For example, an attribute might specify a player's skill or level.

  • Rules -- Optional. Rules define how to evaluate potential players for a match based on player attributes. A rule might specify minimum requirements for individual players, teams, or entire matches. For example, a rule might require each player to meet a certain skill level, each team to have at least one player in a certain role, or the match to have a minimum average skill level. or may describe an entire group--such as all teams must be evenly matched or have at least one player in a certain role.

  • Expansions -- Optional. Expansions allow you to relax the rules after a period of time when no acceptable matches are found. This feature lets you balance getting players into games in a reasonable amount of time instead of making them wait indefinitely for the best possible match. For example, you might use an expansion to increase the maximum skill variance between players after 30 seconds.
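
As an illustration only, a minimal rule set body (team name and sizes are arbitrary) passed through the ValidateMatchmakingRuleSet operation with boto3 to check its syntax:

import json
import boto3

gamelift = boto3.client("gamelift")

rule_set = {
    "name": "exampleRuleSet",               # illustrative
    "ruleLanguageVersion": "1.0",
    "teams": [{"name": "players", "minPlayers": 2, "maxPlayers": 4}],
}

result = gamelift.validate_matchmaking_rule_set(RuleSetBody=json.dumps(rule_set))
print(result["Valid"])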

" }, + "MatchmakingRuleSetArn":{ + "type":"string", + "documentation":"Data type used for Matchmaking RuleSet ARN.", + "pattern":"^arn:.*:matchmakingruleset\\/[a-zA-Z0-9-\\.]*" + }, "MatchmakingRuleSetList":{ "type":"list", "member":{"shape":"MatchmakingRuleSet"} }, + "MatchmakingRuleSetName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9-\\.]*|^arn:.*:matchmakingruleset\\/[a-zA-Z0-9-\\.]*" + }, "MatchmakingRuleSetNameList":{ "type":"list", - "member":{"shape":"MatchmakingIdStringModel"}, + "member":{"shape":"MatchmakingRuleSetName"}, "max":10, "min":1 }, @@ -3970,12 +4164,16 @@ "members":{ "TicketId":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking ticket.

" + "documentation":"

A unique identifier for a matchmaking ticket.

" }, "ConfigurationName":{ "shape":"MatchmakingIdStringModel", "documentation":"

Name of the MatchmakingConfiguration that is used with this ticket. Matchmaking configurations determine how players are grouped into a match and how a new game session is created for the match.

" }, + "ConfigurationArn":{ + "shape":"MatchmakingConfigurationArn", + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift matchmaking configuration resource that is used with this ticket.

" + }, "Status":{ "shape":"MatchmakingConfigurationStatus", "documentation":"

Current status of the matchmaking request.

  • QUEUED -- The matchmaking request has been received and is currently waiting to be processed.

  • SEARCHING -- The matchmaking request is currently being processed.

  • REQUIRES_ACCEPTANCE -- A match has been proposed and the players must accept the match (see AcceptMatch). This status is used only with requests that use a matchmaking configuration with a player acceptance requirement.

  • PLACING -- The FlexMatch engine has matched players and is in the process of placing a new game session for the match.

  • COMPLETED -- Players have been matched and a game session is ready to host the players. A ticket in this state contains the necessary connection information for players.

  • FAILED -- The matchmaking request was not completed.

  • CANCELLED -- The matchmaking request was canceled. This may be the result of a call to StopMatchmaking or a proposed match that one or more players failed to accept.

  • TIMED_OUT -- The matchmaking request was not successful within the duration specified in the matchmaking configuration.

Matchmaking requests that fail to successfully complete (statuses FAILED, CANCELLED, TIMED_OUT) can be resubmitted as new requests with new ticket IDs.
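
A rough polling sketch with boto3 (event notifications are usually preferable); the helper simply tracks the statuses listed above until the ticket reaches a terminal state:

import time
import boto3

gamelift = boto3.client("gamelift")

def wait_for_match(ticket_id, delay=5):
    # Poll DescribeMatchmaking until the ticket completes or fails.
    while True:
        ticket = gamelift.describe_matchmaking(TicketIds=[ticket_id])["TicketList"][0]
        status = ticket["Status"]
        if status == "COMPLETED":
            return ticket["GameSessionConnectionInfo"]
        if status in ("FAILED", "CANCELLED", "TIMED_OUT"):
            raise RuntimeError("matchmaking ended with status " + status)
        time.sleep(delay)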

" @@ -4086,11 +4284,11 @@ "members":{ "PlayerId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a player that is associated with this player session.

" + "documentation":"

A unique identifier for a player that is associated with this player session.

" }, "PlayerSessionId":{ "shape":"PlayerSessionId", - "documentation":"

Unique identifier for a player session.

" + "documentation":"

A unique identifier for a player session.

" } }, "documentation":"

Information about a player session that was created as part of a StartGameSessionPlacement request. This object contains only the player ID and player session ID. To retrieve full details on a player session, call DescribePlayerSessions with the player session ID.

" @@ -4104,11 +4302,11 @@ "members":{ "PlayerId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a player

" + "documentation":"

A unique identifier for a player.

" }, "PlayerAttributes":{ "shape":"PlayerAttributeMap", - "documentation":"

Collection of key:value pairs containing player information for use in matchmaking. Player attribute keys must match the playerAttributes used in a matchmaking rule set. Example: \"PlayerAttributes\": {\"skill\": {\"N\": \"23\"}, \"gameMode\": {\"S\": \"deathmatch\"}}.

" + "documentation":"

A collection of key:value pairs containing player information for use in matchmaking. Player attribute keys must match the playerAttributes used in a matchmaking rule set. Example: \"PlayerAttributes\": {\"skill\": {\"N\": \"23\"}, \"gameMode\": {\"S\": \"deathmatch\"}}.

" }, "Team":{ "shape":"NonZeroAndMaxString", @@ -4116,7 +4314,7 @@ }, "LatencyInMs":{ "shape":"LatencyMap", - "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS regions. If this property is present, FlexMatch considers placing the match only in regions for which latency is reported.

If a matchmaker has a rule that evaluates player latency, players must report latency in order to be matched. If no latency is reported in this scenario, FlexMatch assumes that no regions are available to the player and the ticket is not matchable.

" + "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS Regions. If this property is present, FlexMatch considers placing the match only in Regions for which latency is reported.

If a matchmaker has a rule that evaluates player latency, players must report latency in order to be matched. If no latency is reported in this scenario, FlexMatch assumes that no Regions are available to the player and the ticket is not matchable.

" } }, "documentation":"

Represents a player in matchmaking. When starting a matchmaking request, a player has a player ID, attributes, and may have latency data. Team information is added after a match has been successfully completed.
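
A hedged sketch of submitting one such player with boto3; the configuration name, player ID, attribute keys, and Region latencies are placeholders and must line up with the rule set in use:

import boto3

gamelift = boto3.client("gamelift")

ticket = gamelift.start_matchmaking(
    ConfigurationName="example-config",
    Players=[{
        "PlayerId": "player-1234",
        "PlayerAttributes": {
            "skill": {"N": 23},
            "gameMode": {"S": "deathmatch"},
        },
        "LatencyInMs": {"us-west-2": 40, "us-east-1": 90},
    }],
)["MatchmakingTicket"]
print(ticket["TicketId"])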

" @@ -4147,18 +4345,18 @@ "members":{ "PlayerId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a player associated with the latency data.

" + "documentation":"

A unique identifier for a player associated with the latency data.

" }, "RegionIdentifier":{ "shape":"NonZeroAndMaxString", - "documentation":"

Name of the region that is associated with the latency value.

" + "documentation":"

Name of the Region that is associated with the latency value.

" }, "LatencyInMilliseconds":{ "shape":"Float", - "documentation":"

Amount of time that represents the time lag experienced by the player when connected to the specified region.

" + "documentation":"

Amount of time that represents the time lag experienced by the player when connected to the specified Region.

" } }, - "documentation":"

Regional latency information for a player, used when requesting a new game session with StartGameSessionPlacement. This value indicates the amount of time lag that exists when the player is connected to a fleet in the specified region. The relative difference between a player's latency values for multiple regions are used to determine which fleets are best suited to place a new game session for the player.

" + "documentation":"

Regional latency information for a player, used when requesting a new game session with StartGameSessionPlacement. This value indicates the amount of time lag that exists when the player is connected to a fleet in the specified Region. The relative difference between a player's latency values for multiple Regions are used to determine which fleets are best suited to place a new game session for the player.

" }, "PlayerLatencyList":{ "type":"list", @@ -4176,7 +4374,7 @@ "documentation":"

The length of time, in seconds, that the policy is enforced while placing a new game session. A null value for this property means that the policy is enforced until the queue times out.

" } }, - "documentation":"

Queue setting that determines the highest latency allowed for individual players when placing a game session. When a latency policy is in force, a game session cannot be placed at any destination in a region where a player is reporting latency higher than the cap. Latency policies are only enforced when the placement request contains player latency information.

" + "documentation":"

Queue setting that determines the highest latency allowed for individual players when placing a game session. When a latency policy is in force, a game session cannot be placed with any fleet in a Region where a player reports latency higher than the cap. Latency policies are only enforced when the placement request contains player latency information.

" }, "PlayerLatencyPolicyList":{ "type":"list", @@ -4191,19 +4389,23 @@ "members":{ "PlayerSessionId":{ "shape":"PlayerSessionId", - "documentation":"

Unique identifier for a player session.

" + "documentation":"

A unique identifier for a player session.

" }, "PlayerId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a player that is associated with this player session.

" + "documentation":"

A unique identifier for a player that is associated with this player session.

" }, "GameSessionId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the game session that the player session is connected to.

" + "documentation":"

A unique identifier for the game session that the player session is connected to.

" }, "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet that the player's game session is running on.

" + "documentation":"

A unique identifier for a fleet that the player's game session is running on.

" + }, + "FleetArn":{ + "shape":"ArnStringModel", + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet that the player's game session is running on.

" }, "CreationTime":{ "shape":"Timestamp", @@ -4219,9 +4421,12 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number.

" + "documentation":"

IP address of the instance that is running the game session. When connecting to an Amazon GameLift game server, a client needs to reference an IP address (or DNS name) and port number.

" + }, + "DnsName":{ + "shape":"DnsName", + "documentation":"

DNS identifier assigned to the instance that is running the game session. Values have the following format:

  • TLS-enabled fleets: <unique identifier>.<region identifier>.amazongamelift.com.

  • Non-TLS-enabled fleets: ec2-<unique identifier>.compute.amazonaws.com. (See Amazon EC2 Instance IP Addressing.)

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" }, - "DnsName":{"shape":"DnsName"}, "Port":{ "shape":"PortNumber", "documentation":"

Port number for the game session. To connect to an Amazon GameLift server process, an app needs both the IP address and port number.

" @@ -4294,11 +4499,11 @@ "members":{ "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a scaling policy. Policy names do not need to be unique. A fleet can have only one scaling policy with the same name.

" + "documentation":"

A descriptive label that is associated with a scaling policy. Policy names do not need to be unique. A fleet can have only one scaling policy with the same name.

" }, "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to apply this policy to. The fleet cannot be in any of the following statuses: ERROR or DELETING.

" + "documentation":"

A unique identifier for a fleet to apply this policy to. You can use either the fleet ID or ARN value. The fleet cannot be in any of the following statuses: ERROR or DELETING.

" }, "ScalingAdjustment":{ "shape":"Integer", @@ -4306,7 +4511,7 @@ }, "ScalingAdjustmentType":{ "shape":"ScalingAdjustmentType", - "documentation":"

Type of adjustment to make to a fleet's instance count (see FleetCapacity):

  • ChangeInCapacity -- add (or subtract) the scaling adjustment value from the current instance count. Positive values scale up while negative values scale down.

  • ExactCapacity -- set the instance count to the scaling adjustment value.

  • PercentChangeInCapacity -- increase or reduce the current instance count by the scaling adjustment, read as a percentage. Positive values scale up while negative values scale down; for example, a value of \"-10\" scales the fleet down by 10%.

" + "documentation":"

The type of adjustment to make to a fleet's instance count (see FleetCapacity):

  • ChangeInCapacity -- add (or subtract) the scaling adjustment value from the current instance count. Positive values scale up while negative values scale down.

  • ExactCapacity -- set the instance count to the scaling adjustment value.

  • PercentChangeInCapacity -- increase or reduce the current instance count by the scaling adjustment, read as a percentage. Positive values scale up while negative values scale down; for example, a value of \"-10\" scales the fleet down by 10%.

" }, "Threshold":{ "shape":"Double", @@ -4326,11 +4531,11 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

Type of scaling policy to create. For a target-based policy, set the parameter MetricName to 'PercentAvailableGameSessions' and specify a TargetConfiguration. For a rule-based policy set the following parameters: MetricName, ComparisonOperator, Threshold, EvaluationPeriods, ScalingAdjustmentType, and ScalingAdjustment.

" + "documentation":"

The type of scaling policy to create. For a target-based policy, set the parameter MetricName to 'PercentAvailableGameSessions' and specify a TargetConfiguration. For a rule-based policy set the following parameters: MetricName, ComparisonOperator, Threshold, EvaluationPeriods, ScalingAdjustmentType, and ScalingAdjustment.

" }, "TargetConfiguration":{ "shape":"TargetConfiguration", - "documentation":"

Object that contains settings for a target-based scaling policy.

" + "documentation":"

The settings for a target-based scaling policy.

" } }, "documentation":"

Represents the input for a request action.
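
A minimal target-based example with boto3, following the parameter combination described above; the fleet ID and target value are placeholders:

import boto3

gamelift = boto3.client("gamelift")

gamelift.put_scaling_policy(
    Name="keep-10-percent-free",
    FleetId="fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",   # placeholder
    PolicyType="TargetBased",
    MetricName="PercentAvailableGameSessions",
    TargetConfiguration={"TargetValue": 10.0},
)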

" @@ -4340,7 +4545,7 @@ "members":{ "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -4355,7 +4560,7 @@ "members":{ "BuildId":{ "shape":"BuildId", - "documentation":"

Unique identifier for a build to get credentials for.

" + "documentation":"

A unique identifier for a build to get credentials for. You can use either the build ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -4380,7 +4585,7 @@ "members":{ "AliasId":{ "shape":"AliasId", - "documentation":"

Unique identifier for the alias you want to resolve.

" + "documentation":"

The unique identifier of the alias that you want to retrieve a fleet ID for. You can use either the alias ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -4390,7 +4595,11 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Fleet identifier that is associated with the requested alias.

" + "documentation":"

The fleet identifier that the alias is pointing to.

" + }, + "FleetArn":{ + "shape":"ArnStringModel", + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet resource that this alias points to.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -4400,32 +4609,32 @@ "members":{ "NewGameSessionsPerCreator":{ "shape":"WholeNumber", - "documentation":"

Maximum number of game sessions that an individual can create during the policy period.

" + "documentation":"

The maximum number of game sessions that an individual can create during the policy period.

" }, "PolicyPeriodInMinutes":{ "shape":"WholeNumber", - "documentation":"

Time span used in evaluating the resource creation limit policy.

" + "documentation":"

The time span used in evaluating the resource creation limit policy.

" } }, - "documentation":"

Policy that limits the number of game sessions a player can create on the same fleet. This optional policy gives game owners control over how players can consume available game server resources. A resource creation policy makes the following statement: \"An individual player can create a maximum number of new game sessions within a specified time period\".

The policy is evaluated when a player tries to create a new game session. For example, with a policy of 10 new game sessions and a time period of 60 minutes, on receiving a CreateGameSession request, Amazon GameLift checks that the player (identified by CreatorId) has created fewer than 10 game sessions in the past 60 minutes.

" + "documentation":"

A policy that limits the number of game sessions a player can create on the same fleet. This optional policy gives game owners control over how players can consume available game server resources. A resource creation policy makes the following statement: \"An individual player can create a maximum number of new game sessions within a specified time period\".

The policy is evaluated when a player tries to create a new game session. For example: Assume you have a policy of 10 new game sessions and a time period of 60 minutes. On receiving a CreateGameSession request, Amazon GameLift checks that the player (identified by CreatorId) has created fewer than 10 game sessions in the past 60 minutes.
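
A short sketch mirroring that example with boto3, applied through UpdateFleetAttributes; the fleet ID is a placeholder:

import boto3

gamelift = boto3.client("gamelift")

gamelift.update_fleet_attributes(
    FleetId="fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",   # placeholder
    ResourceCreationLimitPolicy={
        "NewGameSessionsPerCreator": 10,   # at most 10 new sessions per creator
        "PolicyPeriodInMinutes": 60,       # within any 60-minute window
    },
)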

" }, "RoutingStrategy":{ "type":"structure", "members":{ "Type":{ "shape":"RoutingStrategyType", - "documentation":"

Type of routing strategy.

Possible routing types include the following:

  • SIMPLE -- The alias resolves to one specific fleet. Use this type when routing to active fleets.

  • TERMINAL -- The alias does not resolve to a fleet but instead can be used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException with the RoutingStrategy message embedded.

" + "documentation":"

The type of routing strategy for the alias.

Possible routing types include the following:

  • SIMPLE - The alias resolves to one specific fleet. Use this type when routing to active fleets.

  • TERMINAL - The alias does not resolve to a fleet but instead can be used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException with the RoutingStrategy message embedded.

" }, "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet that the alias points to.

" + "documentation":"

The unique identifier for a fleet that the alias points to. This value is the fleet ID, not the fleet ARN.

" }, "Message":{ "shape":"FreeText", - "documentation":"

Message text to be used with a terminal routing strategy.

" + "documentation":"

The message text to be used with a terminal routing strategy.

" } }, - "documentation":"

Routing configuration for a fleet alias.

" + "documentation":"

The routing configuration for a fleet alias.
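
Two hedged examples with boto3, one per routing type; the alias names, fleet ID, and message text are placeholders:

import boto3

gamelift = boto3.client("gamelift")

# SIMPLE: route traffic to an active fleet.
gamelift.create_alias(
    Name="live",
    RoutingStrategy={"Type": "SIMPLE",
                     "FleetId": "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff"},
)

# TERMINAL: return a message instead of resolving to a fleet.
gamelift.create_alias(
    Name="retired",
    RoutingStrategy={"Type": "TERMINAL", "Message": "This game world is offline."},
)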

" }, "RoutingStrategyType":{ "type":"string", @@ -4449,40 +4658,40 @@ "members":{ "ServerProcesses":{ "shape":"ServerProcessList", - "documentation":"

Collection of server process configurations that describe which server processes to run on each instance in a fleet.

" + "documentation":"

A collection of server process configurations that describe which server processes to run on each instance in a fleet.

" }, "MaxConcurrentGameSessionActivations":{ "shape":"MaxConcurrentGameSessionActivations", - "documentation":"

Maximum number of game sessions with status ACTIVATING to allow on an instance simultaneously. This setting limits the amount of instance resources that can be used for new game activations at any one time.

" + "documentation":"

The maximum number of game sessions with status ACTIVATING to allow on an instance simultaneously. This setting limits the amount of instance resources that can be used for new game activations at any one time.

" }, "GameSessionActivationTimeoutSeconds":{ "shape":"GameSessionActivationTimeoutSeconds", - "documentation":"

Maximum amount of time (in seconds) that a game session can remain in status ACTIVATING. If the game session is not active before the timeout, activation is terminated and the game session status is changed to TERMINATED.

" + "documentation":"

The maximum amount of time (in seconds) that a game session can remain in status ACTIVATING. If the game session is not active before the timeout, activation is terminated and the game session status is changed to TERMINATED.

" } }, - "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. Server processes run either a custom game build executable or a Realtime Servers script. Each instance in the fleet starts the specified server processes and continues to start new processes as existing processes end. An instance regularly checks for an updated run-time configuration.

The run-time configuration enables the instances in a fleet to run multiple processes simultaneously. Learn more about Running Multiple Processes on a Fleet .

A Amazon GameLift instance is limited to 50 processes running simultaneously. To calculate the total number of processes in a run-time configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object.

" + "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. Server processes run either a custom game build executable or a Realtime Servers script. Each instance in the fleet starts the specified server processes and continues to start new processes as existing processes end. Each instance regularly checks for an updated runtime configuration.

The runtime configuration enables the instances in a fleet to run multiple processes simultaneously. Learn more about Running Multiple Processes on a Fleet.

An Amazon GameLift instance is limited to 50 processes running simultaneously. To calculate the total number of processes in a runtime configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object.
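
A hedged sketch of that arithmetic and of applying the configuration with boto3; launch paths, counts, and the fleet ID are placeholders:

import boto3

runtime_configuration = {
    "ServerProcesses": [
        {"LaunchPath": "/local/game/MyServer", "ConcurrentExecutions": 10},
        {"LaunchPath": "/local/game/MyServer", "Parameters": "-lowspec",
         "ConcurrentExecutions": 10},
    ],
    "MaxConcurrentGameSessionActivations": 5,
    "GameSessionActivationTimeoutSeconds": 300,
}

# 10 + 10 = 20 concurrent server processes per instance, under the 50-process limit.
total = sum(p["ConcurrentExecutions"] for p in runtime_configuration["ServerProcesses"])
assert total <= 50

gamelift = boto3.client("gamelift")
gamelift.update_runtime_configuration(
    FleetId="fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",   # placeholder
    RuntimeConfiguration=runtime_configuration,
)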

" }, "S3Location":{ "type":"structure", "members":{ "Bucket":{ "shape":"NonEmptyString", - "documentation":"

Amazon S3 bucket identifier. This is the name of the S3 bucket.

" + "documentation":"

An Amazon S3 bucket identifier. This is the name of the S3 bucket.

" }, "Key":{ "shape":"NonEmptyString", - "documentation":"

Name of the zip file containing the build files or script files.

" + "documentation":"

The name of the zip file that contains the build files or script files.

" }, "RoleArn":{ "shape":"NonEmptyString", - "documentation":"

Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift to access the S3 bucket.

" + "documentation":"

The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift to access the S3 bucket.

" }, "ObjectVersion":{ "shape":"NonEmptyString", - "documentation":"

Version of the file, if object versioning is turned on for the bucket. Amazon GameLift uses this information when retrieving files from an S3 bucket that you own. Use this parameter to specify a specific version of the file; if not set, the latest version of the file is retrieved.

" + "documentation":"

The version of the file, if object versioning is turned on for the bucket. Amazon GameLift uses this information when retrieving files from an S3 bucket that you own. Use this parameter to specify a specific version of the file. If not set, the latest version of the file is retrieved.

" } }, - "documentation":"

Location in Amazon Simple Storage Service (Amazon S3) where build or script files are stored for access by Amazon GameLift. This location is specified in CreateBuild, CreateScript, and UpdateScript requests.

" + "documentation":"

The location in Amazon S3 where build or script files are stored for access by Amazon GameLift. This location is specified in CreateBuild, CreateScript, and UpdateScript requests.
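
A minimal sketch of supplying this location to CreateBuild through boto3; bucket, key, role ARN, and operating system are placeholders:

import boto3

gamelift = boto3.client("gamelift")

build = gamelift.create_build(
    Name="MyGameServer",
    Version="1.0.0",
    StorageLocation={
        "Bucket": "my-game-builds",                                    # placeholder bucket
        "Key": "builds/my-server-build.zip",                           # placeholder object key
        "RoleArn": "arn:aws:iam::111122223333:role/GameLiftS3Access",  # placeholder role
    },
    OperatingSystem="AMAZON_LINUX",
)["Build"]
print(build["BuildId"], build["Status"])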

" }, "ScalingAdjustmentType":{ "type":"string", @@ -4497,11 +4706,11 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet that is associated with this scaling policy.

" + "documentation":"

A unique identifier for a fleet that is associated with this scaling policy.

" }, "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

" }, "Status":{ "shape":"ScalingStatusType", @@ -4513,7 +4722,7 @@ }, "ScalingAdjustmentType":{ "shape":"ScalingAdjustmentType", - "documentation":"

Type of adjustment to make to a fleet's instance count (see FleetCapacity):

  • ChangeInCapacity -- add (or subtract) the scaling adjustment value from the current instance count. Positive values scale up while negative values scale down.

  • ExactCapacity -- set the instance count to the scaling adjustment value.

  • PercentChangeInCapacity -- increase or reduce the current instance count by the scaling adjustment, read as a percentage. Positive values scale up while negative values scale down.

" + "documentation":"

The type of adjustment to make to a fleet's instance count (see FleetCapacity):

  • ChangeInCapacity -- add (or subtract) the scaling adjustment value from the current instance count. Positive values scale up while negative values scale down.

  • ExactCapacity -- set the instance count to the scaling adjustment value.

  • PercentChangeInCapacity -- increase or reduce the current instance count by the scaling adjustment, read as a percentage. Positive values scale up while negative values scale down.

" }, "ComparisonOperator":{ "shape":"ComparisonOperatorType", @@ -4533,11 +4742,11 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

Type of scaling policy to create. For a target-based policy, set the parameter MetricName to 'PercentAvailableGameSessions' and specify a TargetConfiguration. For a rule-based policy set the following parameters: MetricName, ComparisonOperator, Threshold, EvaluationPeriods, ScalingAdjustmentType, and ScalingAdjustment.

" + "documentation":"

The type of scaling policy to create. For a target-based policy, set the parameter MetricName to 'PercentAvailableGameSessions' and specify a TargetConfiguration. For a rule-based policy set the following parameters: MetricName, ComparisonOperator, Threshold, EvaluationPeriods, ScalingAdjustmentType, and ScalingAdjustment.
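For example (a boto3 sketch with a placeholder fleet ID), a target-based policy supplies only MetricName and TargetConfiguration, while a rule-based policy supplies the comparison and adjustment parameters listed above:

    import boto3

    gamelift = boto3.client("gamelift")
    fleet_id = "fleet-12345678-aaaa-bbbb-cccc-111122223333"  # placeholder

    # Target-based policy: keep 10% of game session slots available.
    gamelift.put_scaling_policy(
        Name="keep-buffer",
        FleetId=fleet_id,
        PolicyType="TargetBased",
        MetricName="PercentAvailableGameSessions",
        TargetConfiguration={"TargetValue": 10.0},
    )

    # Rule-based policy: add 2 instances when idle instances stay below 5.
    gamelift.put_scaling_policy(
        Name="scale-up-on-low-idle",
        FleetId=fleet_id,
        PolicyType="RuleBased",
        MetricName="IdleInstances",
        ComparisonOperator="LessThanThreshold",
        Threshold=5.0,
        EvaluationPeriods=10,
        ScalingAdjustmentType="ChangeInCapacity",
        ScalingAdjustment=2,
    )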

" }, "TargetConfiguration":{ "shape":"TargetConfiguration", - "documentation":"

Object that contains settings for a target-based scaling policy.

" + "documentation":"

The settings for a target-based scaling policy.

" } }, "documentation":"

Rule that controls how a fleet is scaled. Scaling policies are uniquely identified by the combination of name and fleet ID.

" @@ -4563,31 +4772,39 @@ "members":{ "ScriptId":{ "shape":"ScriptId", - "documentation":"

Unique identifier for a Realtime script

" + "documentation":"

A unique identifier for a Realtime script.

" + }, + "ScriptArn":{ + "shape":"ScriptArn", + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift script resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift script ARN, the resource ID matches the ScriptId value.

" }, "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a script. Script names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a script. Script names do not need to be unique.

" }, "Version":{ "shape":"NonZeroAndMaxString", - "documentation":"

Version that is associated with a build or script. Version strings do not need to be unique.

" + "documentation":"

The version that is associated with a build or script. Version strings do not need to be unique.

" }, "SizeOnDisk":{ "shape":"PositiveLong", - "documentation":"

File size of the uploaded Realtime script, expressed in bytes. When files are uploaded from an S3 location, this value remains at \"0\".

" + "documentation":"

The file size of the uploaded Realtime script, expressed in bytes. When files are uploaded from an S3 location, this value remains at \"0\".

" }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. The format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "StorageLocation":{"shape":"S3Location"} }, "documentation":"

Properties describing a Realtime script.

Related operations

" }, + "ScriptArn":{ + "type":"string", + "pattern":"^arn:.*:script\\/script-\\S+" + }, "ScriptId":{ "type":"string", - "pattern":"^script-\\S+|^arn:.*script-\\S+" + "pattern":"^script-\\S+|^arn:.*:script\\/script-\\S+" }, "ScriptList":{ "type":"list", @@ -4598,11 +4815,11 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to search for active game sessions. Each request must reference either a fleet ID or alias ID, but not both.

" + "documentation":"

A unique identifier for a fleet to search for active game sessions. You can use either the fleet ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "AliasId":{ "shape":"AliasId", - "documentation":"

Unique identifier for an alias associated with the fleet to search for active game sessions. Each request must reference either a fleet ID or alias ID, but not both.

" + "documentation":"

A unique identifier for an alias associated with the fleet to search for active game sessions. You can use either the alias ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "FilterExpression":{ "shape":"NonZeroAndMaxString", @@ -4614,7 +4831,7 @@ }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. The maximum number of results returned is 20, even if this value is not set or is set higher than 20.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. The maximum number of results returned is 20, even if this value is not set or is set higher than 20.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -4628,7 +4845,7 @@ "members":{ "GameSessions":{ "shape":"GameSessionList", - "documentation":"

Collection of objects containing game session properties for each session matching the request.

" + "documentation":"

A collection of objects containing game session properties for each session matching the request.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -4646,15 +4863,15 @@ "members":{ "LaunchPath":{ "shape":"NonZeroAndMaxString", - "documentation":"

Location of the server executable in a custom game build or the name of the Realtime script file that contains the Init() function. Game builds and Realtime scripts are installed on instances at the root:

  • Windows (for custom game builds only): C:\\game. Example: \"C:\\game\\MyGame\\server.exe\"

  • Linux: /local/game. Examples: \"/local/game/MyGame/server.exe\" or \"/local/game/MyRealtimeScript.js\"

" + "documentation":"

The location of the server executable in a custom game build or the name of the Realtime script file that contains the Init() function. Game builds and Realtime scripts are installed on instances at the root:

  • Windows (for custom game builds only): C:\\game. Example: \"C:\\game\\MyGame\\server.exe\"

  • Linux: /local/game. Examples: \"/local/game/MyGame/server.exe\" or \"/local/game/MyRealtimeScript.js\"

" }, "Parameters":{ "shape":"NonZeroAndMaxString", - "documentation":"

Optional list of parameters to pass to the server executable or Realtime script on launch.

" + "documentation":"

An optional list of parameters to pass to the server executable or Realtime script on launch.

" }, "ConcurrentExecutions":{ "shape":"PositiveInteger", - "documentation":"

Number of server processes using this configuration to run concurrently on an instance.

" + "documentation":"

The number of server processes that use this configuration to run concurrently on an instance.

" } }, "documentation":"

A set of instructions for launching server processes on each instance in a fleet. Server processes run either a custom game build executable or a Realtime Servers script. Each instruction set identifies the location of the custom game build executable or Realtime launch script, optional launch parameters, and the number of server processes with this configuration to maintain concurrently on the instance. Server process configurations make up a fleet's RuntimeConfiguration .

" @@ -4680,7 +4897,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet

" + "documentation":"

A unique identifier for a fleet to start actions on. You can use either the fleet ID or ARN value.

" }, "Actions":{ "shape":"FleetActionList", @@ -4703,11 +4920,11 @@ "members":{ "PlacementId":{ "shape":"IdStringModel", - "documentation":"

Unique identifier to assign to the new game session placement. This value is developer-defined. The value must be unique across all regions and cannot be reused unless you are resubmitting a canceled or timed-out placement request.

" + "documentation":"

A unique identifier to assign to the new game session placement. This value is developer-defined. The value must be unique across all Regions and cannot be reused unless you are resubmitting a canceled or timed-out placement request.

" }, "GameSessionQueueName":{ "shape":"GameSessionQueueName", - "documentation":"

Name of the queue to use to place the new game session.

" + "documentation":"

Name of the queue to use to place the new game session. You can use either the queue name or ARN value.

" }, "GameProperties":{ "shape":"GamePropertyList", @@ -4715,15 +4932,15 @@ }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", - "documentation":"

Maximum number of players that can be connected simultaneously to the game session.

" + "documentation":"

The maximum number of players that can be connected simultaneously to the game session.

" }, "GameSessionName":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a game session. Session names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a game session. Session names do not need to be unique.

" }, "PlayerLatencies":{ "shape":"PlayerLatencyList", - "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players.

" + "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players.
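A minimal boto3 sketch of a placement request that carries player latency data; the queue name, player IDs, and latency values are placeholders:

    import boto3
    import uuid

    gamelift = boto3.client("gamelift")

    gamelift.start_game_session_placement(
        PlacementId=str(uuid.uuid4()),      # must be unique across Regions
        GameSessionQueueName="my-queue",    # placeholder queue name
        MaximumPlayerSessionCount=10,
        PlayerLatencies=[
            {"PlayerId": "player-1", "RegionIdentifier": "us-west-2",
             "LatencyInMilliseconds": 40.0},
            {"PlayerId": "player-1", "RegionIdentifier": "us-east-1",
             "LatencyInMilliseconds": 90.0},
        ],
    )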

" }, "DesiredPlayerSessions":{ "shape":"DesiredPlayerSessionList", @@ -4756,19 +4973,19 @@ "members":{ "TicketId":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift will generate one in the form of a UUID. Use this identifier to track the match backfill ticket status and retrieve match results.

" + "documentation":"

A unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift will generate one in the form of a UUID. Use this identifier to track the match backfill ticket status and retrieve match results.

" }, "ConfigurationName":{ - "shape":"MatchmakingIdStringModel", - "documentation":"

Name of the matchmaker to use for this request. The name of the matchmaker that was used with the original game session is listed in the GameSession object, MatchmakerData property. This property contains a matchmaking configuration ARN value, which includes the matchmaker name. (In the ARN value \"arn:aws:gamelift:us-west-2:111122223333:matchmakingconfiguration/MM-4v4\", the matchmaking configuration name is \"MM-4v4\".) Use only the name for this parameter.

" + "shape":"MatchmakingConfigurationName", + "documentation":"

Name of the matchmaker to use for this request. You can use either the configuration name or ARN value. The ARN of the matchmaker that was used with the original game session is listed in the GameSession object, MatchmakerData property.

" }, "GameSessionArn":{ "shape":"ArnStringModel", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it. This is the same as the game session ID.

" }, "Players":{ "shape":"PlayerList", - "documentation":"

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

  • PlayerID, PlayerAttributes, Team -\\\\- This information is maintained in the GameSession object, MatchmakerData property, for all players who are currently assigned to the game session. The matchmaker data is in JSON syntax, formatted as a string. For more details, see Match Data.

  • LatencyInMs -\\\\- If the matchmaker uses player latency, include a latency value, in milliseconds, for the region that the game session is currently in. Do not include latency values for any other region.

" + "documentation":"

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

  • PlayerID, PlayerAttributes, Team -\\\\- This information is maintained in the GameSession object, MatchmakerData property, for all players who are currently assigned to the game session. The matchmaker data is in JSON syntax, formatted as a string. For more details, see Match Data.

  • LatencyInMs -\\\\- If the matchmaker uses player latency, include a latency value, in milliseconds, for the Region that the game session is currently in. Do not include latency values for any other Region.

" } }, "documentation":"

Represents the input for a request action.
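As a hedged boto3 sketch of this request (all ARNs and player data below are placeholders), PlayerAttributes use the AttributeValue union (S, N, SL, or SDM) and LatencyInMs covers only the Region the game session is in:

    import boto3

    gamelift = boto3.client("gamelift")

    gamelift.start_match_backfill(
        ConfigurationName="MM-4v4",  # matchmaker name or ARN (placeholder)
        GameSessionArn="arn:aws:gamelift:us-west-2::gamesession/fleet-1234/gsess-5678",
        Players=[
            {
                "PlayerId": "player-1",
                "Team": "red",
                "PlayerAttributes": {"skill": {"N": 23.0}},
                "LatencyInMs": {"us-west-2": 35},
            },
        ],
    )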

" @@ -4792,11 +5009,11 @@ "members":{ "TicketId":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift will generate one in the form of a UUID. Use this identifier to track the matchmaking ticket status and retrieve match results.

" + "documentation":"

A unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift will generate one in the form of a UUID. Use this identifier to track the matchmaking ticket status and retrieve match results.

" }, "ConfigurationName":{ - "shape":"MatchmakingIdStringModel", - "documentation":"

Name of the matchmaking configuration to use for this request. Matchmaking configurations must exist in the same region as this request.

" + "shape":"MatchmakingConfigurationName", + "documentation":"

Name of the matchmaking configuration to use for this request. Matchmaking configurations must exist in the same Region as this request. You can use either the configuration name or ARN value.

" }, "Players":{ "shape":"PlayerList", @@ -4824,7 +5041,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet

" + "documentation":"

A unique identifier for a fleet to stop actions on. You can use either the fleet ID or ARN value.

" }, "Actions":{ "shape":"FleetActionList", @@ -4843,7 +5060,7 @@ "members":{ "PlacementId":{ "shape":"IdStringModel", - "documentation":"

Unique identifier for a game session placement to cancel.

" + "documentation":"

A unique identifier for a game session placement to cancel.

" } }, "documentation":"

Represents the input for a request action.

" @@ -4864,7 +5081,7 @@ "members":{ "TicketId":{ "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking ticket.

" + "documentation":"

A unique identifier for a matchmaking ticket.

" } }, "documentation":"

Represents the input for a request action.

" @@ -4884,6 +5101,76 @@ "member":{"shape":"NonZeroAndMaxString"} }, "StringModel":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key for a developer-defined key:value pair for tagging an AWS resource.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value for a developer-defined key:value pair for tagging an AWS resource.

" + } + }, + "documentation":"

A label that can be assigned to a GameLift resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to and uniquely identifies the GameLift resource that you want to assign tags to. GameLift resource ARNs are included in the data object for the resource, which can be retrieved by calling a List or Describe action for the resource type.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of one or more tags to assign to the specified GameLift resource. Tags are developer-defined and structured as key-value pairs. The maximum tag limit may be lower than stated. See Tagging AWS Resources for actual tagging limits.
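A short boto3 sketch of tagging and untagging a GameLift resource by ARN (the fleet ARN and tags are placeholders):

    import boto3

    gamelift = boto3.client("gamelift")
    fleet_arn = "arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-1234"  # placeholder

    gamelift.tag_resource(
        ResourceARN=fleet_arn,
        Tags=[{"Key": "stage", "Value": "prod"},
              {"Key": "team", "Value": "live-ops"}],
    )

    # Remove a single tag by key.
    gamelift.untag_resource(ResourceARN=fleet_arn, TagKeys=["team"])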

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TaggingFailedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "documentation":"

The requested tagging operation did not succeed. This may be due to an invalid tag format, or the maximum tag limit may have been exceeded. Resolve the issue before retrying.

", + "exception":true + }, "TargetConfiguration":{ "type":"structure", "required":["TargetValue"], @@ -4917,28 +5204,50 @@ "members":{ "Message":{"shape":"NonEmptyString"} }, - "documentation":"

The requested operation is not supported in the region specified.

", + "documentation":"

The requested operation is not supported in the Region specified.

", "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to and uniquely identifies the GameLift resource that you want to remove tags from. GameLift resource ARNs are included in the data object for the resource, which can be retrieved by calling a List or Describe action for the resource type.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

A list of one or more tags to remove from the specified GameLift resource. Tags are developer-defined and structured as key-value pairs.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateAliasInput":{ "type":"structure", "required":["AliasId"], "members":{ "AliasId":{ "shape":"AliasId", - "documentation":"

Unique identifier for a fleet alias. Specify the alias you want to update.

" + "documentation":"

A unique identifier for the alias that you want to update. You can use either the alias ID or ARN value.

" }, "Name":{ "shape":"NonBlankAndLengthConstraintString", - "documentation":"

Descriptive label that is associated with an alias. Alias names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with an alias. Alias names do not need to be unique.

" }, "Description":{ "shape":"NonZeroAndMaxString", - "documentation":"

Human-readable description of an alias.

" + "documentation":"

A human-readable description of the alias.

" }, "RoutingStrategy":{ "shape":"RoutingStrategy", - "documentation":"

Object that specifies the fleet and routing type to use for the alias.

" + "documentation":"

The routing configuration, including routing type and fleet target, for the alias.

" } }, "documentation":"

Represents the input for a request action.

" @@ -4948,7 +5257,7 @@ "members":{ "Alias":{ "shape":"Alias", - "documentation":"

Object that contains the updated alias configuration.

" + "documentation":"

The updated alias resource.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -4959,15 +5268,15 @@ "members":{ "BuildId":{ "shape":"BuildId", - "documentation":"

Unique identifier for a build to update.

" + "documentation":"

A unique identifier for a build to update. You can use either the build ID or ARN value.

" }, "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a build. Build names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a build. Build names do not need to be unique.

" }, "Version":{ "shape":"NonZeroAndMaxString", - "documentation":"

Version that is associated with a build or script. Version strings do not need to be unique.

" + "documentation":"

Version information that is associated with a build or script. Version strings do not need to be unique.

" } }, "documentation":"

Represents the input for a request action.

" @@ -4977,7 +5286,7 @@ "members":{ "Build":{ "shape":"Build", - "documentation":"

Object that contains the updated build record.

" + "documentation":"

The updated build record.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -4988,11 +5297,11 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to update attribute metadata for.

" + "documentation":"

A unique identifier for a fleet to update attribute metadata for. You can use either the fleet ID or ARN value.

" }, "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a fleet. Fleet names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a fleet. Fleet names do not need to be unique.

" }, "Description":{ "shape":"NonZeroAndMaxString", @@ -5018,7 +5327,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet that was updated.

" + "documentation":"

A unique identifier for a fleet that was updated. Use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -5029,7 +5338,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to update capacity for.

" + "documentation":"

A unique identifier for a fleet to update capacity for. You can use either the fleet ID or ARN value.

" }, "DesiredInstances":{ "shape":"WholeNumber", @@ -5037,11 +5346,11 @@ }, "MinSize":{ "shape":"WholeNumber", - "documentation":"

Minimum value allowed for the fleet's instance count. Default if not set is 0.

" + "documentation":"

The minimum value allowed for the fleet's instance count. Default if not set is 0.

" }, "MaxSize":{ "shape":"WholeNumber", - "documentation":"

Maximum value allowed for the fleet's instance count. Default if not set is 1.

" + "documentation":"

The maximum value allowed for the fleet's instance count. Default if not set is 1.

" } }, "documentation":"

Represents the input for a request action.

" @@ -5051,7 +5360,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet that was updated.

" + "documentation":"

A unique identifier for a fleet that was updated.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -5062,15 +5371,15 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to update port settings for.

" + "documentation":"

A unique identifier for a fleet to update port settings for. You can use either the fleet ID or ARN value.

" }, "InboundPermissionAuthorizations":{ "shape":"IpPermissionsList", - "documentation":"

Collection of port settings to be added to the fleet record.

" + "documentation":"

A collection of port settings to be added to the fleet record.

" }, "InboundPermissionRevocations":{ "shape":"IpPermissionsList", - "documentation":"

Collection of port settings to be removed from the fleet record.

" + "documentation":"

A collection of port settings to be removed from the fleet record.

" } }, "documentation":"

Represents the input for a request action.

" @@ -5080,7 +5389,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet that was updated.

" + "documentation":"

A unique identifier for a fleet that was updated.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -5091,15 +5400,15 @@ "members":{ "GameSessionId":{ "shape":"ArnStringModel", - "documentation":"

Unique identifier for the game session to update.

" + "documentation":"

A unique identifier for the game session to update.

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", - "documentation":"

Maximum number of players that can be connected simultaneously to the game session.

" + "documentation":"

The maximum number of players that can be connected simultaneously to the game session.

" }, "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a game session. Session names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a game session. Session names do not need to be unique.

" }, "PlayerSessionCreationPolicy":{ "shape":"PlayerSessionCreationPolicy", @@ -5117,7 +5426,7 @@ "members":{ "GameSession":{ "shape":"GameSession", - "documentation":"

Object that contains the updated game session metadata.

" + "documentation":"

The updated game session metadata.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -5128,19 +5437,19 @@ "members":{ "Name":{ "shape":"GameSessionQueueName", - "documentation":"

Descriptive label that is associated with game session queue. Queue names must be unique within each region.

" + "documentation":"

A descriptive label that is associated with a game session queue. Queue names must be unique within each Region. You can use either the queue ID or ARN value.

" }, "TimeoutInSeconds":{ "shape":"WholeNumber", - "documentation":"

Maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" + "documentation":"

The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", - "documentation":"

Collection of latency policies to apply when processing game sessions placement requests with player latency information. Multiple policies are evaluated in order of the maximum latency value, starting with the lowest latency values. With just one policy, it is enforced at the start of the game session placement for the duration period. With multiple policies, each policy is enforced consecutively for its duration period. For example, a queue might enforce a 60-second policy followed by a 120-second policy, and then no policy for the remainder of the placement. When updating policies, provide a complete collection of policies.

" + "documentation":"

A collection of latency policies to apply when processing game sessions placement requests with player latency information. Multiple policies are evaluated in order of the maximum latency value, starting with the lowest latency values. With just one policy, the policy is enforced at the start of the game session placement for the duration period. With multiple policies, each policy is enforced consecutively for its duration period. For example, a queue might enforce a 60-second policy followed by a 120-second policy, and then no policy for the remainder of the placement. When updating policies, provide a complete collection of policies.

" }, "Destinations":{ "shape":"GameSessionQueueDestinationList", - "documentation":"

List of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order. When updating this list, provide a complete list of destinations.

" + "documentation":"

A list of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order. When updating this list, provide a complete list of destinations.

" } }, "documentation":"

Represents the input for a request action.
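To make the policy ordering concrete, a boto3 sketch matching the 60-second / 120-second example above (queue name, latency caps, and destination ARN are placeholders):

    import boto3

    gamelift = boto3.client("gamelift")

    gamelift.update_game_session_queue(
        Name="my-queue",  # placeholder
        TimeoutInSeconds=600,
        # Enforced consecutively: a strict cap for 60 seconds, a looser cap for
        # 120 seconds, then no latency policy for the rest of the placement.
        PlayerLatencyPolicies=[
            {"MaximumIndividualPlayerLatencyMilliseconds": 50,
             "PolicyDurationSeconds": 60},
            {"MaximumIndividualPlayerLatencyMilliseconds": 100,
             "PolicyDurationSeconds": 120},
        ],
        Destinations=[
            {"DestinationArn": "arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-1234"},
        ],
    )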

" @@ -5150,7 +5459,7 @@ "members":{ "GameSessionQueue":{ "shape":"GameSessionQueue", - "documentation":"

Object that describes the newly updated game session queue.

" + "documentation":"

An object that describes the newly updated game session queue.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -5160,40 +5469,40 @@ "required":["Name"], "members":{ "Name":{ - "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking configuration to update.

" + "shape":"MatchmakingConfigurationName", + "documentation":"

A unique identifier for a matchmaking configuration to update. You can use either the configuration name or ARN value.

" }, "Description":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with matchmaking configuration.

" + "documentation":"

A descriptive label that is associated with a matchmaking configuration.

" }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>:<aws account>:gamesessionqueue/<queue name>. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", - "documentation":"

Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.

" + "documentation":"

The maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.

" }, "AcceptanceTimeoutSeconds":{ "shape":"MatchmakingAcceptanceTimeoutInteger", - "documentation":"

Length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" + "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" }, "AcceptanceRequired":{ "shape":"BooleanModel", - "documentation":"

Flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" + "documentation":"

A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" }, "RuleSetName":{ - "shape":"MatchmakingIdStringModel", - "documentation":"

Unique identifier for a matchmaking rule set to use with this configuration. A matchmaking configuration can only use rule sets that are defined in the same region.

" + "shape":"MatchmakingRuleSetName", + "documentation":"

A unique identifier for a matchmaking rule set to use with this configuration. You can use either the rule set name or ARN value. A matchmaking configuration can only use rule sets that are defined in the same Region.

" }, "NotificationTarget":{ "shape":"SnsArnStringModel", - "documentation":"

SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.

" + "documentation":"

An SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.

" }, "AdditionalPlayerCount":{ "shape":"WholeNumber", - "documentation":"

Number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match.

" + "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.

" }, "CustomEventData":{ "shape":"CustomEventData", @@ -5201,15 +5510,15 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" }, "BackfillMode":{ "shape":"BackfillMode", - "documentation":"

Method used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

" + "documentation":"

The method that is used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

" } }, "documentation":"

Represents the input for a request action.

" @@ -5219,7 +5528,7 @@ "members":{ "Configuration":{ "shape":"MatchmakingConfiguration", - "documentation":"

Object that describes the updated matchmaking configuration.

" + "documentation":"

The updated matchmaking configuration.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -5233,11 +5542,11 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to update run-time configuration for.

" + "documentation":"

A unique identifier for a fleet to update runtime configuration for. You can use either the fleet ID or ARN value.

" }, "RuntimeConfiguration":{ "shape":"RuntimeConfiguration", - "documentation":"

Instructions for launching server processes on each instance in the fleet. Server processes run either a custom game build executable or a Realtime Servers script. The run-time configuration lists the types of server processes to run on an instance and includes the following configuration settings: the server executable or launch script file, launch parameters, and the number of processes to run concurrently on each instance. A CreateFleet request must include a run-time configuration with at least one server process configuration.

" + "documentation":"

Instructions for launching server processes on each instance in the fleet. Server processes run either a custom game build executable or a Realtime Servers script. The runtime configuration lists the types of server processes to run on an instance and includes the following configuration settings: the server executable or launch script file, launch parameters, and the number of processes to run concurrently on each instance. A CreateFleet request must include a runtime configuration with at least one server process configuration.

" } }, "documentation":"

Represents the input for a request action.
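A boto3 sketch of updating a fleet's runtime configuration (the fleet ID and launch path are placeholders); the same limit on total ConcurrentExecutions per instance applies here:

    import boto3

    gamelift = boto3.client("gamelift")

    gamelift.update_runtime_configuration(
        FleetId="fleet-12345678-aaaa-bbbb-cccc-111122223333",  # placeholder
        RuntimeConfiguration={
            "ServerProcesses": [
                {"LaunchPath": "/local/game/MyRealtimeScript.js",
                 "ConcurrentExecutions": 10},
            ],
            "MaxConcurrentGameSessionActivations": 2,
            "GameSessionActivationTimeoutSeconds": 300,
        },
    )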

" @@ -5247,7 +5556,7 @@ "members":{ "RuntimeConfiguration":{ "shape":"RuntimeConfiguration", - "documentation":"

The run-time configuration currently in force. If the update was successful, this object matches the one in the request.

" + "documentation":"

The runtime configuration currently in force. If the update was successful, this object matches the one in the request.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -5258,23 +5567,23 @@ "members":{ "ScriptId":{ "shape":"ScriptId", - "documentation":"

Unique identifier for a Realtime script to update.

" + "documentation":"

A unique identifier for a Realtime script to update. You can use either the script ID or ARN value.

" }, "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

Descriptive label that is associated with a script. Script names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a script. Script names do not need to be unique.

" }, "Version":{ "shape":"NonZeroAndMaxString", - "documentation":"

Version that is associated with a build or script. Version strings do not need to be unique.

" + "documentation":"

The version that is associated with a build or script. Version strings do not need to be unique.

" }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

Location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

" + "documentation":"

The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

" }, "ZipFile":{ "shape":"ZipBlob", - "documentation":"

Data object containing your Realtime scripts and dependencies as a zip file. The zip file can have one or multiple files. Maximum size of a zip file is 5 MB.

When using the AWS CLI tool to create a script, this parameter is set to the zip file name. It must be prepended with the string \"fileb://\" to indicate that the file data is a binary object. For example: --zip-file fileb://myRealtimeScript.zip.

" + "documentation":"

A data object containing your Realtime scripts and dependencies as a zip file. The zip file can have one or multiple files. Maximum size of a zip file is 5 MB.

When using the AWS CLI tool to create a script, this parameter is set to the zip file name. It must be prepended with the string \"fileb://\" to indicate that the file data is a binary object. For example: --zip-file fileb://myRealtimeScript.zip.
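When calling the API directly rather than through the AWS CLI, the zip contents are passed as bytes; a boto3 sketch with placeholder file and script names:

    import boto3

    gamelift = boto3.client("gamelift")

    # Read the zip file (maximum 5 MB) as bytes; the file name is a placeholder.
    with open("myRealtimeScript.zip", "rb") as f:
        zip_bytes = f.read()

    gamelift.update_script(
        ScriptId="script-12345678-aaaa-bbbb-cccc-111122223333",  # placeholder
        Version="1.1",
        ZipFile=zip_bytes,
    )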

" } } }, @@ -5293,7 +5602,7 @@ "members":{ "RuleSetBody":{ "shape":"RuleSetBody", - "documentation":"

Collection of matchmaking rules to validate, formatted as a JSON string.

" + "documentation":"

A collection of matchmaking rules to validate, formatted as a JSON string.

" } }, "documentation":"

Represents the input for a request action.

" @@ -5303,7 +5612,7 @@ "members":{ "Valid":{ "shape":"BooleanModel", - "documentation":"

Response indicating whether the rule set is valid.

" + "documentation":"

A response indicating whether the rule set is valid.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -5313,7 +5622,7 @@ "members":{ "GameLiftAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the AWS account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" + "documentation":"

A unique identifier for the AWS account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", @@ -5321,7 +5630,7 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" }, "CreationTime":{ "shape":"Timestamp", @@ -5343,7 +5652,11 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet. This ID determines the ID of the Amazon GameLift VPC for your fleet.

" + "documentation":"

A unique identifier for a fleet. This ID determines the ID of the Amazon GameLift VPC for your fleet.

" + }, + "FleetArn":{ + "shape":"ArnStringModel", + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet resource for this connection.

" }, "IpV4CidrBlock":{ "shape":"NonZeroAndMaxString", @@ -5351,19 +5664,19 @@ }, "VpcPeeringConnectionId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier that is automatically assigned to the connection record. This ID is referenced in VPC peering connection events, and is used when deleting a connection with DeleteVpcPeeringConnection.

" + "documentation":"

A unique identifier that is automatically assigned to the connection record. This ID is referenced in VPC peering connection events, and is used when deleting a connection with DeleteVpcPeeringConnection.

" }, "Status":{ "shape":"VpcPeeringConnectionStatus", - "documentation":"

Object that contains status information about the connection. Status indicates if a connection is pending, successful, or failed.

" + "documentation":"

The status information about the connection. Status indicates if a connection is pending, successful, or failed.

" }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" }, "GameLiftVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the VPC that contains the Amazon GameLift fleet for this connection. This VPC is managed by Amazon GameLift and does not appear in your AWS account.

" + "documentation":"

A unique identifier for the VPC that contains the Amazon GameLift fleet for this connection. This VPC is managed by Amazon GameLift and does not appear in your AWS account.

" } }, "documentation":"

Represents a peering connection between a VPC on one of your AWS accounts and the VPC for your Amazon GameLift fleets. This record may be for an active peering connection or a pending connection that has not yet been established.

" diff --git a/botocore/data/groundstation/2019-05-23/service-2.json b/botocore/data/groundstation/2019-05-23/service-2.json index e5d3cc13..e3859e01 100644 --- a/botocore/data/groundstation/2019-05-23/service-2.json +++ b/botocore/data/groundstation/2019-05-23/service-2.json @@ -1,1035 +1,676 @@ { - "version": "2.0", - "metadata": { - "apiVersion": "2019-05-23", - "endpointPrefix": "groundstation", - "jsonVersion": "1.1", - "protocol": "rest-json", - "serviceFullName": "AWS Ground Station", - "serviceId": "GroundStation", - "signatureVersion": "v4", - "signingName": "groundstation", - "uid": "groundstation-2019-05-23" + "version":"2.0", + "metadata":{ + "apiVersion":"2019-05-23", + "endpointPrefix":"groundstation", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS Ground Station", + "serviceId":"GroundStation", + "signatureVersion":"v4", + "signingName":"groundstation", + "uid":"groundstation-2019-05-23" }, - "documentation": "

Welcome to the AWS Ground Station API Reference. AWS Ground Station is a fully managed service that\n enables you to control satellite communications, downlink and process satellite data, and\n scale your satellite operations efficiently and cost-effectively without having\n to build or manage your own ground station infrastructure.

", - "operations": { - "CancelContact": { - "name": "CancelContact", - "http": { - "method": "DELETE", - "requestUri": "/contact/{contactId}", - "responseCode": 200 + "operations":{ + "CancelContact":{ + "name":"CancelContact", + "http":{ + "method":"DELETE", + "requestUri":"/contact/{contactId}", + "responseCode":200 }, - "input": { - "shape": "CancelContactRequest" - }, - "output": { - "shape": "ContactIdResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"CancelContactRequest"}, + "output":{"shape":"ContactIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Cancels a contact with a specified contact ID.

", - "idempotent": true + "documentation":"

Cancels a contact with a specified contact ID.

", + "idempotent":true }, - "CreateConfig": { - "name": "CreateConfig", - "http": { - "method": "POST", - "requestUri": "/config", - "responseCode": 200 + "CreateConfig":{ + "name":"CreateConfig", + "http":{ + "method":"POST", + "requestUri":"/config", + "responseCode":200 }, - "input": { - "shape": "CreateConfigRequest" - }, - "output": { - "shape": "ConfigIdResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"CreateConfigRequest"}, + "output":{"shape":"ConfigIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Creates a Config with the specified configData parameters.

\n

Only one type of configData can be specified.

" + "documentation":"

Creates a Config with the specified configData parameters.

Only one type of configData can be specified.

" }, - "CreateDataflowEndpointGroup": { - "name": "CreateDataflowEndpointGroup", - "http": { - "method": "POST", - "requestUri": "/dataflowEndpointGroup", - "responseCode": 200 + "CreateDataflowEndpointGroup":{ + "name":"CreateDataflowEndpointGroup", + "http":{ + "method":"POST", + "requestUri":"/dataflowEndpointGroup", + "responseCode":200 }, - "input": { - "shape": "CreateDataflowEndpointGroupRequest" - }, - "output": { - "shape": "DataflowEndpointGroupIdResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"CreateDataflowEndpointGroupRequest"}, + "output":{"shape":"DataflowEndpointGroupIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Creates a DataflowEndpoint group containing the specified list of DataflowEndpoint objects.

\n

The name field in each endpoint is used in your mission profile DataflowEndpointConfig \n to specify which endpoints to use during a contact.

\n

When a contact uses multiple DataflowEndpointConfig objects, each Config \n must match a DataflowEndpoint in the same group.

" + "documentation":"

Creates a DataflowEndpoint group containing the specified list of DataflowEndpoint objects.

The name field in each endpoint is used in your mission profile DataflowEndpointConfig to specify which endpoints to use during a contact.

When a contact uses multiple DataflowEndpointConfig objects, each Config must match a DataflowEndpoint in the same group.

" }, - "CreateMissionProfile": { - "name": "CreateMissionProfile", - "http": { - "method": "POST", - "requestUri": "/missionprofile", - "responseCode": 200 + "CreateMissionProfile":{ + "name":"CreateMissionProfile", + "http":{ + "method":"POST", + "requestUri":"/missionprofile", + "responseCode":200 }, - "input": { - "shape": "CreateMissionProfileRequest" - }, - "output": { - "shape": "MissionProfileIdResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"CreateMissionProfileRequest"}, + "output":{"shape":"MissionProfileIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Creates a mission profile.

\n

\n dataflowEdges is a list of lists of strings. Each lower level list of strings\n has two elements: a from ARN and a to ARN.

" + "documentation":"

Creates a mission profile.

dataflowEdges is a list of lists of strings. Each lower level list of strings has two elements: a from ARN and a to ARN.
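To illustrate the dataflowEdges shape, a hedged boto3 sketch; every ARN and duration below is a placeholder:

    import boto3

    groundstation = boto3.client("groundstation")

    # Each edge is a two-element list: [from Config ARN, to Config ARN].
    downlink_arn = "arn:aws:groundstation:us-east-2:111122223333:config/antenna-downlink/abc"   # placeholder
    endpoint_arn = "arn:aws:groundstation:us-east-2:111122223333:config/dataflow-endpoint/def"  # placeholder
    tracking_arn = "arn:aws:groundstation:us-east-2:111122223333:config/tracking/ghi"           # placeholder

    groundstation.create_mission_profile(
        name="example-profile",
        minimumViableContactDurationSeconds=120,
        contactPrePassDurationSeconds=120,
        contactPostPassDurationSeconds=120,
        dataflowEdges=[[downlink_arn, endpoint_arn]],
        trackingConfigArn=tracking_arn,
    )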

" }, - "DeleteConfig": { - "name": "DeleteConfig", - "http": { - "method": "DELETE", - "requestUri": "/config/{configType}/{configId}", - "responseCode": 200 + "DeleteConfig":{ + "name":"DeleteConfig", + "http":{ + "method":"DELETE", + "requestUri":"/config/{configType}/{configId}", + "responseCode":200 }, - "input": { - "shape": "DeleteConfigRequest" - }, - "output": { - "shape": "ConfigIdResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"DeleteConfigRequest"}, + "output":{"shape":"ConfigIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Deletes a Config.

", - "idempotent": true + "documentation":"

Deletes a Config.

", + "idempotent":true }, - "DeleteDataflowEndpointGroup": { - "name": "DeleteDataflowEndpointGroup", - "http": { - "method": "DELETE", - "requestUri": "/dataflowEndpointGroup/{dataflowEndpointGroupId}", - "responseCode": 200 + "DeleteDataflowEndpointGroup":{ + "name":"DeleteDataflowEndpointGroup", + "http":{ + "method":"DELETE", + "requestUri":"/dataflowEndpointGroup/{dataflowEndpointGroupId}", + "responseCode":200 }, - "input": { - "shape": "DeleteDataflowEndpointGroupRequest" - }, - "output": { - "shape": "DataflowEndpointGroupIdResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"DeleteDataflowEndpointGroupRequest"}, + "output":{"shape":"DataflowEndpointGroupIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Deletes a dataflow endpoint group.

", - "idempotent": true + "documentation":"

Deletes a dataflow endpoint group.

", + "idempotent":true }, - "DeleteMissionProfile": { - "name": "DeleteMissionProfile", - "http": { - "method": "DELETE", - "requestUri": "/missionprofile/{missionProfileId}", - "responseCode": 200 + "DeleteMissionProfile":{ + "name":"DeleteMissionProfile", + "http":{ + "method":"DELETE", + "requestUri":"/missionprofile/{missionProfileId}", + "responseCode":200 }, - "input": { - "shape": "DeleteMissionProfileRequest" - }, - "output": { - "shape": "MissionProfileIdResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"DeleteMissionProfileRequest"}, + "output":{"shape":"MissionProfileIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Deletes a mission profile.

", - "idempotent": true + "documentation":"

Deletes a mission profile.

", + "idempotent":true }, - "DescribeContact": { - "name": "DescribeContact", - "http": { - "method": "GET", - "requestUri": "/contact/{contactId}", - "responseCode": 200 + "DescribeContact":{ + "name":"DescribeContact", + "http":{ + "method":"GET", + "requestUri":"/contact/{contactId}", + "responseCode":200 }, - "input": { - "shape": "DescribeContactRequest" - }, - "output": { - "shape": "DescribeContactResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"DescribeContactRequest"}, + "output":{"shape":"DescribeContactResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Describes an existing contact.

" + "documentation":"

Describes an existing contact.

" }, - "GetConfig": { - "name": "GetConfig", - "http": { - "method": "GET", - "requestUri": "/config/{configType}/{configId}", - "responseCode": 200 + "GetConfig":{ + "name":"GetConfig", + "http":{ + "method":"GET", + "requestUri":"/config/{configType}/{configId}", + "responseCode":200 }, - "input": { - "shape": "GetConfigRequest" - }, - "output": { - "shape": "GetConfigResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"GetConfigRequest"}, + "output":{"shape":"GetConfigResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns Config information.

\n

Only one Config response can be returned.

" + "documentation":"

Returns Config information.

Only one Config response can be returned.

" }, - "GetDataflowEndpointGroup": { - "name": "GetDataflowEndpointGroup", - "http": { - "method": "GET", - "requestUri": "/dataflowEndpointGroup/{dataflowEndpointGroupId}", - "responseCode": 200 + "GetDataflowEndpointGroup":{ + "name":"GetDataflowEndpointGroup", + "http":{ + "method":"GET", + "requestUri":"/dataflowEndpointGroup/{dataflowEndpointGroupId}", + "responseCode":200 }, - "input": { - "shape": "GetDataflowEndpointGroupRequest" - }, - "output": { - "shape": "GetDataflowEndpointGroupResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"GetDataflowEndpointGroupRequest"}, + "output":{"shape":"GetDataflowEndpointGroupResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns the dataflow endpoint group.

" + "documentation":"

Returns the dataflow endpoint group.

" }, - "GetMissionProfile": { - "name": "GetMissionProfile", - "http": { - "method": "GET", - "requestUri": "/missionprofile/{missionProfileId}", - "responseCode": 200 + "GetMinuteUsage":{ + "name":"GetMinuteUsage", + "http":{ + "method":"POST", + "requestUri":"/minute-usage", + "responseCode":200 }, - "input": { - "shape": "GetMissionProfileRequest" - }, - "output": { - "shape": "GetMissionProfileResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"GetMinuteUsageRequest"}, + "output":{"shape":"GetMinuteUsageResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns a mission profile.

" + "documentation":"

Returns the number of minutes used by account.
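The hunk above adds a GetMinuteUsage operation to the Ground Station model. A minimal sketch of calling it through boto3, assuming the usual snake_case method mapping and that GetMinuteUsageRequest takes month/year fields (its members are not shown in this hunk)::

    # Hypothetical usage sketch; the parameters of get_minute_usage are assumed.
    import boto3

    client = boto3.client("groundstation")
    usage = client.get_minute_usage(month=1, year=2020)  # assumed request fields
    # estimatedMinutesRemaining / totalScheduledMinutes are GetMinuteUsageResponse members.
    print(usage.get("estimatedMinutesRemaining"), usage.get("totalScheduledMinutes"))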

" }, - "ListConfigs": { - "name": "ListConfigs", - "http": { - "method": "GET", - "requestUri": "/config", - "responseCode": 200 + "GetMissionProfile":{ + "name":"GetMissionProfile", + "http":{ + "method":"GET", + "requestUri":"/missionprofile/{missionProfileId}", + "responseCode":200 }, - "input": { - "shape": "ListConfigsRequest" - }, - "output": { - "shape": "ListConfigsResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"GetMissionProfileRequest"}, + "output":{"shape":"GetMissionProfileResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns a list of Config objects.

" + "documentation":"

Returns a mission profile.

" }, - "ListContacts": { - "name": "ListContacts", - "http": { - "method": "POST", - "requestUri": "/contacts", - "responseCode": 200 + "GetSatellite":{ + "name":"GetSatellite", + "http":{ + "method":"GET", + "requestUri":"/satellite/{satelliteId}", + "responseCode":200 }, - "input": { - "shape": "ListContactsRequest" - }, - "output": { - "shape": "ListContactsResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"GetSatelliteRequest"}, + "output":{"shape":"GetSatelliteResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns a list of contacts.

\n

If statusList contains AVAILABLE, the request must include\n groundstation, missionprofileArn, and satelliteArn.\n

" + "documentation":"

Returns a satellite.

" }, - "ListDataflowEndpointGroups": { - "name": "ListDataflowEndpointGroups", - "http": { - "method": "GET", - "requestUri": "/dataflowEndpointGroup", - "responseCode": 200 + "ListConfigs":{ + "name":"ListConfigs", + "http":{ + "method":"GET", + "requestUri":"/config", + "responseCode":200 }, - "input": { - "shape": "ListDataflowEndpointGroupsRequest" - }, - "output": { - "shape": "ListDataflowEndpointGroupsResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"ListConfigsRequest"}, + "output":{"shape":"ListConfigsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns a list of DataflowEndpoint groups.

" + "documentation":"

Returns a list of Config objects.

" }, - "ListMissionProfiles": { - "name": "ListMissionProfiles", - "http": { - "method": "GET", - "requestUri": "/missionprofile", - "responseCode": 200 + "ListContacts":{ + "name":"ListContacts", + "http":{ + "method":"POST", + "requestUri":"/contacts", + "responseCode":200 }, - "input": { - "shape": "ListMissionProfilesRequest" - }, - "output": { - "shape": "ListMissionProfilesResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"ListContactsRequest"}, + "output":{"shape":"ListContactsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns a list of mission profiles.

" + "documentation":"

Returns a list of contacts.

If statusList contains AVAILABLE, the request must include groundStation, missionprofileArn, and satelliteArn.
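The documentation above states an API constraint: listing AVAILABLE contacts requires groundStation, missionProfileArn, and satelliteArn. A minimal boto3 sketch of such a request, using the member names from ListContactsRequest defined later in this file; the ARNs, the ground station name, and the contactList response key are placeholders or assumptions::

    # Sketch of ListContacts with statusList=["AVAILABLE"]; identifiers are placeholders.
    import datetime
    import boto3

    client = boto3.client("groundstation")
    contacts = client.list_contacts(
        statusList=["AVAILABLE"],
        startTime=datetime.datetime(2020, 2, 1),
        endTime=datetime.datetime(2020, 2, 2),
        groundStation="Ground Station 1",               # placeholder name
        missionProfileArn="arn:aws:groundstation:...",  # placeholder ARN
        satelliteArn="arn:aws:groundstation:...",       # placeholder ARN
    )
    # "contactList" is the assumed response member holding ContactData items.
    for contact in contacts.get("contactList", []):
        print(contact["contactId"], contact["contactStatus"])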

" }, - "ReserveContact": { - "name": "ReserveContact", - "http": { - "method": "POST", - "requestUri": "/contact", - "responseCode": 200 + "ListDataflowEndpointGroups":{ + "name":"ListDataflowEndpointGroups", + "http":{ + "method":"GET", + "requestUri":"/dataflowEndpointGroup", + "responseCode":200 }, - "input": { - "shape": "ReserveContactRequest" - }, - "output": { - "shape": "ContactIdResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"ListDataflowEndpointGroupsRequest"}, + "output":{"shape":"ListDataflowEndpointGroupsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Reserves a contact using specified parameters.

" + "documentation":"

Returns a list of DataflowEndpoint groups.

" }, - "UpdateConfig": { - "name": "UpdateConfig", - "http": { - "method": "PUT", - "requestUri": "/config/{configType}/{configId}", - "responseCode": 200 + "ListGroundStations":{ + "name":"ListGroundStations", + "http":{ + "method":"GET", + "requestUri":"/groundstation", + "responseCode":200 }, - "input": { - "shape": "UpdateConfigRequest" - }, - "output": { - "shape": "ConfigIdResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"ListGroundStationsRequest"}, + "output":{"shape":"ListGroundStationsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Updates the Config used when scheduling contacts.

\n

Updating a Config will not update the execution parameters\n for existing future contacts scheduled with this Config.

", - "idempotent": true + "documentation":"

Returns a list of ground stations.

" }, - "UpdateMissionProfile": { - "name": "UpdateMissionProfile", - "http": { - "method": "PUT", - "requestUri": "/missionprofile/{missionProfileId}", - "responseCode": 200 + "ListMissionProfiles":{ + "name":"ListMissionProfiles", + "http":{ + "method":"GET", + "requestUri":"/missionprofile", + "responseCode":200 }, - "input": { - "shape": "UpdateMissionProfileRequest" - }, - "output": { - "shape": "MissionProfileIdResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"ListMissionProfilesRequest"}, + "output":{"shape":"ListMissionProfilesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Updates a mission profile.

\n

Updating a mission profile will not update the execution parameters\n for existing future contacts.

", - "idempotent": true + "documentation":"

Returns a list of mission profiles.

" }, - "GetMinuteUsage": { - "name": "GetMinuteUsage", - "http": { - "method": "POST", - "requestUri": "/minute-usage", - "responseCode": 200 + "ListSatellites":{ + "name":"ListSatellites", + "http":{ + "method":"GET", + "requestUri":"/satellite", + "responseCode":200 }, - "input": { - "shape": "GetMinuteUsageRequest" - }, - "output": { - "shape": "GetMinuteUsageResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"ListSatellitesRequest"}, + "output":{"shape":"ListSatellitesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns the number of minutes used by account.

" + "documentation":"

Returns a list of satellites.

" }, - "GetSatellite": { - "name": "GetSatellite", - "http": { - "method": "GET", - "requestUri": "/satellite/{satelliteId}", - "responseCode": 200 + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 }, - "input": { - "shape": "GetSatelliteRequest" - }, - "output": { - "shape": "GetSatelliteResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns a satellite.

" + "documentation":"

Returns a list of tags for a specified resource.

" }, - "ListGroundStations": { - "name": "ListGroundStations", - "http": { - "method": "GET", - "requestUri": "/groundstation", - "responseCode": 200 + "ReserveContact":{ + "name":"ReserveContact", + "http":{ + "method":"POST", + "requestUri":"/contact", + "responseCode":200 }, - "input": { - "shape": "ListGroundStationsRequest" - }, - "output": { - "shape": "ListGroundStationsResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"ReserveContactRequest"}, + "output":{"shape":"ContactIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns a list of ground stations.

" + "documentation":"

Reserves a contact using specified parameters.

" }, - "ListSatellites": { - "name": "ListSatellites", - "http": { - "method": "GET", - "requestUri": "/satellite", - "responseCode": 200 + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 }, - "input": { - "shape": "ListSatellitesRequest" - }, - "output": { - "shape": "ListSatellitesResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns a list of satellites.

" + "documentation":"

Assigns a tag to a resource.

" }, - "ListTagsForResource": { - "name": "ListTagsForResource", - "http": { - "method": "GET", - "requestUri": "/tags/{resourceArn}", - "responseCode": 200 + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 }, - "input": { - "shape": "ListTagsForResourceRequest" - }, - "output": { - "shape": "ListTagsForResourceResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Returns a list of tags or a specified resource.

" + "documentation":"

Deassigns a resource tag.
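TagResource, UntagResource, and ListTagsForResource are new in this model. A hedged sketch of round-tripping a tag: resourceArn and tagKeys follow UntagResourceRequest as defined later in this hunk, while the tags parameter and the tags response key are assumptions (those shapes are not shown here)::

    # Sketch only; the resource ARN is a placeholder.
    import boto3

    client = boto3.client("groundstation")
    arn = "arn:aws:groundstation:..."  # placeholder resource ARN

    client.tag_resource(resourceArn=arn, tags={"team": "ops"})          # "tags" assumed
    print(client.list_tags_for_resource(resourceArn=arn).get("tags"))   # key assumed
    client.untag_resource(resourceArn=arn, tagKeys=["team"])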

", + "idempotent":true }, - "TagResource": { - "name": "TagResource", - "http": { - "method": "POST", - "requestUri": "/tags/{resourceArn}", - "responseCode": 200 + "UpdateConfig":{ + "name":"UpdateConfig", + "http":{ + "method":"PUT", + "requestUri":"/config/{configType}/{configId}", + "responseCode":200 }, - "input": { - "shape": "TagResourceRequest" - }, - "output": { - "shape": "TagResourceResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"UpdateConfigRequest"}, + "output":{"shape":"ConfigIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Assigns a tag to a resource.

" + "documentation":"

Updates the Config used when scheduling contacts.

Updating a Config will not update the execution parameters for existing future contacts scheduled with this Config.

", + "idempotent":true }, - "UntagResource": { - "name": "UntagResource", - "http": { - "method": "DELETE", - "requestUri": "/tags/{resourceArn}", - "responseCode": 200 + "UpdateMissionProfile":{ + "name":"UpdateMissionProfile", + "http":{ + "method":"PUT", + "requestUri":"/missionprofile/{missionProfileId}", + "responseCode":200 }, - "input": { - "shape": "UntagResourceRequest" - }, - "output": { - "shape": "UntagResourceResponse" - }, - "errors": [ - { - "shape": "DependencyException" - }, - { - "shape": "InvalidParameterException" - }, - { - "shape": "ResourceNotFoundException" - } + "input":{"shape":"UpdateMissionProfileRequest"}, + "output":{"shape":"MissionProfileIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DependencyException"}, + {"shape":"ResourceNotFoundException"} ], - "documentation": "

Deassigns a resource tag.

", - "idempotent": true + "documentation":"

Updates a mission profile.

Updating a mission profile will not update the execution parameters for existing future contacts.

", + "idempotent":true } }, - "shapes": { - "UpdateConfigRequest": { - "type": "structure", - "required": [ - "configData", - "configId", - "configType", - "name" - ], - "members": { - "configData": { - "shape": "ConfigTypeData", - "documentation": "

Parameters for a Config.

" - }, - "configId": { - "shape": "String", - "documentation": "

UUID of a Config.

", - "location": "uri", - "locationName": "configId" - }, - "configType": { - "shape": "ConfigCapabilityType", - "documentation": "

Type of a Config.

", - "location": "uri", - "locationName": "configType" - }, - "name": { - "shape": "SafeName", - "documentation": "

Name of a Config.

" + "shapes":{ + "AngleUnits":{ + "type":"string", + "enum":[ + "DEGREE_ANGLE", + "RADIAN" + ] + }, + "AntennaDownlinkConfig":{ + "type":"structure", + "required":["spectrumConfig"], + "members":{ + "spectrumConfig":{ + "shape":"SpectrumConfig", + "documentation":"

Object that describes a spectral Config.

" } }, - "documentation": "

" + "documentation":"

Information about how AWS Ground Station should configure an antenna for downlink during a contact.

" }, - "ConfigTypeData": { - "type": "structure", - "members": { - "antennaDownlinkConfig": { - "shape": "AntennaDownlinkConfig", - "documentation": "

Information about how AWS Ground Station should configure an antenna for downlink during a contact.

" - }, - "antennaDownlinkDemodDecodeConfig": { - "shape": "AntennaDownlinkDemodDecodeConfig", - "documentation": "

Information about how AWS Ground Station should configure an antenna for downlink demod decode during a contact.

" - }, - "antennaUplinkConfig": { - "shape": "AntennaUplinkConfig", - "documentation": "

Information about how AWS Ground Station should configure an antenna for uplink during a contact.

" - }, - "dataflowEndpointConfig": { - "shape": "DataflowEndpointConfig", - "documentation": "

Information about the dataflow endpoint Config.

" - }, - "trackingConfig": { - "shape": "TrackingConfig", - "documentation": "

Object that determines whether tracking should be used during a contact executed with this Config in the mission profile.

" - }, - "uplinkEchoConfig": { - "shape": "UplinkEchoConfig", - "documentation": "

Information about an uplink echo Config.

\n

Parameters from the AntennaUplinkConfig, corresponding to the specified AntennaUplinkConfigArn, are used when this UplinkEchoConfig is used in a contact.

" - } - }, - "documentation": "

Object containing the parameters for a Config.

\n

See the subtype definitions for what each type of Config contains.

" - }, - "noradSatelliteID": { - "type": "integer", - "min": 1, - "max": 99999 - }, - "GroundStationData": { - "type": "structure", - "members": { - "groundStationId": { - "shape": "String", - "documentation": "

ID of a ground station.

" - }, - "groundStationName": { - "shape": "String", - "documentation": "

Name of a ground station.

" - }, - "region": { - "shape": "String", - "documentation": "

Ground station Region.

" - } - }, - "documentation": "

Information about the ground station data.

" - }, - "GetConfigRequest": { - "type": "structure", - "required": [ - "configId", - "configType" - ], - "members": { - "configId": { - "shape": "String", - "documentation": "

UUID of a Config.

", - "location": "uri", - "locationName": "configId" - }, - "configType": { - "shape": "ConfigCapabilityType", - "documentation": "

Type of a Config.

", - "location": "uri", - "locationName": "configType" - } - }, - "documentation": "

" - }, - "GroundStationList": { - "type": "list", - "member": { - "shape": "GroundStationData" - } - }, - "SecurityGroupIdList": { - "type": "list", - "member": { - "shape": "String" - } - }, - "EndpointDetails": { - "type": "structure", - "members": { - "endpoint": { - "shape": "DataflowEndpoint", - "documentation": "

A dataflow endpoint.

" - }, - "securityDetails": { - "shape": "SecurityDetails", - "documentation": "

Endpoint security details.

" - } - }, - "documentation": "

Information about the endpoint details.

" - }, - "DataflowEndpointGroupArn": { - "type": "string" - }, - "GetMinuteUsageResponse": { - "type": "structure", - "members": { - "estimatedMinutesRemaining": { - "shape": "Integer", - "documentation": "

Estimated number of minutes remaining for an account, specific to the month being requested.

" - }, - "isReservedMinutesCustomer": { - "shape": "Boolean", - "documentation": "

Returns whether or not an account has signed up for the reserved minutes pricing plan, specific to the month being requested.

" - }, - "totalReservedMinuteAllocation": { - "shape": "Integer", - "documentation": "

Total number of reserved minutes allocated, specific to the month being requested.

" - }, - "totalScheduledMinutes": { - "shape": "Integer", - "documentation": "

Total scheduled minutes for an account, specific to the month being requested.

" - }, - "upcomingMinutesScheduled": { - "shape": "Integer", - "documentation": "

Upcoming minutes scheduled for an account, specific to the month being requested.

" - } - }, - "documentation": "

" - }, - "MissionProfileListItem": { - "type": "structure", - "members": { - "missionProfileArn": { - "shape": "MissionProfileArn", - "documentation": "

ARN of a mission profile.

" - }, - "missionProfileId": { - "shape": "String", - "documentation": "

ID of a mission profile.

" - }, - "name": { - "shape": "String", - "documentation": "

Name of a mission profile.

" - }, - "region": { - "shape": "String", - "documentation": "

Region of a mission profile.

" - } - }, - "documentation": "

Item in a list of mission profiles.

" - }, - "SatelliteList": { - "type": "list", - "member": { - "shape": "SatelliteListItem" - } - }, - "ListDataflowEndpointGroupsResponse": { - "type": "structure", - "members": { - "dataflowEndpointGroupList": { - "shape": "DataflowEndpointGroupList", - "documentation": "

A list of dataflow endpoint groups.

" - }, - "nextToken": { - "shape": "String", - "documentation": "

Next token returned in the response of a previous ListDataflowEndpointGroups call. Used to get the next page of results.

" - } - }, - "documentation": "

" - }, - "AntennaDownlinkDemodDecodeConfig": { - "type": "structure", - "required": [ + "AntennaDownlinkDemodDecodeConfig":{ + "type":"structure", + "required":[ "decodeConfig", "demodulationConfig", "spectrumConfig" ], - "members": { - "decodeConfig": { - "shape": "DecodeConfig", - "documentation": "

Information about the decode Config.

" + "members":{ + "decodeConfig":{ + "shape":"DecodeConfig", + "documentation":"

Information about the decode Config.

" }, - "demodulationConfig": { - "shape": "DemodulationConfig", - "documentation": "

Information about the demodulation Config.

" + "demodulationConfig":{ + "shape":"DemodulationConfig", + "documentation":"

Information about the demodulation Config.

" }, - "spectrumConfig": { - "shape": "SpectrumConfig", - "documentation": "

Information about the spectral Config.

" + "spectrumConfig":{ + "shape":"SpectrumConfig", + "documentation":"

Information about the spectral Config.

" } }, - "documentation": "

Information about how AWS Ground Station should configure an antenna for downlink demod decode during a contact.

" + "documentation":"

Information about how AWS Ground Station should configure an antenna for downlink demod decode during a contact.

" }, - "MissionProfileIdResponse": { - "type": "structure", - "members": { - "missionProfileId": { - "shape": "String", - "documentation": "

ID of a mission profile.

" - } - }, - "documentation": "

" - }, - "SubnetList": { - "type": "list", - "member": { - "shape": "String" - } - }, - "Polarization": { - "type": "string", - "enum": [ - "LEFT_HAND", - "NONE", - "RIGHT_HAND" - ] - }, - "ConfigList": { - "type": "list", - "member": { - "shape": "ConfigListItem" - } - }, - "AntennaUplinkConfig": { - "type": "structure", - "required": [ + "AntennaUplinkConfig":{ + "type":"structure", + "required":[ "spectrumConfig", "targetEirp" ], - "members": { - "spectrumConfig": { - "shape": "UplinkSpectrumConfig", - "documentation": "

Information about the uplink spectral Config.

" + "members":{ + "spectrumConfig":{ + "shape":"UplinkSpectrumConfig", + "documentation":"

Information about the uplink spectral Config.

" }, - "targetEirp": { - "shape": "Eirp", - "documentation": "

EIRP of the target.

" + "targetEirp":{ + "shape":"Eirp", + "documentation":"

EIRP of the target.

" } }, - "documentation": "

Information about the uplink Config of an antenna.

" + "documentation":"

Information about the uplink Config of an antenna.

" }, - "Integer": { - "type": "integer", - "box": true - }, - "AntennaDownlinkConfig": { - "type": "structure", - "required": [ - "spectrumConfig" - ], - "members": { - "spectrumConfig": { - "shape": "SpectrumConfig", - "documentation": "

Object that describes a spectral Config.

" - } - }, - "documentation": "

Information about how AWS Ground Station should configure an\n antenna for downlink during a contact.

" - }, - "Boolean": { - "type": "boolean", - "box": true - }, - "EndpointStatus": { - "type": "string", - "enum": [ - "created", - "creating", - "deleted", - "deleting", - "failed" + "BandwidthUnits":{ + "type":"string", + "enum":[ + "GHz", + "MHz", + "kHz" ] }, - "UplinkEchoConfig": { - "type": "structure", - "required": [ - "antennaUplinkConfigArn", - "enabled" - ], - "members": { - "antennaUplinkConfigArn": { - "shape": "ConfigArn", - "documentation": "

ARN of an uplink Config.

" + "Boolean":{ + "type":"boolean", + "box":true + }, + "CancelContactRequest":{ + "type":"structure", + "required":["contactId"], + "members":{ + "contactId":{ + "shape":"String", + "documentation":"

UUID of a contact.

", + "location":"uri", + "locationName":"contactId" + } + }, + "documentation":"

" + }, + "ConfigArn":{"type":"string"}, + "ConfigCapabilityType":{ + "type":"string", + "enum":[ + "antenna-downlink", + "antenna-downlink-demod-decode", + "antenna-uplink", + "dataflow-endpoint", + "tracking", + "uplink-echo" + ] + }, + "ConfigIdResponse":{ + "type":"structure", + "members":{ + "configArn":{ + "shape":"ConfigArn", + "documentation":"

ARN of a Config.

" }, - "enabled": { - "shape": "Boolean", - "documentation": "

Whether or not an uplink Config is enabled.

" + "configId":{ + "shape":"String", + "documentation":"

UUID of a Config.

" + }, + "configType":{ + "shape":"ConfigCapabilityType", + "documentation":"

Type of a Config.

" } }, - "documentation": "

Information about an uplink echo Config.

\n

Parameters from the AntennaUplinkConfig, corresponding to the \n specified AntennaUplinkConfigArn, are used when this UplinkEchoConfig \n is used in a contact.

" + "documentation":"

" }, - "DecodeConfig": { - "type": "structure", - "required": [ - "unvalidatedJSON" - ], - "members": { - "unvalidatedJSON": { - "shape": "JsonString", - "documentation": "

Unvalidated JSON of a decode Config.

" + "ConfigList":{ + "type":"list", + "member":{"shape":"ConfigListItem"} + }, + "ConfigListItem":{ + "type":"structure", + "members":{ + "configArn":{ + "shape":"ConfigArn", + "documentation":"

ARN of a Config.

" + }, + "configId":{ + "shape":"String", + "documentation":"

UUID of a Config.

" + }, + "configType":{ + "shape":"ConfigCapabilityType", + "documentation":"

Type of a Config.

" + }, + "name":{ + "shape":"String", + "documentation":"

Name of a Config.

" } }, - "documentation": "

Information about the decode Config.

" + "documentation":"

An item in a list of Config objects.

" }, - "DeleteDataflowEndpointGroupRequest": { - "type": "structure", - "required": [ - "dataflowEndpointGroupId" - ], - "members": { - "dataflowEndpointGroupId": { - "shape": "String", - "documentation": "

ID of a dataflow endpoint group.

", - "location": "uri", - "locationName": "dataflowEndpointGroupId" + "ConfigTypeData":{ + "type":"structure", + "members":{ + "antennaDownlinkConfig":{ + "shape":"AntennaDownlinkConfig", + "documentation":"

Information about how AWS Ground Station should configure an antenna for downlink during a contact.

" + }, + "antennaDownlinkDemodDecodeConfig":{ + "shape":"AntennaDownlinkDemodDecodeConfig", + "documentation":"

Information about how AWS Ground Station should configure an antenna for downlink demod decode during a contact.

" + }, + "antennaUplinkConfig":{ + "shape":"AntennaUplinkConfig", + "documentation":"

Information about how AWS Ground Station should configure an antenna for uplink during a contact.

" + }, + "dataflowEndpointConfig":{ + "shape":"DataflowEndpointConfig", + "documentation":"

Information about the dataflow endpoint Config.

" + }, + "trackingConfig":{ + "shape":"TrackingConfig", + "documentation":"

Object that determines whether tracking should be used during a contact executed with this Config in the mission profile.

" + }, + "uplinkEchoConfig":{ + "shape":"UplinkEchoConfig", + "documentation":"

Information about an uplink echo Config.

Parameters from the AntennaUplinkConfig, corresponding to the specified AntennaUplinkConfigArn, are used when this UplinkEchoConfig is used in a contact.

" } }, - "documentation": "

" + "documentation":"

Object containing the parameters of a Config.

See the subtype definitions for what each type of Config contains.
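ConfigTypeData is the wrapper passed as configData to CreateConfig and UpdateConfig, and in practice one of its subtype members is supplied per Config. A minimal sketch creating a tracking Config, using only members defined in this hunk (TrackingConfig.autotrack with a Criticality value); the snake_case method name is assumed::

    # Sketch: configData carries a single ConfigTypeData member in this example.
    import boto3

    client = boto3.client("groundstation")
    response = client.create_config(
        name="autotrack-required",
        configData={"trackingConfig": {"autotrack": "REQUIRED"}},
    )
    # configArn and configType are ConfigIdResponse members defined in this file.
    print(response["configArn"], response["configType"])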

" }, - "ContactStatus": { - "type": "string", - "enum": [ + "ContactData":{ + "type":"structure", + "members":{ + "contactId":{ + "shape":"String", + "documentation":"

UUID of a contact.

" + }, + "contactStatus":{ + "shape":"ContactStatus", + "documentation":"

Status of a contact.

" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

End time of a contact.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

Error message of a contact.

" + }, + "groundStation":{ + "shape":"String", + "documentation":"

Name of a ground station.

" + }, + "maximumElevation":{ + "shape":"Elevation", + "documentation":"

Maximum elevation angle of a contact.

" + }, + "missionProfileArn":{ + "shape":"MissionProfileArn", + "documentation":"

ARN of a mission profile.

" + }, + "postPassEndTime":{ + "shape":"Timestamp", + "documentation":"

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" + }, + "prePassStartTime":{ + "shape":"Timestamp", + "documentation":"

Amount of time prior to contact start you’d like to receive a CloudWatch event indicating an upcoming pass.

" + }, + "region":{ + "shape":"String", + "documentation":"

Region of a contact.

" + }, + "satelliteArn":{ + "shape":"satelliteArn", + "documentation":"

ARN of a satellite.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

Start time of a contact.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags assigned to a contact.

" + } + }, + "documentation":"

Data describing a contact.

" + }, + "ContactIdResponse":{ + "type":"structure", + "members":{ + "contactId":{ + "shape":"String", + "documentation":"

UUID of a contact.

" + } + }, + "documentation":"

" + }, + "ContactList":{ + "type":"list", + "member":{"shape":"ContactData"} + }, + "ContactStatus":{ + "type":"string", + "enum":[ "AVAILABLE", "AWS_CANCELLED", "CANCELLED", + "CANCELLING", "COMPLETED", "FAILED", "FAILED_TO_SCHEDULE", @@ -1040,1316 +681,1376 @@ "SCHEDULING" ] }, - "MissionProfileList": { - "type": "list", - "member": { - "shape": "MissionProfileListItem" - } - }, - "CreateConfigRequest": { - "type": "structure", - "required": [ + "CreateConfigRequest":{ + "type":"structure", + "required":[ "configData", "name" ], - "members": { - "configData": { - "shape": "ConfigTypeData", - "documentation": "

Parameters of a Config.

" + "members":{ + "configData":{ + "shape":"ConfigTypeData", + "documentation":"

Parameters of a Config.

" }, - "name": { - "shape": "SafeName", - "documentation": "

Name of a Config.

" + "name":{ + "shape":"SafeName", + "documentation":"

Name of a Config.

" }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a Config.

" + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags assigned to a Config.

" } }, - "documentation": "

" + "documentation":"

" }, - "Frequency": { - "type": "structure", - "required": [ - "units", - "value" - ], - "members": { - "units": { - "shape": "FrequencyUnits", - "documentation": "

Frequency units.

" + "CreateDataflowEndpointGroupRequest":{ + "type":"structure", + "required":["endpointDetails"], + "members":{ + "endpointDetails":{ + "shape":"EndpointDetailsList", + "documentation":"

Endpoint details of each endpoint in the dataflow endpoint group.

" }, - "value": { - "shape": "Double", - "documentation": "

Frequency value.

" + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags of a dataflow endpoint group.

" } }, - "documentation": "

Object that describes the frequency.

" + "documentation":"

" }, - "UntagResourceResponse": { - "type": "structure", - "members": { }, - "documentation": "

" - }, - "ConfigIdResponse": { - "type": "structure", - "members": { - "configArn": { - "shape": "ConfigArn", - "documentation": "

ARN of a Config.

" - }, - "configId": { - "shape": "String", - "documentation": "

UUID of a Config.

" - }, - "configType": { - "shape": "ConfigCapabilityType", - "documentation": "

Type of a Config.

" - } - }, - "documentation": "

" - }, - "SecurityDetails": { - "type": "structure", - "required": [ - "roleArn", - "securityGroupIds", - "subnetIds" - ], - "members": { - "roleArn": { - "shape": "RoleArn", - "documentation": "

ARN to a role needed for connecting streams to your instances.

" - }, - "securityGroupIds": { - "shape": "SecurityGroupIdList", - "documentation": "

The security groups to attach to the elastic network interfaces.

" - }, - "subnetIds": { - "shape": "SubnetList", - "documentation": "

A list of subnets where AWS Ground Station places elastic network interfaces to send streams to your instances.

" - } - }, - "documentation": "

Information about endpoints.

" - }, - "TrackingConfig": { - "type": "structure", - "required": [ - "autotrack" - ], - "members": { - "autotrack": { - "shape": "Criticality", - "documentation": "

Current setting for autotrack.

" - } - }, - "documentation": "

Object that determines whether tracking should be used during a contact\n executed with this Config in the mission profile.

" - }, - "CreateDataflowEndpointGroupRequest": { - "type": "structure", - "required": [ - "endpointDetails" - ], - "members": { - "endpointDetails": { - "shape": "EndpointDetailsList", - "documentation": "

Endpoint details of each endpoint in the dataflow endpoint group.

" - }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags of a dataflow endpoint group.

" - } - }, - "documentation": "

" - }, - "Elevation": { - "type": "structure", - "required": [ - "unit", - "value" - ], - "members": { - "unit": { - "shape": "AngleUnits", - "documentation": "

Elevation angle units.

" - }, - "value": { - "shape": "Double", - "documentation": "

Elevation angle value.

" - } - }, - "documentation": "

Elevation angle of the satellite in the sky during a contact.

" - }, - "JsonString": { - "type": "string", - "min": 2, - "max": 8192 - }, - "GetSatelliteRequest": { - "type": "structure", - "required": [ - "satelliteId" - ], - "members": { - "satelliteId": { - "shape": "String", - "documentation": "

UUID of a satellite.

", - "location": "uri", - "locationName": "satelliteId" - } - }, - "documentation": "

" - }, - "CancelContactRequest": { - "type": "structure", - "required": [ - "contactId" - ], - "members": { - "contactId": { - "shape": "String", - "documentation": "

UUID of a contact.

", - "location": "uri", - "locationName": "contactId" - } - }, - "documentation": "

" - }, - "UplinkSpectrumConfig": { - "type": "structure", - "required": [ - "centerFrequency" - ], - "members": { - "centerFrequency": { - "shape": "Frequency", - "documentation": "

Center frequency of an uplink spectral Config.

" - }, - "polarization": { - "shape": "Polarization", - "documentation": "

Polarization of an uplink spectral Config.

" - } - }, - "documentation": "

Information about the uplink spectral Config.

" - }, - "UntagResourceRequest": { - "type": "structure", - "required": [ - "resourceArn", - "tagKeys" - ], - "members": { - "resourceArn": { - "shape": "String", - "documentation": "

ARN of a resource.

", - "location": "uri", - "locationName": "resourceArn" - }, - "tagKeys": { - "shape": "TagKeys", - "documentation": "

Keys of a resource tag.

", - "location": "querystring", - "locationName": "tagKeys" - } - }, - "documentation": "

" - }, - "satelliteArn": { - "type": "string" - }, - "GetMissionProfileResponse": { - "type": "structure", - "members": { - "contactPostPassDurationSeconds": { - "shape": "DurationInSeconds", - "documentation": "

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" - }, - "contactPrePassDurationSeconds": { - "shape": "DurationInSeconds", - "documentation": "

Amount of time prior to contact start you’d like to receive a CloudWatch event indicating an upcoming pass.

" - }, - "dataflowEdges": { - "shape": "DataflowEdgeList", - "documentation": "

A list of lists of ARNs. Each list of ARNs is an edge, with a from Config and a to \n Config.

" - }, - "minimumViableContactDurationSeconds": { - "shape": "DurationInSeconds", - "documentation": "

Smallest amount of time in seconds that you’d like to see for an available contact. AWS Ground Station will not present you with contacts shorter than this duration.

" - }, - "missionProfileArn": { - "shape": "MissionProfileArn", - "documentation": "

ARN of a mission profile.

" - }, - "missionProfileId": { - "shape": "String", - "documentation": "

ID of a mission profile.

" - }, - "name": { - "shape": "String", - "documentation": "

Name of a mission profile.

" - }, - "region": { - "shape": "String", - "documentation": "

Region of a mission profile.

" - }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a mission profile.

" - }, - "trackingConfigArn": { - "shape": "ConfigArn", - "documentation": "

ARN of a tracking Config.

" - } - }, - "documentation": "

" - }, - "ContactIdResponse": { - "type": "structure", - "members": { - "contactId": { - "shape": "String", - "documentation": "

UUID of a contact.

" - } - }, - "documentation": "

" - }, - "EndpointDetailsList": { - "type": "list", - "member": { - "shape": "EndpointDetails" - } - }, - "ListGroundStationsRequest": { - "type": "structure", - "members": { - "maxResults": { - "shape": "Integer", - "documentation": "

Maximum number of ground stations returned.

", - "location": "querystring", - "locationName": "maxResults" - }, - "nextToken": { - "shape": "String", - "documentation": "

Next token that can be supplied in the next call to get the next page of ground stations.

", - "location": "querystring", - "locationName": "nextToken" - } - }, - "documentation": "

" - }, - "InvalidParameterException": { - "type": "structure", - "members": { - "message": { - "shape": "String" - }, - "parameterName": { - "shape": "String", - "documentation": "

" - } - }, - "documentation": "

One or more parameters are not valid.

", - "exception": true, - "error": { - "code": "InvalidParameterException", - "httpStatusCode": 431, - "senderFault": true - } - }, - "DependencyException": { - "type": "structure", - "members": { - "message": { - "shape": "String" - }, - "parameterName": { - "shape": "String", - "documentation": "

" - } - }, - "documentation": "

Dependency encountered an error.

", - "exception": true, - "error": { - "code": "DependencyException", - "httpStatusCode": 531, - "fault": true - } - }, - "DescribeContactRequest": { - "type": "structure", - "required": [ - "contactId" - ], - "members": { - "contactId": { - "shape": "String", - "documentation": "

UUID of a contact.

", - "location": "uri", - "locationName": "contactId" - } - }, - "documentation": "

" - }, - "ResourceNotFoundException": { - "type": "structure", - "members": { - "message": { - "shape": "String" - } - }, - "documentation": "

Resource was not found.

", - "exception": true, - "error": { - "code": "ResourceNotFoundException", - "httpStatusCode": 434, - "senderFault": true - } - }, - "Timestamp": { - "type": "timestamp" - }, - "DeleteConfigRequest": { - "type": "structure", - "required": [ - "configId", - "configType" - ], - "members": { - "configId": { - "shape": "String", - "documentation": "

UUID of a Config.

", - "location": "uri", - "locationName": "configId" - }, - "configType": { - "shape": "ConfigCapabilityType", - "documentation": "

Type of a Config.

", - "location": "uri", - "locationName": "configType" - } - }, - "documentation": "

" - }, - "BandwidthUnits": { - "type": "string", - "enum": [ - "GHz", - "MHz", - "kHz" - ] - }, - "SpectrumConfig": { - "type": "structure", - "required": [ - "bandwidth", - "centerFrequency" - ], - "members": { - "bandwidth": { - "shape": "FrequencyBandwidth", - "documentation": "

Bandwidth of a spectral Config.

" - }, - "centerFrequency": { - "shape": "Frequency", - "documentation": "

Center frequency of a spectral Config.

" - }, - "polarization": { - "shape": "Polarization", - "documentation": "

Polarization of a spectral Config.

" - } - }, - "documentation": "

Object that describes a spectral Config.

" - }, - "DemodulationConfig": { - "type": "structure", - "required": [ - "unvalidatedJSON" - ], - "members": { - "unvalidatedJSON": { - "shape": "JsonString", - "documentation": "

Unvalidated JSON of a demodulation Config.

" - } - }, - "documentation": "

Information about the demodulation Config.

" - }, - "ListMissionProfilesResponse": { - "type": "structure", - "members": { - "missionProfileList": { - "shape": "MissionProfileList", - "documentation": "

List of mission profiles

" - }, - "nextToken": { - "shape": "String", - "documentation": "

Next token returned in the response of a previous ListMissionProfiles call. Used to get the next page of results.

" - } - }, - "documentation": "

" - }, - "ListConfigsResponse": { - "type": "structure", - "members": { - "configList": { - "shape": "ConfigList", - "documentation": "

List of Config items.

" - }, - "nextToken": { - "shape": "String", - "documentation": "

Next token returned in the response of a previous ListConfigs call. Used to get the next page of results.

" - } - }, - "documentation": "

" - }, - "DataflowEdge": { - "type": "list", - "member": { - "shape": "ConfigArn" - }, - "min": 2, - "max": 2 - }, - "SafeName": { - "type": "string", - "min": 1, - "max": 256, - "pattern": "^[ a-zA-Z0-9_:-]+$" - }, - "Eirp": { - "type": "structure", - "required": [ - "units", - "value" - ], - "members": { - "units": { - "shape": "EirpUnits", - "documentation": "

Units of an EIRP.

" - }, - "value": { - "shape": "Double", - "documentation": "

Value of an EIRP.

" - } - }, - "documentation": "

Object that represents EIRP.

" - }, - "RoleArn": { - "type": "string" - }, - "ListMissionProfilesRequest": { - "type": "structure", - "members": { - "maxResults": { - "shape": "Integer", - "documentation": "

Maximum number of mission profiles returned.

", - "location": "querystring", - "locationName": "maxResults" - }, - "nextToken": { - "shape": "String", - "documentation": "

Next token returned in the request of a previous ListMissionProfiles call. Used to get the next page of results.

", - "location": "querystring", - "locationName": "nextToken" - } - }, - "documentation": "

" - }, - "GetSatelliteResponse": { - "type": "structure", - "members": { - "dateCreated": { - "shape": "Timestamp", - "documentation": "

When a satellite was created.

" - }, - "lastUpdated": { - "shape": "Timestamp", - "documentation": "

When a satellite was last updated.

" - }, - "noradSatelliteID": { - "shape": "noradSatelliteID", - "documentation": "

NORAD satellite ID number.

" - }, - "satelliteArn": { - "shape": "satelliteArn", - "documentation": "

ARN of a satellite.

" - }, - "satelliteId": { - "shape": "Uuid", - "documentation": "

UUID of a satellite.

" - }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a satellite.

" - } - }, - "documentation": "

" - }, - "StatusList": { - "type": "list", - "member": { - "shape": "ContactStatus" - } - }, - "ListContactsRequest": { - "type": "structure", - "required": [ - "endTime", - "startTime", - "statusList" - ], - "members": { - "endTime": { - "shape": "Timestamp", - "documentation": "

End time of a contact.

" - }, - "groundStation": { - "shape": "String", - "documentation": "

Name of a ground station.

" - }, - "maxResults": { - "shape": "Integer", - "documentation": "

Maximum number of contacts returned.

" - }, - "missionProfileArn": { - "shape": "MissionProfileArn", - "documentation": "

ARN of a mission profile.

" - }, - "nextToken": { - "shape": "String", - "documentation": "

Next token returned in the request of a previous ListContacts call. Used to get the next page of results.

" - }, - "satelliteArn": { - "shape": "satelliteArn", - "documentation": "

ARN of a satellite.

" - }, - "startTime": { - "shape": "Timestamp", - "documentation": "

Start time of a contact.

" - }, - "statusList": { - "shape": "StatusList", - "documentation": "

Status of a contact reservation.

" - } - }, - "documentation": "

" - }, - "ContactData": { - "type": "structure", - "members": { - "contactId": { - "shape": "String", - "documentation": "

UUID of a contact.

" - }, - "contactStatus": { - "shape": "ContactStatus", - "documentation": "

Status of a contact.

" - }, - "endTime": { - "shape": "Timestamp", - "documentation": "

End time of a contact.

" - }, - "errorMessage": { - "shape": "String", - "documentation": "

Error message of a contact.

" - }, - "groundStation": { - "shape": "String", - "documentation": "

Name of a ground station.

" - }, - "maximumElevation": { - "shape": "Elevation", - "documentation": "

Maximum elevation angle of a contact.

" - }, - "missionProfileArn": { - "shape": "MissionProfileArn", - "documentation": "

ARN of a mission profile.

" - }, - "postPassEndTime": { - "shape": "Timestamp", - "documentation": "

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" - }, - "prePassStartTime": { - "shape": "Timestamp", - "documentation": "

Amount of time prior to contact start you’d like to receive a CloudWatch event indicating an upcoming pass.

" - }, - "satelliteArn": { - "shape": "satelliteArn", - "documentation": "

ARN of a satellite.

" - }, - "startTime": { - "shape": "Timestamp", - "documentation": "

Start time of a contact.

" - }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a contact.

" - } - }, - "documentation": "

Data describing a contact.

" - }, - "ListGroundStationsResponse": { - "type": "structure", - "members": { - "groundStationList": { - "shape": "GroundStationList", - "documentation": "

List of ground stations.

" - }, - "nextToken": { - "shape": "String", - "documentation": "

Next token that can be supplied in the next call to get the next page of ground stations.

" - } - }, - "documentation": "

" - }, - "DataflowEndpoint": { - "type": "structure", - "members": { - "address": { - "shape": "SocketAddress", - "documentation": "

Socket address of a dataflow endpoint.

" - }, - "name": { - "shape": "SafeName", - "documentation": "

Name of a dataflow endpoint.

" - }, - "status": { - "shape": "EndpointStatus", - "documentation": "

Status of a dataflow endpoint.

" - } - }, - "documentation": "

Information about a dataflow endpoint.

" - }, - "ListConfigsRequest": { - "type": "structure", - "members": { - "maxResults": { - "shape": "Integer", - "documentation": "

Maximum number of Configs returned.

", - "location": "querystring", - "locationName": "maxResults" - }, - "nextToken": { - "shape": "String", - "documentation": "

Next token returned in the request of a previous ListConfigs call. Used to get the next page of results.

", - "location": "querystring", - "locationName": "nextToken" - } - }, - "documentation": "

" - }, - "SocketAddress": { - "type": "structure", - "required": [ - "name", - "port" - ], - "members": { - "name": { - "shape": "String", - "documentation": "

Name of a socket address.

" - }, - "port": { - "shape": "Integer", - "documentation": "

Port of a socket address.

" - } - }, - "documentation": "

Information about the socket address.

" - }, - "GetConfigResponse": { - "type": "structure", - "required": [ - "configArn", - "configData", - "configId", - "name" - ], - "members": { - "configArn": { - "shape": "ConfigArn", - "documentation": "

ARN of a Config\n

" - }, - "configData": { - "shape": "ConfigTypeData", - "documentation": "

Data elements in a Config.

" - }, - "configId": { - "shape": "String", - "documentation": "

UUID of a Config.

" - }, - "configType": { - "shape": "ConfigCapabilityType", - "documentation": "

Type of a Config.

" - }, - "name": { - "shape": "String", - "documentation": "

Name of a Config.

" - }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a Config.

" - } - }, - "documentation": "

" - }, - "TagsMap": { - "type": "map", - "key": { - "shape": "String" - }, - "value": { - "shape": "String" - } - }, - "TagResourceResponse": { - "type": "structure", - "members": { }, - "documentation": "

" - }, - "DeleteMissionProfileRequest": { - "type": "structure", - "required": [ - "missionProfileId" - ], - "members": { - "missionProfileId": { - "shape": "String", - "documentation": "

UUID of a mission profile.

", - "location": "uri", - "locationName": "missionProfileId" - } - }, - "documentation": "

" - }, - "DataflowEndpointGroupList": { - "type": "list", - "member": { - "shape": "DataflowEndpointListItem" - } - }, - "ContactList": { - "type": "list", - "member": { - "shape": "ContactData" - } - }, - "DurationInSeconds": { - "type": "integer", - "min": 1, - "max": 21600 - }, - "SatelliteListItem": { - "type": "structure", - "members": { - "noradSatelliteID": { - "shape": "noradSatelliteID", - "documentation": "

NORAD satellite ID number.

" - }, - "satelliteArn": { - "shape": "satelliteArn", - "documentation": "

ARN of a satellite.

" - }, - "satelliteId": { - "shape": "Uuid", - "documentation": "

ID of a satellite.

" - } - }, - "documentation": "

Item in a list of satellites.

" - }, - "GetMissionProfileRequest": { - "type": "structure", - "required": [ - "missionProfileId" - ], - "members": { - "missionProfileId": { - "shape": "String", - "documentation": "

UUID of a mission profile.

", - "location": "uri", - "locationName": "missionProfileId" - } - }, - "documentation": "

" - }, - "Double": { - "type": "double", - "box": true - }, - "ListSatellitesResponse": { - "type": "structure", - "members": { - "nextToken": { - "shape": "String", - "documentation": "

Next token that can be supplied in the next call to get the next page of satellites.

" - }, - "satellites": { - "shape": "SatelliteList", - "documentation": "

List of satellites.

" - } - }, - "documentation": "

" - }, - "CreateMissionProfileRequest": { - "type": "structure", - "required": [ + "CreateMissionProfileRequest":{ + "type":"structure", + "required":[ "dataflowEdges", "minimumViableContactDurationSeconds", "name", "trackingConfigArn" ], - "members": { - "contactPostPassDurationSeconds": { - "shape": "DurationInSeconds", - "documentation": "

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" + "members":{ + "contactPostPassDurationSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" }, - "contactPrePassDurationSeconds": { - "shape": "DurationInSeconds", - "documentation": "

Amount of time prior to contact start you’d like to receive a CloudWatch event indicating an upcoming pass.

" + "contactPrePassDurationSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

Amount of time prior to contact start you’d like to receive a CloudWatch event indicating an upcoming pass.

" }, - "dataflowEdges": { - "shape": "DataflowEdgeList", - "documentation": "

A list of lists of ARNs. Each list of ARNs is an edge, with a from Config and a to \n Config.

" + "dataflowEdges":{ + "shape":"DataflowEdgeList", + "documentation":"

A list of lists of ARNs. Each list of ARNs is an edge, with a from Config and a to Config.
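Since DataflowEdge is constrained to exactly two ConfigArn entries, dataflowEdges is a list of [from-Config, to-Config] ARN pairs. A minimal sketch of CreateMissionProfile using the required members of CreateMissionProfileRequest; the ARNs are placeholders and the snake_case method name is assumed::

    # Sketch: one edge from a "from" Config to a "to" Config; ARNs are placeholders.
    import boto3

    client = boto3.client("groundstation")
    profile = client.create_mission_profile(
        name="downlink-profile",
        minimumViableContactDurationSeconds=120,
        trackingConfigArn="arn:aws:groundstation:...",  # placeholder tracking Config ARN
        dataflowEdges=[[
            "arn:aws:groundstation:...",  # placeholder "from" Config ARN
            "arn:aws:groundstation:...",  # placeholder "to" Config ARN
        ]],
    )
    print(profile["missionProfileId"])  # MissionProfileIdResponse member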

" }, - "minimumViableContactDurationSeconds": { - "shape": "DurationInSeconds", - "documentation": "

Smallest amount of time in seconds that you’d like to see for an available contact. AWS Ground Station will not present you with contacts shorter than this duration.

" + "minimumViableContactDurationSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

Smallest amount of time in seconds that you’d like to see for an available contact. AWS Ground Station will not present you with contacts shorter than this duration.

" }, - "name": { - "shape": "SafeName", - "documentation": "

Name of a mission profile.

" + "name":{ + "shape":"SafeName", + "documentation":"

Name of a mission profile.

" }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a mission profile.

" + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags assigned to a mission profile.

" }, - "trackingConfigArn": { - "shape": "ConfigArn", - "documentation": "

ARN of a tracking Config.

" + "trackingConfigArn":{ + "shape":"ConfigArn", + "documentation":"

ARN of a tracking Config.

" } }, - "documentation": "

" + "documentation":"

" }, - "ReserveContactRequest": { - "type": "structure", - "required": [ + "Criticality":{ + "type":"string", + "enum":[ + "PREFERRED", + "REMOVED", + "REQUIRED" + ] + }, + "DataflowEdge":{ + "type":"list", + "member":{"shape":"ConfigArn"}, + "max":2, + "min":2 + }, + "DataflowEdgeList":{ + "type":"list", + "member":{"shape":"DataflowEdge"} + }, + "DataflowEndpoint":{ + "type":"structure", + "members":{ + "address":{ + "shape":"SocketAddress", + "documentation":"

Socket address of a dataflow endpoint.

" + }, + "name":{ + "shape":"SafeName", + "documentation":"

Name of a dataflow endpoint.

" + }, + "status":{ + "shape":"EndpointStatus", + "documentation":"

Status of a dataflow endpoint.

" + } + }, + "documentation":"

Information about a dataflow endpoint.

" + }, + "DataflowEndpointConfig":{ + "type":"structure", + "required":["dataflowEndpointName"], + "members":{ + "dataflowEndpointName":{ + "shape":"String", + "documentation":"

Name of a dataflow endpoint.

" + }, + "dataflowEndpointRegion":{ + "shape":"String", + "documentation":"

Region of a dataflow endpoint.

" + } + }, + "documentation":"

Information about the dataflow endpoint Config.

" + }, + "DataflowEndpointGroupArn":{"type":"string"}, + "DataflowEndpointGroupIdResponse":{ + "type":"structure", + "members":{ + "dataflowEndpointGroupId":{ + "shape":"String", + "documentation":"

UUID of a dataflow endpoint group.

" + } + }, + "documentation":"

" + }, + "DataflowEndpointGroupList":{ + "type":"list", + "member":{"shape":"DataflowEndpointListItem"} + }, + "DataflowEndpointListItem":{ + "type":"structure", + "members":{ + "dataflowEndpointGroupArn":{ + "shape":"DataflowEndpointGroupArn", + "documentation":"

ARN of a dataflow endpoint group.

" + }, + "dataflowEndpointGroupId":{ + "shape":"String", + "documentation":"

UUID of a dataflow endpoint group.

" + } + }, + "documentation":"

Item in a list of DataflowEndpoint groups.

" + }, + "DecodeConfig":{ + "type":"structure", + "required":["unvalidatedJSON"], + "members":{ + "unvalidatedJSON":{ + "shape":"JsonString", + "documentation":"

Unvalidated JSON of a decode Config.

" + } + }, + "documentation":"

Information about the decode Config.

" + }, + "DeleteConfigRequest":{ + "type":"structure", + "required":[ + "configId", + "configType" + ], + "members":{ + "configId":{ + "shape":"String", + "documentation":"

UUID of a Config.

", + "location":"uri", + "locationName":"configId" + }, + "configType":{ + "shape":"ConfigCapabilityType", + "documentation":"

Type of a Config.

", + "location":"uri", + "locationName":"configType" + } + }, + "documentation":"

" + }, + "DeleteDataflowEndpointGroupRequest":{ + "type":"structure", + "required":["dataflowEndpointGroupId"], + "members":{ + "dataflowEndpointGroupId":{ + "shape":"String", + "documentation":"

UUID of a dataflow endpoint group.

", + "location":"uri", + "locationName":"dataflowEndpointGroupId" + } + }, + "documentation":"

" + }, + "DeleteMissionProfileRequest":{ + "type":"structure", + "required":["missionProfileId"], + "members":{ + "missionProfileId":{ + "shape":"String", + "documentation":"

UUID of a mission profile.

", + "location":"uri", + "locationName":"missionProfileId" + } + }, + "documentation":"

" + }, + "DemodulationConfig":{ + "type":"structure", + "required":["unvalidatedJSON"], + "members":{ + "unvalidatedJSON":{ + "shape":"JsonString", + "documentation":"

Unvalidated JSON of a demodulation Config.

" + } + }, + "documentation":"

Information about the demodulation Config.

" + }, + "DependencyException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"}, + "parameterName":{ + "shape":"String", + "documentation":"

" + } + }, + "documentation":"

Dependency encountered an error.

", + "error":{"httpStatusCode":531}, + "exception":true, + "fault":true + }, + "DescribeContactRequest":{ + "type":"structure", + "required":["contactId"], + "members":{ + "contactId":{ + "shape":"String", + "documentation":"

UUID of a contact.

", + "location":"uri", + "locationName":"contactId" + } + }, + "documentation":"

" + }, + "DescribeContactResponse":{ + "type":"structure", + "members":{ + "contactId":{ + "shape":"String", + "documentation":"

UUID of a contact.

" + }, + "contactStatus":{ + "shape":"ContactStatus", + "documentation":"

Status of a contact.

" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

End time of a contact.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

Error message for a contact.

" + }, + "groundStation":{ + "shape":"String", + "documentation":"

Ground station for a contact.

" + }, + "maximumElevation":{ + "shape":"Elevation", + "documentation":"

Maximum elevation angle of a contact.

" + }, + "missionProfileArn":{ + "shape":"MissionProfileArn", + "documentation":"

ARN of a mission profile.

" + }, + "postPassEndTime":{ + "shape":"Timestamp", + "documentation":"

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" + }, + "prePassStartTime":{ + "shape":"Timestamp", + "documentation":"

Amount of time prior to contact start you’d like to receive a CloudWatch event indicating an upcoming pass.

" + }, + "region":{ + "shape":"String", + "documentation":"

Region of a contact.

" + }, + "satelliteArn":{ + "shape":"satelliteArn", + "documentation":"

ARN of a satellite.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

Start time of a contact.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags assigned to a contact.

" + } + }, + "documentation":"

" + }, + "Double":{ + "type":"double", + "box":true + }, + "DurationInSeconds":{ + "type":"integer", + "max":21600, + "min":1 + }, + "Eirp":{ + "type":"structure", + "required":[ + "units", + "value" + ], + "members":{ + "units":{ + "shape":"EirpUnits", + "documentation":"

Units of an EIRP.

" + }, + "value":{ + "shape":"Double", + "documentation":"

Value of an EIRP.

" + } + }, + "documentation":"

Object that represents EIRP.

" + }, + "EirpUnits":{ + "type":"string", + "enum":["dBW"] + }, + "Elevation":{ + "type":"structure", + "required":[ + "unit", + "value" + ], + "members":{ + "unit":{ + "shape":"AngleUnits", + "documentation":"

Elevation angle units.

" + }, + "value":{ + "shape":"Double", + "documentation":"

Elevation angle value.

" + } + }, + "documentation":"

Elevation angle of the satellite in the sky during a contact.

" + }, + "EndpointDetails":{ + "type":"structure", + "members":{ + "endpoint":{ + "shape":"DataflowEndpoint", + "documentation":"

A dataflow endpoint.

" + }, + "securityDetails":{ + "shape":"SecurityDetails", + "documentation":"

Endpoint security details.

" + } + }, + "documentation":"

Information about the endpoint details.

" + }, + "EndpointDetailsList":{ + "type":"list", + "member":{"shape":"EndpointDetails"} + }, + "EndpointStatus":{ + "type":"string", + "enum":[ + "created", + "creating", + "deleted", + "deleting", + "failed" + ] + }, + "Frequency":{ + "type":"structure", + "required":[ + "units", + "value" + ], + "members":{ + "units":{ + "shape":"FrequencyUnits", + "documentation":"

Frequency units.

" + }, + "value":{ + "shape":"Double", + "documentation":"

Frequency value.

" + } + }, + "documentation":"

Object that describes the frequency.

" + }, + "FrequencyBandwidth":{ + "type":"structure", + "required":[ + "units", + "value" + ], + "members":{ + "units":{ + "shape":"BandwidthUnits", + "documentation":"

Frequency bandwidth units.

" + }, + "value":{ + "shape":"Double", + "documentation":"

Frequency bandwidth value.

" + } + }, + "documentation":"

Object that describes the frequency bandwidth.

" + }, + "FrequencyUnits":{ + "type":"string", + "enum":[ + "GHz", + "MHz", + "kHz" + ] + }, + "GetConfigRequest":{ + "type":"structure", + "required":[ + "configId", + "configType" + ], + "members":{ + "configId":{ + "shape":"String", + "documentation":"

UUID of a Config.

", + "location":"uri", + "locationName":"configId" + }, + "configType":{ + "shape":"ConfigCapabilityType", + "documentation":"

Type of a Config.

", + "location":"uri", + "locationName":"configType" + } + }, + "documentation":"

" + }, + "GetConfigResponse":{ + "type":"structure", + "required":[ + "configArn", + "configData", + "configId", + "name" + ], + "members":{ + "configArn":{ + "shape":"ConfigArn", + "documentation":"

ARN of a Config.

" + }, + "configData":{ + "shape":"ConfigTypeData", + "documentation":"

Data elements in a Config.

" + }, + "configId":{ + "shape":"String", + "documentation":"

UUID of a Config.

" + }, + "configType":{ + "shape":"ConfigCapabilityType", + "documentation":"

Type of a Config.

" + }, + "name":{ + "shape":"String", + "documentation":"

Name of a Config.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags assigned to a Config.

" + } + }, + "documentation":"

" + }, + "GetDataflowEndpointGroupRequest":{ + "type":"structure", + "required":["dataflowEndpointGroupId"], + "members":{ + "dataflowEndpointGroupId":{ + "shape":"String", + "documentation":"

UUID of a dataflow endpoint group.

", + "location":"uri", + "locationName":"dataflowEndpointGroupId" + } + }, + "documentation":"

" + }, + "GetDataflowEndpointGroupResponse":{ + "type":"structure", + "members":{ + "dataflowEndpointGroupArn":{ + "shape":"DataflowEndpointGroupArn", + "documentation":"

ARN of a dataflow endpoint group.

" + }, + "dataflowEndpointGroupId":{ + "shape":"String", + "documentation":"

UUID of a dataflow endpoint group.

" + }, + "endpointsDetails":{ + "shape":"EndpointDetailsList", + "documentation":"

Details of a dataflow endpoint.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags assigned to a dataflow endpoint group.

" + } + }, + "documentation":"

" + }, + "GetMinuteUsageRequest":{ + "type":"structure", + "required":[ + "month", + "year" + ], + "members":{ + "month":{ + "shape":"Integer", + "documentation":"

The month being requested, with a value of 1-12.

" + }, + "year":{ + "shape":"Integer", + "documentation":"

The year being requested, in the format of YYYY.
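Illustrative aside, not part of the service model: assuming the usual snake_case method mapping that botocore derives from these operation names, and with the region and credentials as placeholders, a minute-usage lookup for a given month could look like the sketch below.

import botocore.session

# Placeholder region; assumes AWS credentials are already configured.
client = botocore.session.get_session().create_client(
    "groundstation", region_name="us-east-1")

# month is 1-12 and year is YYYY, as described by the request members above.
usage = client.get_minute_usage(month=2, year=2020)
print(usage.get("totalScheduledMinutes"), usage.get("estimatedMinutesRemaining"))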

" + } + }, + "documentation":"

" + }, + "GetMinuteUsageResponse":{ + "type":"structure", + "members":{ + "estimatedMinutesRemaining":{ + "shape":"Integer", + "documentation":"

Estimated number of minutes remaining for an account, specific to the month being requested.

" + }, + "isReservedMinutesCustomer":{ + "shape":"Boolean", + "documentation":"

Returns whether or not an account has signed up for the reserved minutes pricing plan, specific to the month being requested.

" + }, + "totalReservedMinuteAllocation":{ + "shape":"Integer", + "documentation":"

Total number of reserved minutes allocated, specific to the month being requested.

" + }, + "totalScheduledMinutes":{ + "shape":"Integer", + "documentation":"

Total scheduled minutes for an account, specific to the month being requested.

" + }, + "upcomingMinutesScheduled":{ + "shape":"Integer", + "documentation":"

Upcoming minutes scheduled for an account, specific to the month being requested.

" + } + }, + "documentation":"

" + }, + "GetMissionProfileRequest":{ + "type":"structure", + "required":["missionProfileId"], + "members":{ + "missionProfileId":{ + "shape":"String", + "documentation":"

UUID of a mission profile.

", + "location":"uri", + "locationName":"missionProfileId" + } + }, + "documentation":"

" + }, + "GetMissionProfileResponse":{ + "type":"structure", + "members":{ + "contactPostPassDurationSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" + }, + "contactPrePassDurationSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

Amount of time prior to contact start you’d like to receive a CloudWatch event indicating an upcoming pass.

" + }, + "dataflowEdges":{ + "shape":"DataflowEdgeList", + "documentation":"

A list of lists of ARNs. Each list of ARNs is an edge, with a from Config and a to Config.

" + }, + "minimumViableContactDurationSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

Smallest amount of time in seconds that you’d like to see for an available contact. AWS Ground Station will not present you with contacts shorter than this duration.

" + }, + "missionProfileArn":{ + "shape":"MissionProfileArn", + "documentation":"

ARN of a mission profile.

" + }, + "missionProfileId":{ + "shape":"String", + "documentation":"

UUID of a mission profile.

" + }, + "name":{ + "shape":"String", + "documentation":"

Name of a mission profile.

" + }, + "region":{ + "shape":"String", + "documentation":"

Region of a mission profile.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags assigned to a mission profile.

" + }, + "trackingConfigArn":{ + "shape":"ConfigArn", + "documentation":"

ARN of a tracking Config.

" + } + }, + "documentation":"

" + }, + "GetSatelliteRequest":{ + "type":"structure", + "required":["satelliteId"], + "members":{ + "satelliteId":{ + "shape":"String", + "documentation":"

UUID of a satellite.

", + "location":"uri", + "locationName":"satelliteId" + } + }, + "documentation":"

" + }, + "GetSatelliteResponse":{ + "type":"structure", + "members":{ + "groundStations":{ + "shape":"GroundStationIdList", + "documentation":"

A list of ground stations to which the satellite is on-boarded.

" + }, + "noradSatelliteID":{ + "shape":"noradSatelliteID", + "documentation":"

NORAD satellite ID number.

" + }, + "satelliteArn":{ + "shape":"satelliteArn", + "documentation":"

ARN of a satellite.

" + }, + "satelliteId":{ + "shape":"Uuid", + "documentation":"

UUID of a satellite.

" + } + }, + "documentation":"

" + }, + "GroundStationData":{ + "type":"structure", + "members":{ + "groundStationId":{ + "shape":"String", + "documentation":"

UUID of a ground station.

" + }, + "groundStationName":{ + "shape":"String", + "documentation":"

Name of a ground station.

" + }, + "region":{ + "shape":"String", + "documentation":"

Ground station Region.

" + } + }, + "documentation":"

Information about the ground station data.

" + }, + "GroundStationIdList":{ + "type":"list", + "member":{"shape":"String"} + }, + "GroundStationList":{ + "type":"list", + "member":{"shape":"GroundStationData"} + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"}, + "parameterName":{ + "shape":"String", + "documentation":"

" + } + }, + "documentation":"

One or more parameters are not valid.

", + "error":{ + "httpStatusCode":431, + "senderFault":true + }, + "exception":true + }, + "JsonString":{ + "type":"string", + "max":8192, + "min":2, + "pattern":"^[{}\\[\\]:.,\"0-9A-z\\-_\\s]{2,8192}$" + }, + "ListConfigsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"Integer", + "documentation":"

Maximum number of Configs returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token returned in the request of a previous ListConfigs call. Used to get the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

" + }, + "ListConfigsResponse":{ + "type":"structure", + "members":{ + "configList":{ + "shape":"ConfigList", + "documentation":"

List of Config items.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token returned in the response of a previous ListConfigs call. Used to get the next page of results.

" + } + }, + "documentation":"

" + }, + "ListContactsRequest":{ + "type":"structure", + "required":[ + "endTime", + "startTime", + "statusList" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

End time of a contact.

" + }, + "groundStation":{ + "shape":"String", + "documentation":"

Name of a ground station.

" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

Maximum number of contacts returned.

" + }, + "missionProfileArn":{ + "shape":"MissionProfileArn", + "documentation":"

ARN of a mission profile.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token returned in the request of a previous ListContacts call. Used to get the next page of results.

" + }, + "satelliteArn":{ + "shape":"satelliteArn", + "documentation":"

ARN of a satellite.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

Start time of a contact.

" + }, + "statusList":{ + "shape":"StatusList", + "documentation":"

Status of a contact reservation.

" + } + }, + "documentation":"

" + }, + "ListContactsResponse":{ + "type":"structure", + "members":{ + "contactList":{ + "shape":"ContactList", + "documentation":"

List of contacts.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token returned in the response of a previous ListContacts call. Used to get the next page of results.

" + } + }, + "documentation":"

" + }, + "ListDataflowEndpointGroupsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"Integer", + "documentation":"

Maximum number of dataflow endpoint groups returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token returned in the request of a previous ListDataflowEndpointGroups call. Used to get the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

" + }, + "ListDataflowEndpointGroupsResponse":{ + "type":"structure", + "members":{ + "dataflowEndpointGroupList":{ + "shape":"DataflowEndpointGroupList", + "documentation":"

A list of dataflow endpoint groups.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token returned in the response of a previous ListDataflowEndpointGroups call. Used to get the next page of results.

" + } + }, + "documentation":"

" + }, + "ListGroundStationsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"Integer", + "documentation":"

Maximum number of ground stations returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token that can be supplied in the next call to get the next page of ground stations.

", + "location":"querystring", + "locationName":"nextToken" + }, + "satelliteId":{ + "shape":"String", + "documentation":"

Satellite ID to retrieve on-boarded ground stations.

", + "location":"querystring", + "locationName":"satelliteId" + } + }, + "documentation":"

" + }, + "ListGroundStationsResponse":{ + "type":"structure", + "members":{ + "groundStationList":{ + "shape":"GroundStationList", + "documentation":"

List of ground stations.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token that can be supplied in the next call to get the next page of ground stations.

" + } + }, + "documentation":"

" + }, + "ListMissionProfilesRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"Integer", + "documentation":"

Maximum number of mission profiles returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token returned in the request of a previous ListMissionProfiles call. Used to get the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

" + }, + "ListMissionProfilesResponse":{ + "type":"structure", + "members":{ + "missionProfileList":{ + "shape":"MissionProfileList", + "documentation":"

List of mission profiles.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token returned in the response of a previous ListMissionProfiles call. Used to get the next page of results.

" + } + }, + "documentation":"

" + }, + "ListSatellitesRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"Integer", + "documentation":"

Maximum number of satellites returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Next token that can be supplied in the next call to get the next page of satellites.

", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

" + }, + "ListSatellitesResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

Next token that can be supplied in the next call to get the next page of satellites.

" + }, + "satellites":{ + "shape":"SatelliteList", + "documentation":"

List of satellites.

" + } + }, + "documentation":"

" + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

ARN of a resource.

", + "location":"uri", + "locationName":"resourceArn" + } + }, + "documentation":"

" + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags assigned to a resource.

" + } + }, + "documentation":"

" + }, + "MissionProfileArn":{"type":"string"}, + "MissionProfileIdResponse":{ + "type":"structure", + "members":{ + "missionProfileId":{ + "shape":"String", + "documentation":"

UUID of a mission profile.

" + } + }, + "documentation":"

" + }, + "MissionProfileList":{ + "type":"list", + "member":{"shape":"MissionProfileListItem"} + }, + "MissionProfileListItem":{ + "type":"structure", + "members":{ + "missionProfileArn":{ + "shape":"MissionProfileArn", + "documentation":"

ARN of a mission profile.

" + }, + "missionProfileId":{ + "shape":"String", + "documentation":"

UUID of a mission profile.

" + }, + "name":{ + "shape":"String", + "documentation":"

Name of a mission profile.

" + }, + "region":{ + "shape":"String", + "documentation":"

Region of a mission profile.

" + } + }, + "documentation":"

Item in a list of mission profiles.

" + }, + "Polarization":{ + "type":"string", + "enum":[ + "LEFT_HAND", + "NONE", + "RIGHT_HAND" + ] + }, + "ReserveContactRequest":{ + "type":"structure", + "required":[ "endTime", "groundStation", "missionProfileArn", "satelliteArn", "startTime" ], - "members": { - "endTime": { - "shape": "Timestamp", - "documentation": "

End time of a contact.

" + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

End time of a contact.

" }, - "groundStation": { - "shape": "String", - "documentation": "

Name of a ground station.

" + "groundStation":{ + "shape":"String", + "documentation":"

Name of a ground station.

" }, - "missionProfileArn": { - "shape": "MissionProfileArn", - "documentation": "

ARN of a mission profile.

" + "missionProfileArn":{ + "shape":"MissionProfileArn", + "documentation":"

ARN of a mission profile.

" }, - "satelliteArn": { - "shape": "satelliteArn", - "documentation": "

ARN of a satellite

" + "satelliteArn":{ + "shape":"satelliteArn", + "documentation":"

ARN of a satellite.

" }, - "startTime": { - "shape": "Timestamp", - "documentation": "

Start time of a contact.

" + "startTime":{ + "shape":"Timestamp", + "documentation":"

Start time of a contact.

" }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a contact.

" + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags assigned to a contact.
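Illustrative aside, not part of the service model: a contact reservation built from the required members listed above might look like the following sketch. Every ARN and the ground station name are hypothetical placeholders, and the snake_case method name is assumed from botocore's usual operation mapping.

from datetime import datetime, timedelta

import botocore.session

client = botocore.session.get_session().create_client(
    "groundstation", region_name="us-east-1")

start = datetime(2020, 2, 1, 12, 0, 0)
response = client.reserve_contact(
    missionProfileArn="arn:aws:groundstation:us-east-1:111122223333:mission-profile/EXAMPLE",  # placeholder
    satelliteArn="arn:aws:groundstation::111122223333:satellite/EXAMPLE",  # placeholder
    groundStation="Example Ground Station 1",  # placeholder
    startTime=start,
    endTime=start + timedelta(minutes=10),
    tags={"project": "example"},
)
print(response)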

" } }, - "documentation": "

" + "documentation":"

" }, - "DataflowEndpointConfig": { - "type": "structure", - "required": [ - "dataflowEndpointName" + "ResourceLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"}, + "parameterName":{ + "shape":"String", + "documentation":"

" + } + }, + "documentation":"

Account limits for this resource have been exceeded.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Resource was not found.

", + "error":{ + "httpStatusCode":434, + "senderFault":true + }, + "exception":true + }, + "RoleArn":{"type":"string"}, + "SafeName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[ a-zA-Z0-9_:-]{1,256}$" + }, + "SatelliteList":{ + "type":"list", + "member":{"shape":"SatelliteListItem"} + }, + "SatelliteListItem":{ + "type":"structure", + "members":{ + "groundStations":{ + "shape":"GroundStationIdList", + "documentation":"

A list of ground stations to which the satellite is on-boarded.

" + }, + "noradSatelliteID":{ + "shape":"noradSatelliteID", + "documentation":"

NORAD satellite ID number.

" + }, + "satelliteArn":{ + "shape":"satelliteArn", + "documentation":"

ARN of a satellite.

" + }, + "satelliteId":{ + "shape":"Uuid", + "documentation":"

UUID of a satellite.

" + } + }, + "documentation":"

Item in a list of satellites.

" + }, + "SecurityDetails":{ + "type":"structure", + "required":[ + "roleArn", + "securityGroupIds", + "subnetIds" ], - "members": { - "dataflowEndpointName": { - "shape": "String", - "documentation": "

Name of a dataflow endpoint.

" + "members":{ + "roleArn":{ + "shape":"RoleArn", + "documentation":"

ARN to a role needed for connecting streams to your instances.

" + }, + "securityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

The security groups to attach to the elastic network interfaces.

" + }, + "subnetIds":{ + "shape":"SubnetList", + "documentation":"

A list of subnets where AWS Ground Station places elastic network interfaces to send streams to your instances.
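Illustrative aside, not part of the service model: the EndpointDetails, SocketAddress, and SecurityDetails shapes above nest as shown in this sketch. Every identifier is a hypothetical placeholder; a list of such entries is the kind of value used when creating a dataflow endpoint group (that request shape is defined earlier in this file).

# Each entry pairs a dataflow endpoint with the security details used to
# place elastic network interfaces; all values below are placeholders.
endpoint_details = {
    "endpoint": {
        "name": "example-downlink-endpoint",               # SafeName
        "address": {"name": "10.0.0.10", "port": 55888},   # SocketAddress
    },
    "securityDetails": {
        "roleArn": "arn:aws:iam::111122223333:role/ExampleGroundStationRole",
        "securityGroupIds": ["sg-0123456789abcdef0"],
        "subnetIds": ["subnet-0123456789abcdef0"],
    },
}
endpoints_details = [endpoint_details]  # EndpointDetailsList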

" } }, - "documentation": "

Information about the dataflow endpoint Config.

" + "documentation":"

Information about endpoints.

" }, - "Uuid": { - "type": "string", - "min": 1, - "max": 128, - "pattern": "[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" + "SecurityGroupIdList":{ + "type":"list", + "member":{"shape":"String"} }, - "ListTagsForResourceResponse": { - "type": "structure", - "members": { - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a resource.

" - } - }, - "documentation": "

" - }, - "MissionProfileArn": { - "type": "string" - }, - "ListContactsResponse": { - "type": "structure", - "members": { - "contactList": { - "shape": "ContactList", - "documentation": "

List of contacts.

" - }, - "nextToken": { - "shape": "String", - "documentation": "

Next token returned in the response of a previous ListContacts call. Used to get the next page of results.

" - } - }, - "documentation": "

" - }, - "DataflowEdgeList": { - "type": "list", - "member": { - "shape": "DataflowEdge" - } - }, - "DescribeContactResponse": { - "type": "structure", - "members": { - "contactId": { - "shape": "String", - "documentation": "

UUID of a contact.

" - }, - "contactStatus": { - "shape": "ContactStatus", - "documentation": "

Status of a contact.

" - }, - "endTime": { - "shape": "Timestamp", - "documentation": "

End time of a contact.

" - }, - "errorMessage": { - "shape": "String", - "documentation": "

Error message for a contact.

" - }, - "groundStation": { - "shape": "String", - "documentation": "

Ground station for a contact.

" - }, - "maximumElevation": { - "shape": "Elevation", - "documentation": "

Maximum elevation angle of a contact.

" - }, - "missionProfileArn": { - "shape": "MissionProfileArn", - "documentation": "

ARN of a mission profile.

" - }, - "postPassEndTime": { - "shape": "Timestamp", - "documentation": "

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" - }, - "prePassStartTime": { - "shape": "Timestamp", - "documentation": "

Amount of time prior to contact start you’d like to receive a CloudWatch event indicating an upcoming pass.

" - }, - "satelliteArn": { - "shape": "satelliteArn", - "documentation": "

ARN of a satellite.

" - }, - "startTime": { - "shape": "Timestamp", - "documentation": "

Start time of a contact.

" - }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a contact.

" - } - }, - "documentation": "

" - }, - "ConfigListItem": { - "type": "structure", - "members": { - "configArn": { - "shape": "ConfigArn", - "documentation": "

ARN of a Config.

" - }, - "configId": { - "shape": "String", - "documentation": "

UUID of a Config.

" - }, - "configType": { - "shape": "ConfigCapabilityType", - "documentation": "

Type of a Config.

" - }, - "name": { - "shape": "String", - "documentation": "

Name of a Config.

" - } - }, - "documentation": "

An item in a list of Config objects.

" - }, - "ListTagsForResourceRequest": { - "type": "structure", - "required": [ - "resourceArn" + "SocketAddress":{ + "type":"structure", + "required":[ + "name", + "port" ], - "members": { - "resourceArn": { - "shape": "String", - "documentation": "

ARN of a resource.

", - "location": "uri", - "locationName": "resourceArn" - } - }, - "documentation": "

" - }, - "ListDataflowEndpointGroupsRequest": { - "type": "structure", - "members": { - "maxResults": { - "shape": "Integer", - "documentation": "

Maximum number of dataflow endpoint groups returned.

", - "location": "querystring", - "locationName": "maxResults" + "members":{ + "name":{ + "shape":"String", + "documentation":"

Name of a socket address.

" }, - "nextToken": { - "shape": "String", - "documentation": "

Next token returned in the request of a previous ListDataflowEndpointGroups call. Used to get the next page of results.

", - "location": "querystring", - "locationName": "nextToken" + "port":{ + "shape":"Integer", + "documentation":"

Port of a socket address.

" } }, - "documentation": "

" + "documentation":"

Information about the socket address.

" }, - "FrequencyBandwidth": { - "type": "structure", - "required": [ - "units", - "value" + "SpectrumConfig":{ + "type":"structure", + "required":[ + "bandwidth", + "centerFrequency" ], - "members": { - "units": { - "shape": "BandwidthUnits", - "documentation": "

Frequency bandwidth units.

" + "members":{ + "bandwidth":{ + "shape":"FrequencyBandwidth", + "documentation":"

Bandwidth of a spectral Config.

" }, - "value": { - "shape": "Double", - "documentation": "

Frequency bandwidth value.

" + "centerFrequency":{ + "shape":"Frequency", + "documentation":"

Center frequency of a spectral Config.

" + }, + "polarization":{ + "shape":"Polarization", + "documentation":"

Polarization of a spectral Config.

" } }, - "documentation": "

Object that describes the frequency bandwidth.

" + "documentation":"

Object that describes a spectral Config.

" }, - "String": { - "type": "string" + "StatusList":{ + "type":"list", + "member":{"shape":"ContactStatus"} }, - "ListSatellitesRequest": { - "type": "structure", - "members": { - "maxResults": { - "shape": "Integer", - "documentation": "

Maximum number of satellites returned.

", - "location": "querystring", - "locationName": "maxResults" - }, - "nextToken": { - "shape": "String", - "documentation": "

Next token that can be supplied in the next call to get the next page of satellites.

", - "location": "querystring", - "locationName": "nextToken" - } - }, - "documentation": "

" + "String":{"type":"string"}, + "SubnetList":{ + "type":"list", + "member":{"shape":"String"} }, - "UpdateMissionProfileRequest": { - "type": "structure", - "required": [ - "missionProfileId" + "TagKeys":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" ], - "members": { - "contactPostPassDurationSeconds": { - "shape": "DurationInSeconds", - "documentation": "

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

ARN of a resource tag.

", + "location":"uri", + "locationName":"resourceArn" }, - "contactPrePassDurationSeconds": { - "shape": "DurationInSeconds", - "documentation": "

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" - }, - "dataflowEdges": { - "shape": "DataflowEdgeList", - "documentation": "

A list of lists of ARNs. Each list of ARNs is an edge, with a from Config and a to \n Config.

" - }, - "minimumViableContactDurationSeconds": { - "shape": "DurationInSeconds", - "documentation": "

Smallest amount of time in seconds that you’d like to see for an available contact. AWS Ground Station will not present you with contacts shorter than this duration.

" - }, - "missionProfileId": { - "shape": "String", - "documentation": "

ID of a mission profile.

", - "location": "uri", - "locationName": "missionProfileId" - }, - "name": { - "shape": "SafeName", - "documentation": "

Name of a mission profile.

" - }, - "trackingConfigArn": { - "shape": "ConfigArn", - "documentation": "

ARN of a tracking Config.

" + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags assigned to a resource.

" } }, - "documentation": "

" + "documentation":"

" }, - "FrequencyUnits": { - "type": "string", - "enum": [ - "GHz", - "MHz", - "kHz" - ] + "TagResourceResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

" }, - "TagResourceRequest": { - "type": "structure", - "required": [ - "resourceArn" + "TagsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Timestamp":{"type":"timestamp"}, + "TrackingConfig":{ + "type":"structure", + "required":["autotrack"], + "members":{ + "autotrack":{ + "shape":"Criticality", + "documentation":"

Current setting for autotrack.

" + } + }, + "documentation":"

Object that determines whether tracking should be used during a contact executed with this Config in the mission profile.

" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" ], - "members": { - "resourceArn": { - "shape": "String", - "documentation": "

ARN of a resource tag.

", - "location": "uri", - "locationName": "resourceArn" + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

ARN of a resource.

", + "location":"uri", + "locationName":"resourceArn" }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a resource.

" + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

Keys of a resource tag.

", + "location":"querystring", + "locationName":"tagKeys" } }, - "documentation": "

" + "documentation":"

" }, - "Criticality": { - "type": "string", - "enum": [ - "PREFERRED", - "REMOVED", - "REQUIRED" - ] - }, - "ConfigCapabilityType": { - "type": "string", - "enum": [ - "antenna-downlink", - "antenna-downlink-demod-decode", - "antenna-uplink", - "dataflow-endpoint", - "tracking", - "uplink-echo" - ] - }, - "TagKeys": { - "type": "list", - "member": { - "shape": "String" - } - }, - "AngleUnits": { - "type": "string", - "enum": [ - "DEGREE_ANGLE", - "RADIAN" - ] - }, - "DataflowEndpointListItem": { - "type": "structure", - "members": { - "dataflowEndpointGroupArn": { - "shape": "DataflowEndpointGroupArn", - "documentation": "

ARN of a dataflow endpoint group.

" - }, - "dataflowEndpointGroupId": { - "shape": "String", - "documentation": "

UUID of a dataflow endpoint group.

" - } + "UntagResourceResponse":{ + "type":"structure", + "members":{ }, - "documentation": "

Item in a list of DataflowEndpoint groups.

" + "documentation":"

" }, - "GetDataflowEndpointGroupResponse": { - "type": "structure", - "members": { - "dataflowEndpointGroupArn": { - "shape": "DataflowEndpointGroupArn", - "documentation": "

ARN of a dataflow endpoint group.

" - }, - "dataflowEndpointGroupId": { - "shape": "String", - "documentation": "

UUID of a dataflow endpoint group.

" - }, - "endpointsDetails": { - "shape": "EndpointDetailsList", - "documentation": "

Details of a dataflow endpoint.

" - }, - "tags": { - "shape": "TagsMap", - "documentation": "

Tags assigned to a dataflow endpoint group.

" - } - }, - "documentation": "

" - }, - "GetDataflowEndpointGroupRequest": { - "type": "structure", - "required": [ - "dataflowEndpointGroupId" + "UpdateConfigRequest":{ + "type":"structure", + "required":[ + "configData", + "configId", + "configType", + "name" ], - "members": { - "dataflowEndpointGroupId": { - "shape": "String", - "documentation": "

UUID of a dataflow endpoint group.

", - "location": "uri", - "locationName": "dataflowEndpointGroupId" - } - }, - "documentation": "

" - }, - "ConfigArn": { - "type": "string" - }, - "GetMinuteUsageRequest": { - "type": "structure", - "required": [ - "month", - "year" - ], - "members": { - "month": { - "shape": "Integer", - "documentation": "

The month being requested, with a value of 1-12.

" + "members":{ + "configData":{ + "shape":"ConfigTypeData", + "documentation":"

Parameters of a Config.

" }, - "year": { - "shape": "Integer", - "documentation": "

The year being requested, in the format of YYYY.

" + "configId":{ + "shape":"String", + "documentation":"

UUID of a Config.

", + "location":"uri", + "locationName":"configId" + }, + "configType":{ + "shape":"ConfigCapabilityType", + "documentation":"

Type of a Config.

", + "location":"uri", + "locationName":"configType" + }, + "name":{ + "shape":"SafeName", + "documentation":"

Name of a Config.

" } }, - "documentation": "

" + "documentation":"

" }, - "DataflowEndpointGroupIdResponse": { - "type": "structure", - "members": { - "dataflowEndpointGroupId": { - "shape": "String", - "documentation": "

ID of a dataflow endpoint group.

" + "UpdateMissionProfileRequest":{ + "type":"structure", + "required":["missionProfileId"], + "members":{ + "contactPostPassDurationSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

Amount of time after a contact ends that you’d like to receive a CloudWatch event indicating the pass has finished.

" + }, + "contactPrePassDurationSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

Amount of time prior to contact start you’d like to receive a CloudWatch event indicating an upcoming pass.

" + }, + "dataflowEdges":{ + "shape":"DataflowEdgeList", + "documentation":"

A list of lists of ARNs. Each list of ARNs is an edge, with a from Config and a to Config.
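Illustrative aside, not part of the service model: since the DataflowEdge shape defined earlier in this file is a two-element list of Config ARNs, a dataflowEdges value is a list of such pairs, as in this sketch with placeholder ARNs.

# Each edge is exactly [from_config_arn, to_config_arn]; ARNs are placeholders.
downlink_config_arn = (
    "arn:aws:groundstation:us-east-1:111122223333:config/antenna-downlink/EXAMPLE")
endpoint_config_arn = (
    "arn:aws:groundstation:us-east-1:111122223333:config/dataflow-endpoint/EXAMPLE")

dataflow_edges = [
    [downlink_config_arn, endpoint_config_arn],
]
# dataflow_edges would be supplied as the dataflowEdges member of this request.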

" + }, + "minimumViableContactDurationSeconds":{ + "shape":"DurationInSeconds", + "documentation":"

Smallest amount of time in seconds that you’d like to see for an available contact. AWS Ground Station will not present you with contacts shorter than this duration.

" + }, + "missionProfileId":{ + "shape":"String", + "documentation":"

UUID of a mission profile.

", + "location":"uri", + "locationName":"missionProfileId" + }, + "name":{ + "shape":"SafeName", + "documentation":"

Name of a mission profile.

" + }, + "trackingConfigArn":{ + "shape":"ConfigArn", + "documentation":"

ARN of a tracking Config.

" } }, - "documentation": "

" + "documentation":"

" }, - "EirpUnits": { - "type": "string", - "enum": [ - "dBW" - ] - } - } + "UplinkEchoConfig":{ + "type":"structure", + "required":[ + "antennaUplinkConfigArn", + "enabled" + ], + "members":{ + "antennaUplinkConfigArn":{ + "shape":"ConfigArn", + "documentation":"

ARN of an uplink Config.

" + }, + "enabled":{ + "shape":"Boolean", + "documentation":"

Whether or not an uplink Config is enabled.

" + } + }, + "documentation":"

Information about an uplink echo Config.

Parameters from the AntennaUplinkConfig, corresponding to the specified AntennaUplinkConfigArn, are used when this UplinkEchoConfig is used in a contact.

" + }, + "UplinkSpectrumConfig":{ + "type":"structure", + "required":["centerFrequency"], + "members":{ + "centerFrequency":{ + "shape":"Frequency", + "documentation":"

Center frequency of an uplink spectral Config.

" + }, + "polarization":{ + "shape":"Polarization", + "documentation":"

Polarization of an uplink spectral Config.

" + } + }, + "documentation":"

Information about the uplink spectral Config.

" + }, + "Uuid":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" + }, + "noradSatelliteID":{ + "type":"integer", + "max":99999, + "min":1 + }, + "satelliteArn":{"type":"string"} + }, + "documentation":"

Welcome to the AWS Ground Station API Reference. AWS Ground Station is a fully managed service that enables you to control satellite communications, downlink and process satellite data, and scale your satellite operations efficiently and cost-effectively without having to build or manage your own ground station infrastructure.

" } diff --git a/botocore/data/health/2016-08-04/paginators-1.json b/botocore/data/health/2016-08-04/paginators-1.json index dcf401ad..00146fd7 100644 --- a/botocore/data/health/2016-08-04/paginators-1.json +++ b/botocore/data/health/2016-08-04/paginators-1.json @@ -23,6 +23,27 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "eventTypes" + }, + "DescribeAffectedAccountsForOrganization": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "affectedAccounts" + }, + "DescribeAffectedEntitiesForOrganization": { + "input_token": "nextToken", + "limit_key": "maxResults", + "non_aggregate_keys": [ + "failedSet" + ], + "output_token": "nextToken", + "result_key": "entities" + }, + "DescribeEventsForOrganization": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "events" } } } diff --git a/botocore/data/health/2016-08-04/service-2.json b/botocore/data/health/2016-08-04/service-2.json index 00f4bbc4..877c1145 100644 --- a/botocore/data/health/2016-08-04/service-2.json +++ b/botocore/data/health/2016-08-04/service-2.json @@ -13,6 +13,20 @@ "uid":"health-2016-08-04" }, "operations":{ + "DescribeAffectedAccountsForOrganization":{ + "name":"DescribeAffectedAccountsForOrganization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAffectedAccountsForOrganizationRequest"}, + "output":{"shape":"DescribeAffectedAccountsForOrganizationResponse"}, + "errors":[ + {"shape":"InvalidPaginationToken"} + ], + "documentation":"

Returns a list of accounts in the organization from AWS Organizations that are affected by the provided event.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's master account.

", + "idempotent":true + }, "DescribeAffectedEntities":{ "name":"DescribeAffectedEntities", "http":{ @@ -28,6 +42,21 @@ "documentation":"

Returns a list of entities that have been affected by the specified events, based on the specified filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the AWS service. Events that have impact beyond that of the affected entities, or where the extent of impact is unknown, include at least one entity indicating this.

At least one event ARN is required. Results are sorted by the lastUpdatedTime of the entity, starting with the most recent.

", "idempotent":true }, + "DescribeAffectedEntitiesForOrganization":{ + "name":"DescribeAffectedEntitiesForOrganization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAffectedEntitiesForOrganizationRequest"}, + "output":{"shape":"DescribeAffectedEntitiesForOrganizationResponse"}, + "errors":[ + {"shape":"InvalidPaginationToken"}, + {"shape":"UnsupportedLocale"} + ], + "documentation":"

Returns a list of entities that have been affected by one or more events for one or more accounts in your organization in AWS Organizations, based on the filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the AWS service.

At least one event ARN and account ID are required. Results are sorted by the lastUpdatedTime of the entity, starting with the most recent.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's master account.

", + "idempotent":true + }, "DescribeEntityAggregates":{ "name":"DescribeEntityAggregates", "http":{ @@ -64,7 +93,21 @@ "errors":[ {"shape":"UnsupportedLocale"} ], - "documentation":"

Returns detailed information about one or more specified events. Information includes standard event data (region, service, etc., as returned by DescribeEvents), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included; to retrieve those, use the DescribeAffectedEntities operation.

If a specified event cannot be retrieved, an error message is returned for that event.

", + "documentation":"

Returns detailed information about one or more specified events. Information includes standard event data (region, service, and so on, as returned by DescribeEvents), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included; to retrieve those, use the DescribeAffectedEntities operation.

If a specified event cannot be retrieved, an error message is returned for that event.

", + "idempotent":true + }, + "DescribeEventDetailsForOrganization":{ + "name":"DescribeEventDetailsForOrganization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventDetailsForOrganizationRequest"}, + "output":{"shape":"DescribeEventDetailsForOrganizationResponse"}, + "errors":[ + {"shape":"UnsupportedLocale"} + ], + "documentation":"

Returns detailed information about one or more specified events for one or more accounts in your organization. Information includes standard event data (Region, service, and so on, as returned by DescribeEventsForOrganization), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included; to retrieve those, use the DescribeAffectedEntitiesForOrganization operation.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's master account.

", "idempotent":true }, "DescribeEventTypes":{ @@ -96,6 +139,55 @@ ], "documentation":"

Returns information about events that meet the specified filter criteria. Events are returned in a summary form and do not include the detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the DescribeEventDetails and DescribeAffectedEntities operations.

If no filter criteria are specified, all events are returned. Results are sorted by lastModifiedTime, starting with the most recent.

", "idempotent":true + }, + "DescribeEventsForOrganization":{ + "name":"DescribeEventsForOrganization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsForOrganizationRequest"}, + "output":{"shape":"DescribeEventsForOrganizationResponse"}, + "errors":[ + {"shape":"InvalidPaginationToken"}, + {"shape":"UnsupportedLocale"} + ], + "documentation":"

Returns information about events across your organization in AWS Organizations that meet the specified filter criteria. Events are returned in a summary form and do not include the accounts impacted, detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the DescribeAffectedAccountsForOrganization, DescribeEventDetailsForOrganization, and DescribeAffectedEntitiesForOrganization operations.

If no filter criteria are specified, all events across your organization are returned. Results are sorted by lastModifiedTime, starting with the most recent.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's master account.

", + "idempotent":true + }, + "DescribeHealthServiceStatusForOrganization":{ + "name":"DescribeHealthServiceStatusForOrganization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"DescribeHealthServiceStatusForOrganizationResponse"}, + "documentation":"

This operation provides status information on enabling or disabling AWS Health to work with your organization. To call this operation, you must sign in as an IAM user, assume an IAM role, or sign in as the root user (not recommended) in the organization's master account.

", + "idempotent":true + }, + "DisableHealthServiceAccessForOrganization":{ + "name":"DisableHealthServiceAccessForOrganization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "errors":[ + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Calling this operation disables AWS Health from working with AWS Organizations. This does not remove the Service Linked Role (SLR) from the master account in your organization. Use the IAM console, API, or AWS CLI to remove the SLR if desired. To call this operation, you must sign in as an IAM user, assume an IAM role, or sign in as the root user (not recommended) in the organization's master account.

", + "idempotent":true + }, + "EnableHealthServiceAccessForOrganization":{ + "name":"EnableHealthServiceAccessForOrganization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "errors":[ + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Calling this operation enables AWS Health to work with AWS Organizations. This applies a Service Linked Role (SLR) to the master account in the organization. To learn more about the steps in this process, visit enabling service access for AWS Health in AWS Organizations. To call this operation, you must sign in as an IAM user, assume an IAM role, or sign in as the root user (not recommended) in the organization's master account.
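Illustrative aside, not part of the service model: the enable-then-verify flow described above and by DescribeHealthServiceStatusForOrganization could be exercised as in this sketch, assuming credentials for the organization's master account and the usual snake_case method mapping.

import botocore.session

# Requires credentials for the organization's master account.
health = botocore.session.get_session().create_client(
    "health", region_name="us-east-1")

health.enable_health_service_access_for_organization()
status = health.describe_health_service_status_for_organization()
# Expected values: ENABLED | DISABLED | PENDING.
print(status["healthServiceAccessStatusForOrganization"])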

", + "idempotent":true } }, "shapes":{ @@ -114,7 +206,10 @@ "shape":"entityValue", "documentation":"

The ID of the affected entity.

" }, - "entityUrl":{"shape":"entityUrl"}, + "entityUrl":{ + "shape":"entityUrl", + "documentation":"

The URL of the affected entity.

" + }, "awsAccountId":{ "shape":"accountId", "documentation":"

The 12-digit AWS account number that contains the affected entity.

" @@ -134,6 +229,14 @@ }, "documentation":"

Information about an entity that is affected by a Health event.

" }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "documentation":"

EnableHealthServiceAccessForOrganization is already in progress. Wait for the action to complete before trying again. To get the current status, use the DescribeHealthServiceStatusForOrganization operation.

", + "exception":true + }, "DateTimeRange":{ "type":"structure", "members":{ @@ -148,13 +251,87 @@ }, "documentation":"

A range of dates and times that is used by the EventFilter and EntityFilter objects. If from is set and to is set: match items where the timestamp (startTime, endTime, or lastUpdatedTime) is between from and to inclusive. If from is set and to is not set: match items where the timestamp value is equal to or after from. If from is not set and to is set: match items where the timestamp value is equal to or before to.

" }, + "DescribeAffectedAccountsForOrganizationRequest":{ + "type":"structure", + "required":["eventArn"], + "members":{ + "eventArn":{ + "shape":"eventArn", + "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID. Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

" + }, + "nextToken":{ + "shape":"nextToken", + "documentation":"

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.

" + }, + "maxResults":{ + "shape":"maxResults", + "documentation":"

The maximum number of items to return in one batch, between 10 and 100, inclusive.
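Illustrative aside, not part of the service model: because this operation is registered in paginators-1.json above with affectedAccounts as its result key, the nextToken handling described here can be delegated to a botocore paginator, as in this sketch (the event ARN mirrors the documentation example; region and credentials are placeholders).

import botocore.session

health = botocore.session.get_session().create_client(
    "health", region_name="us-east-1")

event_arn = ("arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/"
             "EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456")

# The paginator reissues the request with each returned nextToken.
paginator = health.get_paginator("describe_affected_accounts_for_organization")
for page in paginator.paginate(eventArn=event_arn):
    for account_id in page["affectedAccounts"]:
        print(account_id)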

" + } + } + }, + "DescribeAffectedAccountsForOrganizationResponse":{ + "type":"structure", + "members":{ + "affectedAccounts":{ + "shape":"affectedAccountsList", + "documentation":"

A JSON set of elements of the affected accounts.

" + }, + "nextToken":{ + "shape":"nextToken", + "documentation":"

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.

" + } + } + }, + "DescribeAffectedEntitiesForOrganizationFailedSet":{ + "type":"list", + "member":{"shape":"OrganizationAffectedEntitiesErrorItem"} + }, + "DescribeAffectedEntitiesForOrganizationRequest":{ + "type":"structure", + "required":["organizationEntityFilters"], + "members":{ + "organizationEntityFilters":{ + "shape":"OrganizationEntityFiltersList", + "documentation":"

A JSON set of elements including the awsAccountId and the eventArn.

" + }, + "locale":{ + "shape":"locale", + "documentation":"

The locale (language) to return information in. English (en) is the default and the only supported value at this time.

" + }, + "nextToken":{ + "shape":"nextToken", + "documentation":"

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.

" + }, + "maxResults":{ + "shape":"maxResults", + "documentation":"

The maximum number of items to return in one batch, between 10 and 100, inclusive.

" + } + } + }, + "DescribeAffectedEntitiesForOrganizationResponse":{ + "type":"structure", + "members":{ + "entities":{ + "shape":"EntityList", + "documentation":"

A JSON set of elements including the awsAccountId and its entityArn, entityValue and its entityUrl, lastUpdatedTime, statusCode, and tags.

" + }, + "failedSet":{ + "shape":"DescribeAffectedEntitiesForOrganizationFailedSet", + "documentation":"

A JSON set of elements of the failed response, including the awsAccountId, errorMessage, errorName, and eventArn.

" + }, + "nextToken":{ + "shape":"nextToken", + "documentation":"

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.

" + } + } + }, "DescribeAffectedEntitiesRequest":{ "type":"structure", "required":["filter"], "members":{ "filter":{ "shape":"EntityFilter", - "documentation":"

Values to narrow the results returned. At least one event ARN is required.

" + "documentation":"

Values to narrow the results returned. At least one event ARN is required.

" }, "locale":{ "shape":"locale", @@ -240,6 +417,41 @@ "type":"list", "member":{"shape":"EventDetailsErrorItem"} }, + "DescribeEventDetailsForOrganizationFailedSet":{ + "type":"list", + "member":{"shape":"OrganizationEventDetailsErrorItem"} + }, + "DescribeEventDetailsForOrganizationRequest":{ + "type":"structure", + "required":["organizationEventDetailFilters"], + "members":{ + "organizationEventDetailFilters":{ + "shape":"OrganizationEventDetailFiltersList", + "documentation":"

A set of JSON elements that includes the awsAccountId and the eventArn.
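Illustrative aside, not part of the service model: each element of organizationEventDetailFilters pairs an eventArn with an awsAccountId (the EventAccountFilter shape defined later in this file), as in this sketch; both values are placeholders.

import botocore.session

health = botocore.session.get_session().create_client(
    "health", region_name="us-east-1")

filters = [
    {
        "eventArn": ("arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/"
                     "EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456"),  # placeholder
        "awsAccountId": "111122223333",  # placeholder
    }
]
details = health.describe_event_details_for_organization(
    organizationEventDetailFilters=filters)
print(details["successfulSet"], details["failedSet"])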

" + }, + "locale":{ + "shape":"locale", + "documentation":"

The locale (language) to return information in. English (en) is the default and the only supported value at this time.

" + } + } + }, + "DescribeEventDetailsForOrganizationResponse":{ + "type":"structure", + "members":{ + "successfulSet":{ + "shape":"DescribeEventDetailsForOrganizationSuccessfulSet", + "documentation":"

Information about the events that could be retrieved.

" + }, + "failedSet":{ + "shape":"DescribeEventDetailsForOrganizationFailedSet", + "documentation":"

Error messages for any events that could not be retrieved.

" + } + } + }, + "DescribeEventDetailsForOrganizationSuccessfulSet":{ + "type":"list", + "member":{"shape":"OrganizationEventDetails"} + }, "DescribeEventDetailsRequest":{ "type":"structure", "required":["eventArns"], @@ -305,6 +517,40 @@ } } }, + "DescribeEventsForOrganizationRequest":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"OrganizationEventFilter", + "documentation":"

Values to narrow the results returned.

" + }, + "nextToken":{ + "shape":"nextToken", + "documentation":"

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.

" + }, + "maxResults":{ + "shape":"maxResults", + "documentation":"

The maximum number of items to return in one batch, between 10 and 100, inclusive.

" + }, + "locale":{ + "shape":"locale", + "documentation":"

The locale (language) to return information in. English (en) is the default and the only supported value at this time.

" + } + } + }, + "DescribeEventsForOrganizationResponse":{ + "type":"structure", + "members":{ + "events":{ + "shape":"OrganizationEventList", + "documentation":"

The events that match the specified filter criteria.

" + }, + "nextToken":{ + "shape":"nextToken", + "documentation":"

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.

" + } + } + }, "DescribeEventsRequest":{ "type":"structure", "members":{ @@ -339,6 +585,15 @@ } } }, + "DescribeHealthServiceStatusForOrganizationResponse":{ + "type":"structure", + "members":{ + "healthServiceAccessStatusForOrganization":{ + "shape":"healthServiceAccessStatusForOrganization", + "documentation":"

Information about the status of enabling or disabling AWS Health Organizational View in your organization.

Valid values are ENABLED | DISABLED | PENDING.

" + } + } + }, "EntityAggregate":{ "type":"structure", "members":{ @@ -436,7 +691,25 @@ "documentation":"

The most recent status of the event. Possible values are open, closed, and upcoming.

" } }, - "documentation":"

Summary information about an event, returned by the DescribeEvents operation. The DescribeEventDetails operation also returns this information, as well as the EventDescription and additional event metadata.

" + "documentation":"

Summary information about an AWS Health event.

" + }, + "EventAccountFilter":{ + "type":"structure", + "required":[ + "eventArn", + "awsAccountId" + ], + "members":{ + "eventArn":{ + "shape":"eventArn", + "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID. Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

" + }, + "awsAccountId":{ + "shape":"accountId", + "documentation":"

The 12-digit AWS account number that contains the affected entities.

" + } + }, + "documentation":"

The values used to filter results from the DescribeEventDetailsForOrganization and DescribeAffectedEntitiesForOrganization operations.

" }, "EventAggregate":{ "type":"structure", @@ -517,7 +790,7 @@ }, "eventTypeCodes":{ "shape":"eventTypeList", - "documentation":"

A list of unique identifiers for event types. For example, \"AWS_EC2_SYSTEM_MAINTENANCE_EVENT\",\"AWS_RDS_MAINTENANCE_SCHEDULED\"

" + "documentation":"

A list of unique identifiers for event types. For example, \"AWS_EC2_SYSTEM_MAINTENANCE_EVENT\",\"AWS_RDS_MAINTENANCE_SCHEDULED\".

" }, "services":{ "shape":"serviceList", @@ -630,6 +903,165 @@ "documentation":"

The specified pagination token (nextToken) is not valid.

", "exception":true }, + "OrganizationAffectedEntitiesErrorItem":{ + "type":"structure", + "members":{ + "awsAccountId":{ + "shape":"accountId", + "documentation":"

The 12-digit AWS account number that contains the affected entities.

" + }, + "eventArn":{ + "shape":"eventArn", + "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID. Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

" + }, + "errorName":{ + "shape":"string", + "documentation":"

The name of the error.

" + }, + "errorMessage":{ + "shape":"string", + "documentation":"

A message that describes the error.

" + } + }, + "documentation":"

Error information returned when a DescribeAffectedEntitiesForOrganization operation cannot find or process a specific entity.

" + }, + "OrganizationEntityFiltersList":{ + "type":"list", + "member":{"shape":"EventAccountFilter"}, + "max":10, + "min":1 + }, + "OrganizationEvent":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"eventArn", + "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID. Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

" + }, + "service":{ + "shape":"service", + "documentation":"

The AWS service that is affected by the event. For example, EC2, RDS.

" + }, + "eventTypeCode":{ + "shape":"eventTypeCode", + "documentation":"

The unique identifier for the event type. The format is AWS_SERVICE_DESCRIPTION. For example, AWS_EC2_SYSTEM_MAINTENANCE_EVENT.

" + }, + "eventTypeCategory":{ + "shape":"eventTypeCategory", + "documentation":"

The category of the event type.

" + }, + "region":{ + "shape":"region", + "documentation":"

The AWS Region name of the event.

" + }, + "startTime":{ + "shape":"timestamp", + "documentation":"

The date and time that the event began.

" + }, + "endTime":{ + "shape":"timestamp", + "documentation":"

The date and time that the event ended.

" + }, + "lastUpdatedTime":{ + "shape":"timestamp", + "documentation":"

The most recent date and time that the event was updated.

" + }, + "statusCode":{ + "shape":"eventStatusCode", + "documentation":"

The most recent status of the event. Possible values are open, closed, and upcoming.

" + } + }, + "documentation":"

Summary information about an event, returned by the DescribeEventsForOrganization operation.

" + }, + "OrganizationEventDetailFiltersList":{ + "type":"list", + "member":{"shape":"EventAccountFilter"}, + "max":10, + "min":1 + }, + "OrganizationEventDetails":{ + "type":"structure", + "members":{ + "awsAccountId":{ + "shape":"accountId", + "documentation":"

The 12-digit AWS account number that contains the affected entities.

" + }, + "event":{"shape":"Event"}, + "eventDescription":{"shape":"EventDescription"}, + "eventMetadata":{ + "shape":"eventMetadata", + "documentation":"

Additional metadata about the event.

" + } + }, + "documentation":"

Detailed information about an event. A combination of an Event object, an EventDescription object, and additional metadata about the event. Returned by the DescribeEventDetailsForOrganization operation.

" + }, + "OrganizationEventDetailsErrorItem":{ + "type":"structure", + "members":{ + "awsAccountId":{ + "shape":"accountId", + "documentation":"

The 12-digit AWS account number that contains the affected entities.

" + }, + "eventArn":{ + "shape":"eventArn", + "documentation":"

The unique identifier for the event. Format: arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID. Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

" + }, + "errorName":{ + "shape":"string", + "documentation":"

The name of the error.

" + }, + "errorMessage":{ + "shape":"string", + "documentation":"

A message that describes the error.

" + } + }, + "documentation":"

Error information returned when a DescribeEventDetailsForOrganization operation cannot find a specified event.

" + }, + "OrganizationEventFilter":{ + "type":"structure", + "members":{ + "eventTypeCodes":{ + "shape":"eventTypeList", + "documentation":"

A list of unique identifiers for event types. For example, \"AWS_EC2_SYSTEM_MAINTENANCE_EVENT\",\"AWS_RDS_MAINTENANCE_SCHEDULED\".

" + }, + "awsAccountIds":{ + "shape":"awsAccountIdsList", + "documentation":"

A list of 12-digit AWS account numbers that contain the affected entities.

" + }, + "services":{ + "shape":"serviceList", + "documentation":"

The AWS services associated with the event. For example, EC2, RDS.

" + }, + "regions":{ + "shape":"regionList", + "documentation":"

A list of AWS Regions.

" + }, + "startTime":{"shape":"DateTimeRange"}, + "endTime":{"shape":"DateTimeRange"}, + "lastUpdatedTime":{"shape":"DateTimeRange"}, + "entityArns":{ + "shape":"entityArnList", + "documentation":"

A list of entity ARNs (unique identifiers).

" + }, + "entityValues":{ + "shape":"entityValueList", + "documentation":"

A list of entity identifiers, such as EC2 instance IDs (i-34ab692e) or EBS volumes (vol-426ab23e).

" + }, + "eventTypeCategories":{ + "shape":"eventTypeCategoryList", + "documentation":"

A list of event type category codes (issue, scheduledChange, or accountNotification).

" + }, + "eventStatusCodes":{ + "shape":"eventStatusCodeList", + "documentation":"

A list of event status codes.

" + } + }, + "documentation":"

The values to filter results from the DescribeEventsForOrganization operation.

" + }, + "OrganizationEventList":{ + "type":"list", + "member":{"shape":"OrganizationEvent"} + }, "UnsupportedLocale":{ "type":"structure", "members":{ @@ -640,17 +1072,30 @@ }, "accountId":{ "type":"string", - "pattern":"[0-9]{12}" + "max":12, + "pattern":"^\\S+$" + }, + "affectedAccountsList":{ + "type":"list", + "member":{"shape":"accountId"} }, "aggregateValue":{"type":"string"}, "availabilityZone":{ "type":"string", + "max":18, + "min":6, "pattern":"[a-z]{2}\\-[0-9a-z\\-]{4,16}" }, "availabilityZones":{ "type":"list", "member":{"shape":"availabilityZone"} }, + "awsAccountIdsList":{ + "type":"list", + "member":{"shape":"accountId"}, + "max":50, + "min":1 + }, "count":{"type":"integer"}, "dateTimeRangeList":{ "type":"list", @@ -660,7 +1105,8 @@ }, "entityArn":{ "type":"string", - "max":1600 + "max":1600, + "pattern":".{0,1600}" }, "entityArnList":{ "type":"list", @@ -682,13 +1128,11 @@ "max":3, "min":1 }, - "entityUrl":{ - "type":"string", - "pattern":"https?://.+\\.(amazon\\.com|amazonaws\\.com|amazonaws\\.cn|c2s\\.ic\\.gov|sc2s\\.sgov\\.gov|amazonaws-us-gov.com)/.*" - }, + "entityUrl":{"type":"string"}, "entityValue":{ "type":"string", - "max":256 + "max":256, + "pattern":".{0,256}" }, "entityValueList":{ "type":"list", @@ -734,7 +1178,8 @@ "eventType":{ "type":"string", "max":100, - "min":3 + "min":3, + "pattern":"[^:/]{3,100}" }, "eventTypeCategory":{ "type":"string", @@ -756,7 +1201,8 @@ "eventTypeCode":{ "type":"string", "max":100, - "min":3 + "min":3, + "pattern":"[a-zA-Z0-9\\_\\-]{3,100}" }, "eventTypeList":{ "type":"list", @@ -764,10 +1210,12 @@ "max":10, "min":1 }, + "healthServiceAccessStatusForOrganization":{"type":"string"}, "locale":{ "type":"string", "max":256, - "min":2 + "min":2, + "pattern":".{2,256}" }, "maxResults":{ "type":"integer", @@ -781,10 +1229,14 @@ }, "nextToken":{ "type":"string", - "pattern":"[a-zA-Z0-9=/+_.-]{4,512}" + "max":10000, + "min":4, + "pattern":"[a-zA-Z0-9=/+_.-]{4,10000}" }, "region":{ "type":"string", + "max":25, + "min":2, "pattern":"[^:/]{2,25}" }, "regionList":{ @@ -796,7 +1248,8 @@ "service":{ "type":"string", "max":30, - "min":2 + "min":2, + "pattern":"[^:/]{2,30}" }, "serviceList":{ "type":"list", @@ -812,7 +1265,8 @@ }, "tagKey":{ "type":"string", - "max":127 + "max":127, + "pattern":".{0,127}" }, "tagSet":{ "type":"map", @@ -822,9 +1276,10 @@ }, "tagValue":{ "type":"string", - "max":255 + "max":255, + "pattern":".{0,255}" }, "timestamp":{"type":"timestamp"} }, - "documentation":"AWS Health

The AWS Health API provides programmatic access to the AWS Health information that is presented in the AWS Personal Health Dashboard. You can get information about events that affect your AWS resources:

In addition, these operations provide information about event types and summary counts of events or affected entities:

The Health API requires a Business or Enterprise support plan from AWS Support. Calling the Health API from an account that does not have a Business or Enterprise support plan causes a SubscriptionRequiredException.

For authentication of requests, AWS Health uses the Signature Version 4 Signing Process.

See the AWS Health User Guide for information about how to use the API.

Service Endpoint

The HTTP endpoint for the AWS Health API is:

  • https://health.us-east-1.amazonaws.com

" + "documentation":"AWS Health

The AWS Health API provides programmatic access to the AWS Health information that is presented in the AWS Personal Health Dashboard. You can get information about events that affect your AWS resources:

In addition, these operations provide information about event types and summary counts of events or affected entities:

AWS Health integrates with AWS Organizations to provide a centralized view of AWS Health events across all accounts in your organization.

You can use the following operations to enable or disable AWS Health from working with AWS Organizations.

The Health API requires a Business or Enterprise support plan from AWS Support. Calling the Health API from an account that does not have a Business or Enterprise support plan causes a SubscriptionRequiredException.

For authentication of requests, AWS Health uses the Signature Version 4 Signing Process.

See the AWS Health User Guide for information about how to use the API.

Service Endpoint

The HTTP endpoint for the AWS Health API is:

  • https://health.us-east-1.amazonaws.com

" } diff --git a/botocore/data/iam/2010-05-08/service-2.json b/botocore/data/iam/2010-05-08/service-2.json index 14ffb3b6..8f3e7215 100644 --- a/botocore/data/iam/2010-05-08/service-2.json +++ b/botocore/data/iam/2010-05-08/service-2.json @@ -230,7 +230,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between AWS and the OIDC provider.

When you create the IAM OIDC provider, you specify the following:

  • The URL of the OIDC identity provider (IdP) to trust

  • A list of client IDs (also known as audiences) that identify the application or applications that are allowed to authenticate using the OIDC provider

  • A list of thumbprints of the server certificate(s) that the IdP uses

You get all of this information from the OIDC IdP that you want to use to access AWS.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

" + "documentation":"

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between AWS and the OIDC provider.

When you create the IAM OIDC provider, you specify the following:

  • The URL of the OIDC identity provider (IdP) to trust

  • A list of client IDs (also known as audiences) that identify the application or applications that are allowed to authenticate using the OIDC provider

  • A list of thumbprints of one or more server certificates that the IdP uses

You get all of this information from the OIDC IdP that you want to use to access AWS.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

" }, "CreatePolicy":{ "name":"CreatePolicy", @@ -1932,7 +1932,7 @@ {"shape":"InvalidInputException"}, {"shape":"PolicyEvaluationException"} ], - "documentation":"

Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API operations and AWS resources to determine the policies' effective permissions. The policies are provided as strings.

The simulation does not perform the API operations; it only checks the authorization to determine if the simulated policies allow or deny the operations.

If you want to simulate existing policies attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.

Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.

If the output is long, you can use MaxItems and Marker parameters to paginate the results.

" + "documentation":"

Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API operations and AWS resources to determine the policies' effective permissions. The policies are provided as strings.

The simulation does not perform the API operations; it only checks the authorization to determine if the simulated policies allow or deny the operations.

If you want to simulate existing policies that are attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.

Context keys are variables that are maintained by AWS and its services and which provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.

If the output is long, you can use MaxItems and Marker parameters to paginate the results.

" }, "SimulatePrincipalPolicy":{ "name":"SimulatePrincipalPolicy", @@ -2563,7 +2563,7 @@ }, "NewPassword":{ "shape":"passwordType", - "documentation":"

The new password. The new password must conform to the AWS account's password policy, if one exists.

The regex pattern that is used to validate this parameter is a string of characters. That string can include almost any printable ASCII character from the space (\\u0020) through the end of the ASCII character range (\\u00FF). You can also include the tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) characters. Any of these characters are valid in a password. However, many tools, such as the AWS Management Console, might restrict the ability to type certain characters because they have special meaning within that tool.

" + "documentation":"

The new password. The new password must conform to the AWS account's password policy, if one exists.

The regex pattern that is used to validate this parameter is a string of characters. That string can include almost any printable ASCII character from the space (\\u0020) through the end of the ASCII character range (\\u00FF). You can also include the tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) characters. Any of these characters are valid in a password. However, many tools, such as the AWS Management Console, might restrict the ability to type certain characters because they have special meaning within that tool.

" } } }, @@ -2671,7 +2671,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the group. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path to the group. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "GroupName":{ "shape":"groupNameType", @@ -2700,7 +2700,7 @@ }, "Path":{ "shape":"pathType", - "documentation":"

The path to the instance profile. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path to the instance profile. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" } } }, @@ -2728,7 +2728,7 @@ }, "Password":{ "shape":"passwordType", - "documentation":"

The new password for the user.

The regex pattern that is used to validate this parameter is a string of characters. That string can include almost any printable ASCII character from the space (\\u0020) through the end of the ASCII character range (\\u00FF). You can also include the tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) characters. Any of these characters are valid in a password. However, many tools, such as the AWS Management Console, might restrict the ability to type certain characters because they have special meaning within that tool.

" + "documentation":"

The new password for the user.

The regex pattern that is used to validate this parameter is a string of characters. That string can include almost any printable ASCII character from the space (\\u0020) through the end of the ASCII character range (\\u00FF). You can also include the tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) characters. Any of these characters are valid in a password. However, many tools, such as the AWS Management Console, might restrict the ability to type certain characters because they have special meaning within that tool.

" }, "PasswordResetRequired":{ "shape":"booleanType", @@ -2791,11 +2791,11 @@ }, "Path":{ "shape":"policyPathType", - "documentation":"

The path for the policy.

For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path for the policy.

For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "PolicyDocument":{ "shape":"policyDocumentType", - "documentation":"

The JSON policy document that you want to use as the content for the new policy.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The JSON policy document that you want to use as the content for the new policy.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" }, "Description":{ "shape":"policyDescriptionType", @@ -2826,7 +2826,7 @@ }, "PolicyDocument":{ "shape":"policyDocumentType", - "documentation":"

The JSON policy document that you want to use as the content for this new version of the policy.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The JSON policy document that you want to use as the content for this new version of the policy.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" }, "SetAsDefault":{ "shape":"booleanType", @@ -2853,7 +2853,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "RoleName":{ "shape":"roleNameType", @@ -2861,7 +2861,7 @@ }, "AssumeRolePolicyDocument":{ "shape":"policyDocumentType", - "documentation":"

The trust relationship policy document that grants an entity permission to assume the role.

In IAM, you must provide a JSON policy that has been converted to a string. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

Upon success, the response includes the same trust policy in JSON format.

" + "documentation":"

The trust relationship policy document that grants an entity permission to assume the role.

In IAM, you must provide a JSON policy that has been converted to a string. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

Upon success, the response includes the same trust policy in JSON format.

" }, "Description":{ "shape":"roleDescriptionType", @@ -2978,7 +2978,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path for the user name. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path for the user name. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "UserName":{ "shape":"userNameType", @@ -3010,7 +3010,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path for the virtual MFA device. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path for the virtual MFA device. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "VirtualMFADeviceName":{ "shape":"virtualMFADeviceName", @@ -3646,9 +3646,13 @@ "shape":"OrganizationsDecisionDetail", "documentation":"

A structure that details how Organizations and its service control policies affect the results of the simulation. Only applies if the simulated user's account is part of an organization.

" }, + "PermissionsBoundaryDecisionDetail":{ + "shape":"PermissionsBoundaryDecisionDetail", + "documentation":"

Contains information about the effect that a permissions boundary has on a policy simulation when the boundary is applied to an IAM entity.

" + }, "EvalDecisionDetails":{ "shape":"EvalDecisionDetailsType", - "documentation":"

Additional details about the results of the evaluation decision. When there are both IAM policies and resource policies, this parameter explains how each set of policies contributes to the final evaluation decision. When simulating cross-account access to a resource, both the resource-based policy and the caller's IAM policy must grant access. See How IAM Roles Differ from Resource-based Policies

" + "documentation":"

Additional details about the results of the cross-account evaluation decision. This parameter is populated only for cross-account simulations. It contains a brief summary of how each policy type contributes to the final evaluation decision.

If the simulation evaluates policies within the same account and includes a resource ARN, then the parameter is present but the response is empty. If the simulation evaluates policies within the same account and specifies all resources (*), then the parameter is not returned.

When you make a cross-account request, AWS evaluates the request in the trusting account and the trusted account. The request is allowed only if both evaluations return true. For more information about how policies are evaluated, see Evaluating Policies Within a Single Account.

If an AWS Organizations SCP included in the evaluation denies access, the simulation ends. In this case, policy evaluation does not proceed any further and this parameter is not returned.

" }, "ResourceSpecificResults":{ "shape":"ResourceSpecificResultListType", @@ -3815,7 +3819,7 @@ "members":{ "PolicyInputList":{ "shape":"SimulationPolicyListType", - "documentation":"

A list of policies for which you want the list of context keys referenced in those policies. Each document is specified as a string containing the complete, valid JSON text of an IAM policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

A list of policies for which you want the list of context keys referenced in those policies. Each document is specified as a string containing the complete, valid JSON text of an IAM policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" } } }, @@ -3839,7 +3843,7 @@ }, "PolicyInputList":{ "shape":"SimulationPolicyListType", - "documentation":"

An optional list of additional policies for which you want the list of context keys that are referenced.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

An optional list of additional policies for which you want the list of context keys that are referenced.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" } } }, @@ -4749,7 +4753,7 @@ }, "PathPrefix":{ "shape":"policyPathType", - "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "Marker":{ "shape":"markerType", @@ -4789,7 +4793,7 @@ }, "PathPrefix":{ "shape":"policyPathType", - "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "Marker":{ "shape":"markerType", @@ -4829,7 +4833,7 @@ }, "PathPrefix":{ "shape":"policyPathType", - "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "Marker":{ "shape":"markerType", @@ -4873,7 +4877,7 @@ }, "PathPrefix":{ "shape":"pathType", - "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all entities.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all entities.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "PolicyUsageFilter":{ "shape":"PolicyUsageType", @@ -4994,7 +4998,7 @@ "members":{ "PathPrefix":{ "shape":"pathPrefixType", - "documentation":"

The path prefix for filtering the results. For example, the prefix /division_abc/subdivision_xyz/ gets all groups whose path starts with /division_abc/subdivision_xyz/.

This parameter is optional. If it is not included, it defaults to a slash (/), listing all groups. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path prefix for filtering the results. For example, the prefix /division_abc/subdivision_xyz/ gets all groups whose path starts with /division_abc/subdivision_xyz/.

This parameter is optional. If it is not included, it defaults to a slash (/), listing all groups. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "Marker":{ "shape":"markerType", @@ -5067,7 +5071,7 @@ "members":{ "PathPrefix":{ "shape":"pathPrefixType", - "documentation":"

The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ gets all instance profiles whose path starts with /application_abc/component_xyz/.

This parameter is optional. If it is not included, it defaults to a slash (/), listing all instance profiles. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ gets all instance profiles whose path starts with /application_abc/component_xyz/.

This parameter is optional. If it is not included, it defaults to a slash (/), listing all instance profiles. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "Marker":{ "shape":"markerType", @@ -5215,7 +5219,7 @@ }, "PathPrefix":{ "shape":"policyPathType", - "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "PolicyUsageFilter":{ "shape":"PolicyUsageType", @@ -5363,7 +5367,7 @@ "members":{ "PathPrefix":{ "shape":"pathPrefixType", - "documentation":"

The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ gets all roles whose path starts with /application_abc/component_xyz/.

This parameter is optional. If it is not included, it defaults to a slash (/), listing all roles. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ gets all roles whose path starts with /application_abc/component_xyz/.

This parameter is optional. If it is not included, it defaults to a slash (/), listing all roles. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "Marker":{ "shape":"markerType", @@ -5449,7 +5453,7 @@ "members":{ "PathPrefix":{ "shape":"pathPrefixType", - "documentation":"

The path prefix for filtering the results. For example: /company/servercerts would get all server certificates for which the path starts with /company/servercerts.

This parameter is optional. If it is not included, it defaults to a slash (/), listing all server certificates. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path prefix for filtering the results. For example: /company/servercerts would get all server certificates for which the path starts with /company/servercerts.

This parameter is optional. If it is not included, it defaults to a slash (/), listing all server certificates. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "Marker":{ "shape":"markerType", @@ -5616,7 +5620,7 @@ "members":{ "PathPrefix":{ "shape":"pathPrefixType", - "documentation":"

The path prefix for filtering the results. For example: /division_abc/subdivision_xyz/, which would get all user names whose path starts with /division_abc/subdivision_xyz/.

This parameter is optional. If it is not included, it defaults to a slash (/), listing all user names. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path prefix for filtering the results. For example: /division_abc/subdivision_xyz/, which would get all user names whose path starts with /division_abc/subdivision_xyz/.

This parameter is optional. If it is not included, it defaults to a slash (/), listing all user names. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "Marker":{ "shape":"markerType", @@ -5913,6 +5917,16 @@ "type":"string", "enum":["PermissionsBoundaryPolicy"] }, + "PermissionsBoundaryDecisionDetail":{ + "type":"structure", + "members":{ + "AllowedByPermissionsBoundary":{ + "shape":"booleanType", + "documentation":"

Specifies whether an action is allowed by a permissions boundary that is applied to an IAM entity (user or role). A value of true means that the permissions boundary does not deny the action. This means that the policy includes an Allow statement that matches the request. In this case, if an identity-based policy also allows the action, the request is allowed. A value of false means that either the requested action is not allowed (implicitly denied) or that the action is explicitly denied by the permissions boundary. In both of these cases, the action is not allowed, regardless of the identity-based policy.

" + } + }, + "documentation":"

Contains information about the effect that a permissions boundary has on a policy simulation when the boundary is applied to an IAM entity.

" + }, "Policy":{ "type":"structure", "members":{ @@ -6163,7 +6177,7 @@ }, "PolicyDocument":{ "shape":"policyDocumentType", - "documentation":"

The policy document.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The policy document.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" } } }, @@ -6202,7 +6216,7 @@ }, "PolicyDocument":{ "shape":"policyDocumentType", - "documentation":"

The policy document.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The policy document.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" } } }, @@ -6241,7 +6255,7 @@ }, "PolicyDocument":{ "shape":"policyDocumentType", - "documentation":"

The policy document.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The policy document.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" } } }, @@ -6394,7 +6408,11 @@ }, "EvalDecisionDetails":{ "shape":"EvalDecisionDetailsType", - "documentation":"

Additional details about the results of the evaluation decision. When there are both IAM policies and resource policies, this parameter explains how each set of policies contributes to the final evaluation decision. When simulating cross-account access to a resource, both the resource-based policy and the caller's IAM policy must grant access.

" + "documentation":"

Additional details about the results of the evaluation decision on a single resource. This parameter is returned only for cross-account simulations. This parameter explains how each policy type contributes to the resource-specific evaluation decision.

" + }, + "PermissionsBoundaryDecisionDetail":{ + "shape":"PermissionsBoundaryDecisionDetail", + "documentation":"

Contains information about the effect that a permissions boundary has on a policy simulation when that boundary is applied to an IAM entity.

" } }, "documentation":"

Contains the result of the simulation of a single API operation call on a single resource.

This data type is used by a member of the EvaluationResult data type.

" @@ -6942,7 +6960,11 @@ "members":{ "PolicyInputList":{ "shape":"SimulationPolicyListType", - "documentation":"

A list of policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. Do not include any resource-based policies in this parameter. Any resource-based policy must be submitted with the ResourcePolicy parameter. The policies cannot be \"scope-down\" policies, such as you could include in a call to GetFederationToken or one of the AssumeRole API operations. In other words, do not use policies designed to restrict what a user can do while using the temporary credentials.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

A list of policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. Do not include any resource-based policies in this parameter. Any resource-based policy must be submitted with the ResourcePolicy parameter. The policies cannot be \"scope-down\" policies, such as you could include in a call to GetFederationToken or one of the AssumeRole API operations. In other words, do not use policies designed to restrict what a user can do while using the temporary credentials.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + }, + "PermissionsBoundaryPolicyInputList":{ + "shape":"SimulationPolicyListType", + "documentation":"

The IAM permissions boundary policy to simulate. The permissions boundary sets the maximum permissions that an IAM entity can have. You can input only one permissions boundary when you pass a policy to this operation. For more information about permissions boundaries, see Permissions Boundaries for IAM Entities in the IAM User Guide. The policy input is specified as a string that contains the complete, valid JSON text of a permissions boundary policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

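A minimal sketch of how the new parameter is passed alongside PolicyInputList in a simulate_custom_policy call; the policy documents and action names are made-up examples, not anything prescribed by this model:

import json
import boto3

iam = boto3.client("iam")

identity_policy = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": "s3:*", "Resource": "*"}],
}
# The boundary caps the identity policy above, so only s3:GetObject should be allowed.
boundary_policy = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}],
}

response = iam.simulate_custom_policy(
    PolicyInputList=[json.dumps(identity_policy)],
    PermissionsBoundaryPolicyInputList=[json.dumps(boundary_policy)],
    ActionNames=["s3:GetObject", "s3:PutObject"],
)
for evaluation in response["EvaluationResults"]:
    print(evaluation["EvalActionName"], "->", evaluation["EvalDecision"])
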
" }, "ActionNames":{ "shape":"ActionNameListType", @@ -6954,7 +6976,7 @@ }, "ResourcePolicy":{ "shape":"policyDocumentType", - "documentation":"

A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" }, "ResourceOwner":{ "shape":"ResourceNameType", @@ -7013,7 +7035,11 @@ }, "PolicyInputList":{ "shape":"SimulationPolicyListType", - "documentation":"

An optional list of additional policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

An optional list of additional policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + }, + "PermissionsBoundaryPolicyInputList":{ + "shape":"SimulationPolicyListType", + "documentation":"

The IAM permissions boundary policy to simulate. The permissions boundary sets the maximum permissions that the entity can have. You can input only one permissions boundary when you pass a policy to this operation. An IAM entity can only have one permissions boundary in effect at a time. For example, if a permissions boundary is attached to an entity and you pass in a different permissions boundary policy using this parameter, then the new permission boundary policy is used for the simulation. For more information about permissions boundaries, see Permissions Boundaries for IAM Entities in the IAM User Guide. The policy input is specified as a string containing the complete, valid JSON text of a permissions boundary policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" }, "ActionNames":{ "shape":"ActionNameListType", @@ -7025,7 +7051,7 @@ }, "ResourcePolicy":{ "shape":"policyDocumentType", - "documentation":"

A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" }, "ResourceOwner":{ "shape":"ResourceNameType", @@ -7270,7 +7296,7 @@ }, "PolicyDocument":{ "shape":"policyDocumentType", - "documentation":"

The policy that grants an entity permission to assume the role.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The policy that grants an entity permission to assume the role.

You must provide policies in JSON format in IAM. However, for AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

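This member appears to belong to the assume-role-policy update request; a minimal sketch with a placeholder role name and account ID, serializing the trust policy to the JSON form described above:

import json
import boto3

iam = boto3.client("iam")

trust_policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::123456789012:root"},  # placeholder account
        "Action": "sts:AssumeRole",
    }],
}
# Replace the role's trust policy with the JSON document above.
iam.update_assume_role_policy(
    RoleName="example-role",
    PolicyDocument=json.dumps(trust_policy),
)
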
" } } }, @@ -7284,7 +7310,7 @@ }, "NewPath":{ "shape":"pathType", - "documentation":"

New path for the IAM group. Only include this if changing the group's path.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

New path for the IAM group. Only include this if changing the group's path.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "NewGroupName":{ "shape":"groupNameType", @@ -7302,7 +7328,7 @@ }, "Password":{ "shape":"passwordType", - "documentation":"

The new password for the specified IAM user.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

However, the format can be further restricted by the account administrator by setting a password policy on the AWS account. For more information, see UpdateAccountPasswordPolicy.

" + "documentation":"

The new password for the specified IAM user.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

However, the format can be further restricted by the account administrator by setting a password policy on the AWS account. For more information, see UpdateAccountPasswordPolicy.

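A minimal sketch of the corresponding boto3 call; the user name and password are placeholders, and the account's password policy can impose stricter rules than the regex above:

import boto3

iam = boto3.client("iam")

# Set a new console password and force a reset at next sign-in.
iam.update_login_profile(
    UserName="example-user",
    Password="CorrectHorseBatteryStaple1!",  # placeholder; must satisfy the account policy
    PasswordResetRequired=True,
)
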
" }, "PasswordResetRequired":{ "shape":"booleanObjectType", @@ -7435,7 +7461,7 @@ }, "NewPath":{ "shape":"pathType", - "documentation":"

The new path for the server certificate. Include this only if you are updating the server certificate's path.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The new path for the server certificate. Include this only if you are updating the server certificate's path.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "NewServerCertificateName":{ "shape":"serverCertificateNameType", @@ -7495,7 +7521,7 @@ }, "NewPath":{ "shape":"pathType", - "documentation":"

New path for the IAM user. Include this parameter only if you're changing the user's path.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

New path for the IAM user. Include this parameter only if you're changing the user's path.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "NewUserName":{ "shape":"userNameType", @@ -7516,7 +7542,7 @@ }, "SSHPublicKeyBody":{ "shape":"publicKeyMaterialType", - "documentation":"

The SSH public key. The public key must be encoded in ssh-rsa format or PEM format. The minimum bit-length of the public key is 2048 bits. For example, you can generate a 2048-bit key, and the resulting PEM file is 1679 bytes long.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The SSH public key. The public key must be encoded in ssh-rsa format or PEM format. The minimum bit-length of the public key is 2048 bits. For example, you can generate a 2048-bit key, and the resulting PEM file is 1679 bytes long.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

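A minimal sketch, assuming an existing ssh-rsa public key at a placeholder path:

import boto3

iam = boto3.client("iam")

with open("id_rsa.pub") as key_file:  # placeholder path to an ssh-rsa or PEM public key
    public_key = key_file.read()

response = iam.upload_ssh_public_key(
    UserName="example-user",
    SSHPublicKeyBody=public_key,
)
print(response["SSHPublicKey"]["SSHPublicKeyId"])
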
" } } }, @@ -7540,7 +7566,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path for the server certificate. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/). This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

If you are uploading a server certificate specifically for use with Amazon CloudFront distributions, you must specify a path using the path parameter. The path must begin with /cloudfront and must include a trailing slash (for example, /cloudfront/test/).

" + "documentation":"

The path for the server certificate. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/). This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

If you are uploading a server certificate specifically for use with Amazon CloudFront distributions, you must specify a path using the path parameter. The path must begin with /cloudfront and must include a trailing slash (for example, /cloudfront/test/).

" }, "ServerCertificateName":{ "shape":"serverCertificateNameType", @@ -7548,15 +7574,15 @@ }, "CertificateBody":{ "shape":"certificateBodyType", - "documentation":"

The contents of the public key certificate in PEM-encoded format.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The contents of the public key certificate in PEM-encoded format.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" }, "PrivateKey":{ "shape":"privateKeyType", - "documentation":"

The contents of the private key in PEM-encoded format.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The contents of the private key in PEM-encoded format.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" }, "CertificateChain":{ "shape":"certificateChainType", - "documentation":"

The contents of the certificate chain. This is typically a concatenation of the PEM-encoded public key certificates of the chain.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The contents of the certificate chain. This is typically a concatenation of the PEM-encoded public key certificates of the chain.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

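A sketch of the upload these members describe, with placeholder file names; the /cloudfront/ path is only required when the certificate is meant for CloudFront, as noted above:

import boto3

iam = boto3.client("iam")

def read_pem(path):
    with open(path) as handle:
        return handle.read()

response = iam.upload_server_certificate(
    Path="/cloudfront/test/",                     # optional; defaults to "/"
    ServerCertificateName="example-cert",
    CertificateBody=read_pem("certificate.pem"),  # public key certificate
    PrivateKey=read_pem("private-key.pem"),       # matching private key
    CertificateChain=read_pem("chain.pem"),       # optional intermediate chain
)
print(response["ServerCertificateMetadata"]["Arn"])
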
" } } }, @@ -7580,7 +7606,7 @@ }, "CertificateBody":{ "shape":"certificateBodyType", - "documentation":"

The contents of the signing certificate.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The contents of the signing certificate.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" } } }, diff --git a/botocore/data/imagebuilder/2019-12-02/service-2.json b/botocore/data/imagebuilder/2019-12-02/service-2.json index d311dd4a..1066370f 100644 --- a/botocore/data/imagebuilder/2019-12-02/service-2.json +++ b/botocore/data/imagebuilder/2019-12-02/service-2.json @@ -31,7 +31,7 @@ {"shape":"CallRateLimitExceededException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

CancelImageCreation cancels the creation of Image. This operation may only be used on images in a non-terminal state.

" + "documentation":"

CancelImageCreation cancels the creation of Image. This operation can only be used on images in a non-terminal state.

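A minimal sketch of the call; the build version ARN is a placeholder, and clientToken is the idempotency token that a later hunk in this file marks as such:

import uuid
import boto3

imagebuilder = boto3.client("imagebuilder")

# Cancel an in-progress (non-terminal) image build.
response = imagebuilder.cancel_image_creation(
    imageBuildVersionArn=(
        "arn:aws:imagebuilder:us-east-1:123456789012:image/example-image/1.0.0/1"
    ),  # placeholder ARN
    clientToken=str(uuid.uuid4()),
)
print(response.get("imageBuildVersionArn"))
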
" }, "CreateComponent":{ "name":"CreateComponent", @@ -53,7 +53,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a new component that can be used to build, validate, test and assess your image.

" + "documentation":"

Creates a new component that can be used to build, validate, test, and assess your image.

" }, "CreateDistributionConfiguration":{ "name":"CreateDistributionConfiguration", @@ -75,7 +75,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a new distribution configuration. Distribution configurations define and configure the outputs of your pipeline.

" + "documentation":"

Creates a new distribution configuration. Distribution configurations define and configure the outputs of your pipeline.

" }, "CreateImage":{ "name":"CreateImage", @@ -138,7 +138,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceAlreadyExistsException"} ], - "documentation":"

Creates a new image recipe. Image Recipes defines how images are configured, tested and assessed.

" + "documentation":"

Creates a new image recipe. Image recipes define how images are configured, tested, and assessed.

" }, "CreateInfrastructureConfiguration":{ "name":"CreateInfrastructureConfiguration", @@ -435,7 +435,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Gets a infrastructure configuration.

" + "documentation":"

Gets an infrastructure configuration.

" }, "ImportComponent":{ "name":"ImportComponent", @@ -457,7 +457,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Imports a component and transforms its data into a component document.

" + "documentation":"

Imports a component and transforms its data into a component document.

" }, "ListComponentBuildVersions":{ "name":"ListComponentBuildVersions", @@ -495,7 +495,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Returns the list of component build versions for the specified semantic version.

" + "documentation":"

Returns the list of component build versions for the specified semantic version.

" }, "ListDistributionConfigurations":{ "name":"ListDistributionConfigurations", @@ -829,7 +829,7 @@ "members":{ "region":{ "shape":"NonEmptyString", - "documentation":"

The region of the EC2 AMI.

" + "documentation":"

The AWS Region of the EC2 AMI.

" }, "image":{ "shape":"NonEmptyString", @@ -851,7 +851,7 @@ "type":"structure", "members":{ "name":{ - "shape":"NonEmptyString", + "shape":"AmiNameString", "documentation":"

The name of the distribution configuration.

" }, "description":{ @@ -860,19 +860,25 @@ }, "amiTags":{ "shape":"TagMap", - "documentation":"

The tags to apply to AMIs distributed to this region.

" + "documentation":"

The tags to apply to AMIs distributed to this Region.

" }, "launchPermission":{ "shape":"LaunchPermissionConfiguration", "documentation":"

Launch permissions can be used to configure which AWS accounts can use the AMI to launch instances.

" } }, - "documentation":"

Define and configure the outputs AMIs of the pipeline.

" + "documentation":"

Define and configure the output AMIs of the pipeline.

" }, "AmiList":{ "type":"list", "member":{"shape":"Ami"} }, + "AmiNameString":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[-_A-Za-z0-9{][-_A-Za-z0-9\\s:{}]+[-_A-Za-z0-9}]$" + }, "Arn":{"type":"string"}, "ArnList":{ "type":"list", @@ -896,11 +902,12 @@ "members":{ "imageBuildVersionArn":{ "shape":"ImageBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image whose creation you wish to cancel.

" + "documentation":"

The Amazon Resource Name (ARN) of the image whose creation you want to cancel.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

" + "documentation":"

The idempotency token used to make this request idempotent.

", + "idempotencyToken":true } } }, @@ -1002,7 +1009,7 @@ "required":["componentArn"], "members":{ "componentArn":{ - "shape":"ComponentBuildVersionArn", + "shape":"ComponentVersionArnOrBuildVersionArn", "documentation":"

The Amazon Resource Name (ARN) of the component.

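This looks like the component reference used inside image recipes; with the ComponentVersionArnOrBuildVersionArn pattern added further down, the version segment may use x wildcards. A sketch under that assumption, with placeholder names and ARNs:

import uuid
import boto3

imagebuilder = boto3.client("imagebuilder")

response = imagebuilder.create_image_recipe(
    name="example-recipe",
    semanticVersion="1.0.0",
    parentImage="arn:aws:imagebuilder:us-east-1:aws:image/amazon-linux-2-x86/x.x.x",
    components=[
        # "x" wildcards select the latest matching component version.
        {"componentArn": "arn:aws:imagebuilder:us-east-1:aws:component/amazon-cloudwatch-agent-linux/x.x.x"},
    ],
    clientToken=str(uuid.uuid4()),
)
print(response.get("imageRecipeArn"))
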
" } }, @@ -1062,7 +1069,7 @@ "documentation":"

The tags associated with the component.

" } }, - "documentation":"

A high level summary of a component.

" + "documentation":"

A high-level summary of a component.

" }, "ComponentSummaryList":{ "type":"list", @@ -1111,12 +1118,16 @@ "documentation":"

The date that the component was created.

" } }, - "documentation":"

A high level overview of a component semantic version.

" + "documentation":"

A high-level overview of a component semantic version.

" }, "ComponentVersionArn":{ "type":"string", "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):component/[a-z0-9-_]+/\\d+\\.\\d+\\.\\d+$" }, + "ComponentVersionArnOrBuildVersionArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):component/[a-z0-9-_]+/(?:(?:(\\d+|x)\\.(\\d+|x)\\.(\\d+|x))|(?:\\d+\\.\\d+\\.\\d+/\\d+))$" + }, "ComponentVersionList":{ "type":"list", "member":{"shape":"ComponentVersion"} @@ -1136,27 +1147,27 @@ }, "semanticVersion":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the component. This version to follow the semantic version syntax. i.e. major.minor.patch. This could be versioned like software 2.0.1 or date like 2019.12.01.

" + "documentation":"

The semantic version of the component. This version follows the semantic version syntax. For example, major.minor.patch. This could be versioned like software (2.0.1) or like a date (2019.12.01).

" }, "description":{ "shape":"NonEmptyString", - "documentation":"

CThe description of the component. Describes the contents of the component.

" + "documentation":"

The description of the component. Describes the contents of the component.

" }, "changeDescription":{ "shape":"NonEmptyString", - "documentation":"

CThe change description of the component. Describes what change has been made in this version. In other words what makes this version different from other versions of this component.

" + "documentation":"

The change description of the component. Describes what change has been made in this version, or what makes this version different from other versions of this component.

" }, "platform":{ "shape":"Platform", - "documentation":"

CThe platform of the component.

" + "documentation":"

The platform of the component.

" }, "data":{ "shape":"InlineComponentData", - "documentation":"

CThe data of the component.

" + "documentation":"

The data of the component. Used to specify the data inline. Either data or uri can be used to specify the data within the component.

" }, "uri":{ "shape":"Uri", - "documentation":"

CThe uri of the component.

" + "documentation":"

The uri of the component. Must be an S3 URL and the requester must have permission to access the S3 bucket. If you use S3, you can specify component content up to your service quota. Either data or uri can be used to specify the data within the component.

" }, "kmsKeyId":{ "shape":"NonEmptyString", @@ -1164,11 +1175,11 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

CThe tags of the component.

" + "documentation":"

The tags of the component.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

CThe idempotency token of the component.

", + "documentation":"

The idempotency token of the component.

", "idempotencyToken":true } } @@ -1178,15 +1189,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

CThe request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

CThe idempotency token used to make this request idempotent.

" + "documentation":"

The idempotency token used to make this request idempotent.

" }, "componentBuildVersionArn":{ "shape":"ComponentBuildVersionArn", - "documentation":"

CThe Amazon Resource Name (ARN) of the component that was created by this request.

" + "documentation":"

The Amazon Resource Name (ARN) of the component that was created by this request.

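A sketch of the request and response these members describe; the component document below is a throwaway example, and exactly one of data (inline) or uri (an S3 object) should be supplied:

import uuid
import boto3

imagebuilder = boto3.client("imagebuilder")

component_yaml = """\
name: example-component
description: Placeholder document that only echoes a message.
schemaVersion: 1.0
phases:
  - name: build
    steps:
      - name: HelloWorld
        action: ExecuteBash
        inputs:
          commands:
            - echo "hello"
"""

response = imagebuilder.create_component(
    name="example-component",
    semanticVersion="1.0.0",       # major.minor.patch, or date-like such as 2019.12.01
    platform="Linux",
    data=component_yaml,           # inline; pass uri="s3://..." instead for larger documents
    clientToken=str(uuid.uuid4()),
)
print(response.get("componentBuildVersionArn"))
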
" } } }, @@ -1379,7 +1390,7 @@ "members":{ "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe that defines how images are configured, tested and assessed.

" + "documentation":"

The Amazon Resource Name (ARN) of the image recipe that defines how images are configured, tested, and assessed.

" }, "distributionConfigurationArn":{ "shape":"DistributionConfigurationArn", @@ -1439,7 +1450,7 @@ }, "instanceTypes":{ "shape":"InstanceTypeList", - "documentation":"

The instance types of the infrastructure configuration. You may specify one or more instance types to use for this build, the service will pick one of these instance types based on availability.

" + "documentation":"

The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.

" }, "instanceProfileName":{ "shape":"NonEmptyString", @@ -1451,7 +1462,7 @@ }, "subnetId":{ "shape":"NonEmptyString", - "documentation":"

The subnet ID to place the instance used to customize your EC2 AMI in.

" + "documentation":"

The subnet ID in which to place the instance used to customize your EC2 AMI.

" }, "logging":{ "shape":"Logging", @@ -1459,14 +1470,14 @@ }, "keyPair":{ "shape":"NonEmptyString", - "documentation":"

The key pair of the infrastructure configuration. This can be used to log onto and debug the instance used to create your image.

" + "documentation":"

The key pair of the infrastructure configuration. This can be used to log on to and debug the instance used to create your image.

" }, "terminateInstanceOnFailure":{ "shape":"NullableBoolean", - "documentation":"

The terminate instance on failure setting of the infrastructure configuration. Set to false if you wish for Image Builder to retain the instance used to configure your AMI in the event that the build or test phase of your workflow failed.

" + "documentation":"

The terminate instance on failure setting of the infrastructure configuration. Set to false if you want Image Builder to retain the instance used to configure your AMI if the build or test phase of your workflow fails.

" }, "snsTopicArn":{ - "shape":"NonEmptyString", + "shape":"SnsTopicArn", "documentation":"

The SNS topic on which to send image build events.

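Taken together, these members map onto a call roughly like the following; every identifier is a placeholder, and the snsTopicArn must match the stricter SnsTopicArn pattern introduced later in this file:

import uuid
import boto3

imagebuilder = boto3.client("imagebuilder")

response = imagebuilder.create_infrastructure_configuration(
    name="example-infra",
    instanceProfileName="EC2ImageBuilderInstanceProfile",  # placeholder instance profile
    instanceTypes=["t3.medium", "m5.large"],  # the service picks an available type
    subnetId="subnet-0123456789abcdef0",
    securityGroupIds=["sg-0123456789abcdef0"],
    keyPair="example-keypair",                # optional; for logging on to the build instance
    terminateInstanceOnFailure=False,         # keep the instance if build or test fails
    snsTopicArn="arn:aws:sns:us-east-1:123456789012:image-builder-events",
    logging={"s3Logs": {"s3BucketName": "example-logs", "s3KeyPrefix": "image-builder/"}},
    clientToken=str(uuid.uuid4()),
)
print(response.get("infrastructureConfigurationArn"))
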
" }, "tags":{ @@ -1654,18 +1665,18 @@ "members":{ "region":{ "shape":"NonEmptyString", - "documentation":"

" + "documentation":"

The target Region.

" }, "amiDistributionConfiguration":{ "shape":"AmiDistributionConfiguration", - "documentation":"

" + "documentation":"

The specific AMI settings (for example, launch permissions, AMI tags).

" }, "licenseConfigurationArns":{ "shape":"ArnList", - "documentation":"

" + "documentation":"

The License Manager Configuration to associate with the AMI in the specified Region.

" } }, - "documentation":"

" + "documentation":"

Defines the settings for a specific Region.

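An illustrative distributions list built from this structure, as it might be passed to create_distribution_configuration; the Region, tags, account ID, and License Manager ARN are placeholders, and the AMI name relies on the curly-brace characters the new AmiNameString pattern permits:

import uuid
import boto3

imagebuilder = boto3.client("imagebuilder")

distributions = [
    {
        "region": "us-east-1",
        "amiDistributionConfiguration": {
            "name": "example-ami {{ imagebuilder:buildDate }}",  # allowed by AmiNameString
            "amiTags": {"Team": "platform"},
            "launchPermission": {"userIds": ["123456789012"]},
        },
        "licenseConfigurationArns": [
            "arn:aws:license-manager:us-east-1:123456789012:license-configuration:lic-0123456789abcdef0"
        ],
    },
]

response = imagebuilder.create_distribution_configuration(
    name="example-distribution",
    distributions=distributions,
    clientToken=str(uuid.uuid4()),
)
print(response.get("distributionConfigurationArn"))
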
" }, "DistributionConfiguration":{ "type":"structure", @@ -1738,7 +1749,7 @@ "documentation":"

The tags associated with the distribution configuration.

" } }, - "documentation":"

A high level overview a distribution configuration.

" + "documentation":"

A high-level overview of a distribution configuration.

" }, "DistributionConfigurationSummaryList":{ "type":"list", @@ -1785,7 +1796,7 @@ "documentation":"

Use to override the device's volume type.

" } }, - "documentation":"

EBS specific block device mapping specifications.

" + "documentation":"

Amazon EBS-specific block device mapping specifications.

" }, "EbsIopsInteger":{ "type":"integer", @@ -1807,20 +1818,25 @@ "st1" ] }, + "EmptyString":{ + "type":"string", + "max":0, + "min":0 + }, "ErrorMessage":{"type":"string"}, "Filter":{ "type":"structure", "members":{ "name":{ "shape":"FilterName", - "documentation":"

" + "documentation":"

The name of the filter. Filter names are case-sensitive.

" }, "values":{ "shape":"FilterValues", - "documentation":"

" + "documentation":"

The filter values. Filter values are case-sensitive.

" } }, - "documentation":"

" + "documentation":"

A filter name and value pair that is used to return a more specific list of results from a list operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.

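For instance, a filtered list call might look like the sketch below; the filter name "platform" is an assumption for illustration rather than something this hunk documents:

import boto3

imagebuilder = boto3.client("imagebuilder")

# Filter names and values are case-sensitive.
response = imagebuilder.list_images(
    owner="Amazon",
    filters=[{"name": "platform", "values": ["Linux"]}],
)
for image in response.get("imageVersionList", []):
    print(image.get("name"), image.get("version"))
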
" }, "FilterList":{ "type":"list", @@ -1857,7 +1873,7 @@ "members":{ "componentArn":{ "shape":"ComponentBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the component whose policy you wish to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the component whose policy you want to retrieve.

", "location":"querystring", "locationName":"componentArn" } @@ -1882,7 +1898,7 @@ "members":{ "componentBuildVersionArn":{ "shape":"ComponentBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the component that you wish to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the component that you want to retrieve. Regex requires \"/\\d+$\" suffix.

", "location":"querystring", "locationName":"componentBuildVersionArn" } @@ -1907,7 +1923,7 @@ "members":{ "distributionConfigurationArn":{ "shape":"DistributionConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that you wish to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that you want to retrieve.

", "location":"querystring", "locationName":"distributionConfigurationArn" } @@ -1932,7 +1948,7 @@ "members":{ "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you wish to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you want to retrieve.

", "location":"querystring", "locationName":"imagePipelineArn" } @@ -1957,7 +1973,7 @@ "members":{ "imageArn":{ "shape":"ImageBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image whose policy you wish to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the image whose policy you want to retrieve.

", "location":"querystring", "locationName":"imageArn" } @@ -1982,7 +1998,7 @@ "members":{ "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe whose policy you wish to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the image recipe whose policy you want to retrieve.

", "location":"querystring", "locationName":"imageRecipeArn" } @@ -2007,7 +2023,7 @@ "members":{ "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe that you wish to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the image recipe that you want to retrieve.

", "location":"querystring", "locationName":"imageRecipeArn" } @@ -2032,7 +2048,7 @@ "members":{ "imageBuildVersionArn":{ "shape":"ImageBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image that you wish to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the image that you want to retrieve.

", "location":"querystring", "locationName":"imageBuildVersionArn" } @@ -2057,7 +2073,7 @@ "members":{ "infrastructureConfigurationArn":{ "shape":"InfrastructureConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that you wish to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that you want to retrieve.

", "location":"querystring", "locationName":"infrastructureConfigurationArn" } @@ -2076,14 +2092,14 @@ "documentation":"

The infrastructure configuration object.

" } }, - "documentation":"

GetInfrastructureConfiguration response object.

" + "documentation":"

GetInfrastructureConfiguration response object.

" }, "IdempotentParameterMismatchException":{ "type":"structure", "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

You have specified an client token for an operation using parameter values that differ from a previous request that used the same client token.

", + "documentation":"

You have specified a client token for an operation using parameter values that differ from a previous request that used the same client token.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -2182,7 +2198,7 @@ }, "infrastructureConfigurationArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the infrastruction configuration associated with this image pipeline.

" + "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration associated with this image pipeline.

" }, "distributionConfigurationArn":{ "shape":"Arn", @@ -2335,7 +2351,7 @@ "documentation":"

The reason for the image's status.

" } }, - "documentation":"

Image state shows the images status and the reason for that status.

" + "documentation":"

Image state shows the image status and the reason for that status.

" }, "ImageStatus":{ "type":"string", @@ -2408,7 +2424,7 @@ }, "timeoutMinutes":{ "shape":"ImageTestsTimeoutMinutes", - "documentation":"

The maximum time in minutes that tests are permitted to run for.

" + "documentation":"

The maximum time in minutes that tests are permitted to run.

" } }, "documentation":"

Image tests configuration.

" @@ -2423,7 +2439,7 @@ "members":{ "arn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the image semantic verion.

" + "documentation":"

The Amazon Resource Name (ARN) of the image semantic version.

" }, "name":{ "shape":"ResourceName", @@ -2473,23 +2489,23 @@ }, "semanticVersion":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the component. This version to follow the semantic version syntax. i.e. major.minor.patch. This could be versioned like software 2.0.1 or date like 2019.12.01.

" + "documentation":"

The semantic version of the component. This version follows the semantic version syntax. For example, major.minor.patch. This could be versioned like software (2.0.1) or like a date (2019.12.01).

" }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the component. Describes the contents of the component.

" + "documentation":"

The description of the component. Describes the contents of the component.

" }, "changeDescription":{ "shape":"NonEmptyString", - "documentation":"

The change description of the component. Describes what change has been made in this version. In other words what makes this version different from other versions of this component.

" + "documentation":"

The change description of the component. Describes what change has been made in this version, or what makes this version different from other versions of this component.

" }, "type":{ "shape":"ComponentType", - "documentation":"

The type of the component denotes whether the component is used to build the image or only to test it.

" + "documentation":"

The type of the component denotes whether the component is used to build the image or only to test it.

" }, "format":{ "shape":"ComponentFormat", - "documentation":"

The format of the resource that you wish to import as a component.

" + "documentation":"

The format of the resource that you want to import as a component.

" }, "platform":{ "shape":"Platform", @@ -2497,11 +2513,11 @@ }, "data":{ "shape":"NonEmptyString", - "documentation":"

The data of the component.

" + "documentation":"

The data of the component. Used to specify the data inline. Either data or uri can be used to specify the data within the component.

" }, "uri":{ "shape":"Uri", - "documentation":"

The uri of the component.

" + "documentation":"

The uri of the component. Must be an S3 URL and the requester must have permission to access the S3 bucket. If you use S3, you can specify component content up to your service quota. Either data or uri can be used to specify the data within the component.

" }, "kmsKeyId":{ "shape":"NonEmptyString", @@ -2540,47 +2556,47 @@ "members":{ "arn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the infrastruction configuration.

" + "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration.

" }, "name":{ "shape":"ResourceName", - "documentation":"

The name of the infrastruction configuration.

" + "documentation":"

The name of the infrastructure configuration.

" }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the infrastruction configuration.

" + "documentation":"

The description of the infrastructure configuration.

" }, "instanceTypes":{ "shape":"InstanceTypeList", - "documentation":"

The instance types of the infrastruction configuration.

" + "documentation":"

The instance types of the infrastructure configuration.

" }, "instanceProfileName":{ "shape":"NonEmptyString", - "documentation":"

The instance profile of the infrastruction configuration.

" + "documentation":"

The instance profile of the infrastructure configuration.

" }, "securityGroupIds":{ "shape":"SecurityGroupIds", - "documentation":"

The security group IDs of the infrastruction configuration.

" + "documentation":"

The security group IDs of the infrastructure configuration.

" }, "subnetId":{ "shape":"NonEmptyString", - "documentation":"

The subnet ID of the infrastruction configuration.

" + "documentation":"

The subnet ID of the infrastructure configuration.

" }, "logging":{ "shape":"Logging", - "documentation":"

The logging configuration of the infrastruction configuration.

" + "documentation":"

The logging configuration of the infrastructure configuration.

" }, "keyPair":{ "shape":"NonEmptyString", - "documentation":"

The EC2 key pair of the infrastruction configuration.

" + "documentation":"

The EC2 key pair of the infrastructure configuration.

" }, "terminateInstanceOnFailure":{ "shape":"NullableBoolean", - "documentation":"

The terminate instance on failure configuration of the infrastruction configuration.

" + "documentation":"

The terminate instance on failure configuration of the infrastructure configuration.

" }, "snsTopicArn":{ "shape":"NonEmptyString", - "documentation":"

The SNS Topic Amazon Resource Name (ARN) of the infrastruction configuration.

" + "documentation":"

The SNS topic Amazon Resource Name (ARN) of the infrastructure configuration.

" }, "dateCreated":{ "shape":"DateTime", @@ -2592,7 +2608,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of the infrastruction configuration.

" + "documentation":"

The tags of the infrastructure configuration.

" } }, "documentation":"

Details of the infrastructure configuration.

" @@ -2649,14 +2665,14 @@ }, "ebs":{ "shape":"EbsInstanceBlockDeviceSpecification", - "documentation":"

Use to manage EBS specific configuration for this mapping.

" + "documentation":"

Use to manage Amazon EBS-specific configuration for this mapping.

" }, "virtualName":{ "shape":"NonEmptyString", "documentation":"

Use to manage instance ephemeral devices.

" }, "noDevice":{ - "shape":"NonEmptyString", + "shape":"EmptyString", "documentation":"

Use to remove a mapping from the parent image.

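A small sketch of what the EmptyString change means in practice: suppressing an inherited device mapping is now expressed with an empty string. The device names and sizes below are placeholders for a create_image_recipe blockDeviceMappings list:

block_device_mappings = [
    {
        "deviceName": "/dev/xvdb",  # mapping inherited from the parent image
        "noDevice": "",             # EmptyString shape: must be the empty string
    },
    {
        "deviceName": "/dev/xvdc",
        "ebs": {"volumeSize": 100, "volumeType": "gp2", "deleteOnTermination": True},
    },
]
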
" } }, @@ -2730,14 +2746,14 @@ "members":{ "userIds":{ "shape":"AccountList", - "documentation":"

" + "documentation":"

The AWS account ID.

" }, "userGroups":{ "shape":"StringList", - "documentation":"

" + "documentation":"

The name of the group.

" } }, - "documentation":"

" + "documentation":"

Describes the configuration for a launch permission. The launch permission modification request is sent to the EC2 ModifyImageAttribute API on behalf of the user for each Region they have selected to distribute the AMI.

" }, "ListComponentBuildVersionsRequest":{ "type":"structure", @@ -2745,7 +2761,7 @@ "members":{ "componentVersionArn":{ "shape":"ComponentVersionArn", - "documentation":"

The component version arn whose versions you wish to list.

" + "documentation":"

The component version Amazon Resource Name (ARN) whose versions you want to list.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -2771,7 +2787,7 @@ }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty then there are additional elements that the service that not include in this request. Use this token with the next request to retrieve additional object.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -2780,11 +2796,11 @@ "members":{ "owner":{ "shape":"Ownership", - "documentation":"

The owner defines whose components you wish to list. By default this request will only show components owned by your account. You may use this field to specify if you wish to view components owned by yourself, Amazon, or those components that have been shared with you by other customers.

" + "documentation":"

The owner defines which components you want to list. By default, this request will only show components owned by your account. You can use this field to specify if you want to view components owned by yourself, by Amazon, or those components that have been shared with you by other customers.

" }, "filters":{ "shape":"FilterList", - "documentation":"

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -2810,7 +2826,7 @@ }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty then there are additional elements that the service that not include in this request. Use this token with the next request to retrieve additional object.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

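A pagination sketch built on this token; the owner value is arbitrary:

import boto3

imagebuilder = boto3.client("imagebuilder")

components = []
next_token = None
while True:
    kwargs = {"owner": "Self"}
    if next_token:
        kwargs["nextToken"] = next_token
    page = imagebuilder.list_components(**kwargs)
    components.extend(page.get("componentVersionList", []))
    next_token = page.get("nextToken")
    if not next_token:  # an empty token means there are no further pages
        break
print(len(components), "components")
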
" } } }, @@ -2819,7 +2835,7 @@ "members":{ "filters":{ "shape":"FilterList", - "documentation":"

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -2845,7 +2861,7 @@ }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty then there are additional elements that the service that not include in this request. Use this token with the next request to retrieve additional object.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -2855,11 +2871,11 @@ "members":{ "imageVersionArn":{ "shape":"ImageVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image whose build versions you wish to retrieve.

" + "documentation":"

The Amazon Resource Name (ARN) of the image whose build versions you want to retrieve.

" }, "filters":{ "shape":"FilterList", - "documentation":"

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -2885,20 +2901,21 @@ }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty then there are additional elements that the service that not include in this request. Use this token with the next request to retrieve additional object.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, "ListImagePipelineImagesRequest":{ "type":"structure", + "required":["imagePipelineArn"], "members":{ "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline whose images you wish to view.

" + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline whose images you want to view.

" }, "filters":{ "shape":"FilterList", - "documentation":"

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -2924,7 +2941,7 @@ }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty then there are additional elements that the service that not include in this request. Use this token with the next request to retrieve additional object.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -2933,7 +2950,7 @@ "members":{ "filters":{ "shape":"FilterList", - "documentation":"

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -2959,7 +2976,7 @@ }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty then there are additional elements that the service that not include in this request. Use this token with the next request to retrieve additional object.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -2968,11 +2985,11 @@ "members":{ "owner":{ "shape":"Ownership", - "documentation":"

The owner defines whose image recipes you wish to list. By default this request will only show image recipes owned by your account. You may use this field to specify if you wish to view image recipes owned by yourself, Amazon, or those image recipes that have been shared with you by other customers.

" + "documentation":"

The owner defines which image recipes you want to list. By default, this request will only show image recipes owned by your account. You can use this field to specify if you want to view image recipes owned by yourself, by Amazon, or those image recipes that have been shared with you by other customers.

" }, "filters":{ "shape":"FilterList", - "documentation":"

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -2998,7 +3015,7 @@ }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty then there are additional elements that the service that not include in this request. Use this token with the next request to retrieve additional object.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -3007,11 +3024,11 @@ "members":{ "owner":{ "shape":"Ownership", - "documentation":"

The owner defines whose images you wish to list. By default this request will only show images owned by your account. You may use this field to specify if you wish to view images owned by yourself, Amazon, or those images that have been shared with you by other customers.

" + "documentation":"

The owner defines which images you want to list. By default, this request will only show images owned by your account. You can use this field to specify if you want to view images owned by yourself, by Amazon, or those images that have been shared with you by other customers.

" }, "filters":{ "shape":"FilterList", - "documentation":"

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -3037,7 +3054,7 @@ }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty then there are additional elements that the service that not include in this request. Use this token with the next request to retrieve additional object.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -3046,7 +3063,7 @@ "members":{ "filters":{ "shape":"FilterList", - "documentation":"

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", @@ -3072,7 +3089,7 @@ }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty then there are additional elements that the service that not include in this request. Use this token with the next request to retrieve additional object.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -3082,7 +3099,7 @@ "members":{ "resourceArn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags you wish to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve.

", "location":"uri", "locationName":"resourceArn" } @@ -3102,10 +3119,10 @@ "members":{ "s3Logs":{ "shape":"S3Logs", - "documentation":"

The S3 logging configuration.

" + "documentation":"

The Amazon S3 logging configuration.

" } }, - "documentation":"

Logging configuration defines where Image Builder uploads your logs to.

" + "documentation":"

Logging configuration defines where Image Builder uploads your logs.

" }, "NonEmptyString":{ "type":"string", @@ -3118,10 +3135,10 @@ "members":{ "amis":{ "shape":"AmiList", - "documentation":"

The EC2 AMIs created by this image.

" + "documentation":"

The EC2 AMIs created by this image.

" } }, - "documentation":"

The resources produced by this image.

" + "documentation":"

The resources produced by this image.

" }, "Ownership":{ "type":"string", @@ -3256,7 +3273,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

You have attempted to mutate or delete a resource with a dependency that is prohibitting this action. See the error message for more details.

", + "documentation":"

You have attempted to mutate or delete a resource with a dependency that prohibits this action. See the error message for more details.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3265,7 +3282,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The resource that you are trying to operate on is currently in use. Review the message details, and retry later.

", + "documentation":"

The resource that you are trying to operate on is currently in use. Review the message details and retry later.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3292,25 +3309,25 @@ "members":{ "s3BucketName":{ "shape":"NonEmptyString", - "documentation":"

The S3 bucket in which to store the logs.

" + "documentation":"

The Amazon S3 bucket in which to store the logs.

" }, "s3KeyPrefix":{ "shape":"NonEmptyString", - "documentation":"

The S3 path in which to store the logs.

" + "documentation":"

The Amazon S3 path in which to store the logs.

" } }, - "documentation":"

S3 Logging configuration.

" + "documentation":"

Amazon S3 logging configuration.

" }, "Schedule":{ "type":"structure", "members":{ "scheduleExpression":{ "shape":"NonEmptyString", - "documentation":"

The expression determines how often a pipeline starts the creation of new images.

" + "documentation":"

The expression determines how often EC2 Image Builder evaluates your pipelineExecutionStartCondition.

" }, "pipelineExecutionStartCondition":{ "shape":"PipelineExecutionStartCondition", - "documentation":"

The condition configures when the pipeline should trigger a new image build.

" + "documentation":"

The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, EC2 Image Builder will build a new image only when there are known changes pending. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON expression matches the current time.

" } }, "documentation":"

A schedule configures how often and when a pipeline will automatically create a new image.

" @@ -3337,6 +3354,10 @@ "error":{"httpStatusCode":503}, "exception":true }, + "SnsTopicArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:sns:[^:]+:\\d{12}:[a-zA-Z0-9-_]{1,256}$" + }, "StartImagePipelineExecutionRequest":{ "type":"structure", "required":[ @@ -3346,7 +3367,7 @@ "members":{ "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you wish to manually invoke.

" + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you want to manually invoke.

" }, "clientToken":{ "shape":"ClientToken", @@ -3404,7 +3425,7 @@ "members":{ "resourceArn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the resource that you wish to tag.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to tag.

", "location":"uri", "locationName":"resourceArn" }, @@ -3432,7 +3453,7 @@ "members":{ "resourceArn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the resource that you wish to untag.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to untag.

", "location":"uri", "locationName":"resourceArn" }, @@ -3453,12 +3474,13 @@ "type":"structure", "required":[ "distributionConfigurationArn", + "distributions", "clientToken" ], "members":{ "distributionConfigurationArn":{ "shape":"DistributionConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that you wish to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that you want to update.

" }, "description":{ "shape":"NonEmptyString", @@ -3496,12 +3518,14 @@ "type":"structure", "required":[ "imagePipelineArn", + "imageRecipeArn", + "infrastructureConfigurationArn", "clientToken" ], "members":{ "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you wish to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you want to update.

" }, "description":{ "shape":"NonEmptyString", @@ -3559,12 +3583,13 @@ "type":"structure", "required":[ "infrastructureConfigurationArn", + "instanceProfileName", "clientToken" ], "members":{ "infrastructureConfigurationArn":{ "shape":"InfrastructureConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that you wish to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that you want to update.

" }, "description":{ "shape":"NonEmptyString", @@ -3572,7 +3597,7 @@ }, "instanceTypes":{ "shape":"InstanceTypeList", - "documentation":"

The instance types of the infrastructure configuration. You may specify one or more instance types to use for this build, the service will pick one of these instance types based on availability.

" + "documentation":"

The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.

" }, "instanceProfileName":{ "shape":"NonEmptyString", @@ -3592,14 +3617,14 @@ }, "keyPair":{ "shape":"NonEmptyString", - "documentation":"

The key pair of the infrastructure configuration. This can be used to log onto and debug the instance used to create your image.

" + "documentation":"

The key pair of the infrastructure configuration. This can be used to log on to and debug the instance used to create your image.

" }, "terminateInstanceOnFailure":{ "shape":"NullableBoolean", - "documentation":"

The terminate instance on failure setting of the infrastructure configuration. Set to false if you wish for Image Builder to retain the instance used to configure your AMI in the event that the build or test phase of your workflow failed.

" + "documentation":"

The terminate instance on failure setting of the infrastructure configuration. Set to false if you want Image Builder to retain the instance used to configure your AMI if the build or test phase of your workflow fails.

" }, "snsTopicArn":{ - "shape":"NonEmptyString", + "shape":"SnsTopicArn", "documentation":"

The SNS topic on which to send image build events.

" }, "clientToken":{ @@ -3632,5 +3657,5 @@ "pattern":"^[0-9]+\\.[0-9]+\\.[0-9]+$" } }, - "documentation":"

Amazon Elastic Compute Cloud Image Builder provides a one-stop-shop to automate the image management processes. You configure an automated pipeline that creates images for use on AWS. As software updates become available, Image Builder automatically produces a new image based on a customizable schedule and distributes it to stipulated AWS Regions after running tests on it. With the Image Builder, organizations can capture their internal or industry-specific compliance policies as a vetted template that can be consistently applied to every new image. Built-in integration with AWS Organizations provides customers with a centralized way to enforce image distribution and access policies across their AWS accounts and Regions. Image Builder supports multiple image format AMIs on AWS.

" + "documentation":"

EC2 Image Builder is a fully managed AWS service that makes it easier to automate the creation, management, and deployment of customized, secure, and up-to-date “golden” server images that are pre-installed and pre-configured with software and settings to meet specific IT standards.

" } diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index 106e7842..c19c2a72 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -4545,6 +4545,16 @@ }, "documentation":"

Configuration for the rollout of OTA updates.

" }, + "AwsJobPresignedUrlConfig":{ + "type":"structure", + "members":{ + "expiresInSec":{ + "shape":"ExpiresInSeconds", + "documentation":"

How long (in seconds) pre-signed URLs are valid. Valid values are 60 - 3600; the default value is 1800 seconds. Pre-signed URLs are generated when a request for the job document is received.

" + } + }, + "documentation":"

Configuration information for pre-signed URLs. Valid when protocols contains HTTP.

" + }, "Behavior":{ "type":"structure", "required":["name"], @@ -5655,6 +5665,10 @@ "shape":"Targets", "documentation":"

The targeted devices to receive OTA updates.

" }, + "protocols":{ + "shape":"Protocols", + "documentation":"

The protocol used to transfer the OTA update image. Valid values are [HTTP], [MQTT], [HTTP, MQTT]. When both HTTP and MQTT are specified, the target device can choose the protocol.

" + }, "targetSelection":{ "shape":"TargetSelection", "documentation":"

Specifies whether the update will continue to run (CONTINUOUS), or will be complete after all the things specified as targets have completed the update (SNAPSHOT). If continuous, the update may also be run on a thing when a change is detected in a target. For example, an update will run on a thing when the thing is added to a target group, even after the update was completed by all things originally in the group. Valid values: CONTINUOUS | SNAPSHOT.

" @@ -5663,6 +5677,10 @@ "shape":"AwsJobExecutionsRolloutConfig", "documentation":"

Configuration for the rollout of OTA updates.

" }, + "awsJobPresignedUrlConfig":{ + "shape":"AwsJobPresignedUrlConfig", + "documentation":"

Configuration information for pre-signed URLs.

" + }, "files":{ "shape":"OTAUpdateFiles", "documentation":"

The files to be streamed by the OTA update.

" @@ -8254,6 +8272,7 @@ "max":3600, "min":60 }, + "ExpiresInSeconds":{"type":"long"}, "ExplicitDeny":{ "type":"structure", "members":{ @@ -11824,10 +11843,18 @@ "shape":"Targets", "documentation":"

The targets of the OTA update.

" }, + "protocols":{ + "shape":"Protocols", + "documentation":"

The protocol used to transfer the OTA update image. Valid values are [HTTP], [MQTT], [HTTP, MQTT]. When both HTTP and MQTT are specified, the target device can choose the protocol.

" + }, "awsJobExecutionsRolloutConfig":{ "shape":"AwsJobExecutionsRolloutConfig", "documentation":"

Configuration for the rollout of OTA updates.

" }, + "awsJobPresignedUrlConfig":{ + "shape":"AwsJobPresignedUrlConfig", + "documentation":"

Configuration information for pre-signed URLs. Valid when protocols contains HTTP.

" + }, "targetSelection":{ "shape":"TargetSelection", "documentation":"

Specifies whether the OTA update will continue to run (CONTINUOUS), or will be complete after all those things specified as targets have completed the OTA update (SNAPSHOT). If continuous, the OTA update may also be run on a thing when a change is detected in a target. For example, an OTA update will run on a thing when the thing is added to a target group, even after the OTA update was completed by all things originally in the group.

" @@ -12102,6 +12129,19 @@ "type":"list", "member":{"shape":"ProcessingTargetName"} }, + "Protocol":{ + "type":"string", + "enum":[ + "MQTT", + "HTTP" + ] + }, + "Protocols":{ + "type":"list", + "member":{"shape":"Protocol"}, + "max":2, + "min":1 + }, "ProvisioningTemplateListing":{ "type":"list", "member":{"shape":"ProvisioningTemplateSummary"} @@ -12188,11 +12228,11 @@ }, "assetId":{ "shape":"AssetId", - "documentation":"

The ID of the AWS IoT SiteWise asset. You must specify either a propertyAlias or both an analiasId and a propertyId. Accepts substitution templates.

" + "documentation":"

The ID of the AWS IoT SiteWise asset. You must specify either a propertyAlias or both an aliasId and a propertyId. Accepts substitution templates.

" }, "propertyId":{ "shape":"AssetPropertyId", - "documentation":"

The ID of the asset's property. You must specify either a propertyAlias or both an analiasId and a propertyId. Accepts substitution templates.

" + "documentation":"

The ID of the asset's property. You must specify either a propertyAlias or both an aliasId and a propertyId. Accepts substitution templates.

" }, "propertyAlias":{ "shape":"AssetPropertyAlias", @@ -13863,7 +13903,7 @@ }, "mqttContext":{ "shape":"MqttContext", - "documentation":"

Specifies a test MQTT authorization request.>

" + "documentation":"

Specifies a test MQTT authorization request.

" }, "tlsContext":{ "shape":"TlsContext", @@ -14232,7 +14272,7 @@ } }, "documentation":"

The rate exceeds the limit.

", - "error":{"httpStatusCode":429}, + "error":{"httpStatusCode":400}, "exception":true }, "TimedOutThings":{"type":"integer"}, diff --git a/botocore/data/iotevents/2018-07-27/service-2.json b/botocore/data/iotevents/2018-07-27/service-2.json index c33fc28f..a94b311d 100644 --- a/botocore/data/iotevents/2018-07-27/service-2.json +++ b/botocore/data/iotevents/2018-07-27/service-2.json @@ -322,19 +322,19 @@ }, "lambda":{ "shape":"LambdaAction", - "documentation":"

Calls an AWS Lambda function, passing in information about the detector model instance and the event which triggered the action.

" + "documentation":"

Calls a Lambda function, passing in information about the detector model instance and the event that triggered the action.

" }, "iotEvents":{ "shape":"IotEventsAction", - "documentation":"

Sends an IoT Events input, passing in information about the detector model instance and the event which triggered the action.

" + "documentation":"

Sends an IoT Events input, passing in information about the detector model instance and the event that triggered the action.

" }, "sqs":{ "shape":"SqsAction", - "documentation":"

Sends information about the detector model instance and the event which triggered the action to an Amazon SQS queue.

" + "documentation":"

Sends information about the detector model instance and the event that triggered the action to an Amazon SQS queue.

" }, "firehose":{ "shape":"FirehoseAction", - "documentation":"

Sends information about the detector model instance and the event which triggered the action to a Kinesis Data Firehose delivery stream.

" + "documentation":"

Sends information about the detector model instance and the event that triggered the action to a Kinesis Data Firehose delivery stream.

" } }, "documentation":"

An action to be performed when the \"condition\" is TRUE.

" @@ -408,7 +408,7 @@ }, "key":{ "shape":"AttributeJsonPath", - "documentation":"

The input attribute key used to identify a device or system to create a detector (an instance of the detector model) and then to route each input received to the appropriate detector (instance). This parameter uses a JSON-path expression to specify the attribute-value pair in the message payload of each input that is used to identify the device associated with the input.

" + "documentation":"

The input attribute key used to identify a device or system in order to create a detector (an instance of the detector model) and then to route each input received to the appropriate detector (instance). This parameter uses a JSON-path expression to specify the attribute-value pair in the message payload of each input that is used to identify the device associated with the input.

" }, "roleArn":{ "shape":"AmazonResourceName", @@ -420,7 +420,7 @@ }, "evaluationMethod":{ "shape":"EvaluationMethod", - "documentation":"

When set to SERIAL, variables are updated and event conditions evaluated in the order that the events are defined. When set to BATCH, variables are updated and events performed only after all event conditions are evaluated.

" + "documentation":"

Information about the order in which events are evaluated and how actions are executed.

" } } }, @@ -636,11 +636,11 @@ }, "key":{ "shape":"AttributeJsonPath", - "documentation":"

The input attribute key used to identify a device or system to create a detector (an instance of the detector model) and then to route each input received to the appropriate detector (instance). This parameter uses a JSON-path expression to specify the attribute-value pair in the message payload of each input that is used to identify the device associated with the input.

" + "documentation":"

The input attribute key used to identify a device or system in order to create a detector (an instance of the detector model) and then to route each input received to the appropriate detector (instance). This parameter uses a JSON-path expression to specify the attribute-value pair in the message payload of each input that is used to identify the device associated with the input.

" }, "evaluationMethod":{ "shape":"EvaluationMethod", - "documentation":"

When set to SERIAL, variables are updated and event conditions evaluated in the order that the events are defined. When set to BATCH, variables are updated and events performed only after all event conditions are evaluated.

" + "documentation":"

Information about the order in which events are evaluated and how actions are executed.

" } }, "documentation":"

Information about how the detector model is configured.

" @@ -749,7 +749,7 @@ }, "evaluationMethod":{ "shape":"EvaluationMethod", - "documentation":"

When set to SERIAL, variables are updated and event conditions evaluated in the order that the events are defined. When set to BATCH, variables are updated and events performed only after all event conditions are evaluated.

" + "documentation":"

Information about the order in which events are evaluated and how actions are executed.

" } }, "documentation":"

Information about the detector model version.

" @@ -801,7 +801,7 @@ "documentation":"

A character separator that is used to separate records written to the Kinesis Data Firehose delivery stream. Valid values are: '\\n' (newline), '\\t' (tab), '\\r\\n' (Windows newline), ',' (comma).

" } }, - "documentation":"

Sends information about the detector model instance and the event which triggered the action to a Kinesis Data Firehose delivery stream.

" + "documentation":"

Sends information about the detector model instance and the event that triggered the action to a Kinesis Data Firehose delivery stream.

" }, "FirehoseSeparator":{ "type":"string", @@ -957,7 +957,7 @@ "documentation":"

The name of the AWS IoT Events input where the data is sent.

" } }, - "documentation":"

Sends an IoT Events input, passing in information about the detector model instance and the event which triggered the action.

" + "documentation":"

Sends an AWS IoT Events input, passing in information about the detector model instance and the event that triggered the action.

" }, "IotTopicPublishAction":{ "type":"structure", @@ -982,10 +982,10 @@ "members":{ "functionArn":{ "shape":"AmazonResourceName", - "documentation":"

The ARN of the AWS Lambda function which is executed.

" + "documentation":"

The ARN of the Lambda function that is executed.

" } }, - "documentation":"

Calls an AWS Lambda function, passing in information about the detector model instance and the event which triggered the action.

" + "documentation":"

Calls a Lambda function, passing in information about the detector model instance and the event that triggered the action.

" }, "LimitExceededException":{ "type":"structure", @@ -1331,14 +1331,14 @@ "members":{ "queueUrl":{ "shape":"QueueUrl", - "documentation":"

The URL of the Amazon SQS queue where the data is written.

" + "documentation":"

The URL of the SQS queue where the data is written.

" }, "useBase64":{ "shape":"UseBase64", "documentation":"

Set this to TRUE if you want the data to be Base-64 encoded before it is written to the queue. Otherwise, set this to FALSE.

" } }, - "documentation":"

Sends information about the detector model instance and the event which triggered the action to an Amazon SQS queue.

" + "documentation":"

Sends information about the detector model instance and the event that triggered the action to an Amazon SQS queue.

" }, "State":{ "type":"structure", @@ -1549,7 +1549,7 @@ }, "evaluationMethod":{ "shape":"EvaluationMethod", - "documentation":"

When set to SERIAL, variables are updated and event conditions evaluated in the order that the events are defined. When set to BATCH, variables are updated and events performed only after all event conditions are evaluated.

" + "documentation":"

Information about the order in which events are evaluated and how actions are executed.

" } } }, diff --git a/botocore/data/kafka/2018-11-14/paginators-1.json b/botocore/data/kafka/2018-11-14/paginators-1.json index 075d2aff..ddc3b3d4 100644 --- a/botocore/data/kafka/2018-11-14/paginators-1.json +++ b/botocore/data/kafka/2018-11-14/paginators-1.json @@ -29,6 +29,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Revisions" + }, + "ListKafkaVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "KafkaVersions" } } } diff --git a/botocore/data/kafka/2018-11-14/service-2.json b/botocore/data/kafka/2018-11-14/service-2.json index da5e9a3d..8edfcbd5 100644 --- a/botocore/data/kafka/2018-11-14/service-2.json +++ b/botocore/data/kafka/2018-11-14/service-2.json @@ -483,6 +483,39 @@ ], "documentation": "\n

Returns a list of all the MSK configurations in this Region.

\n " }, + "ListKafkaVersions": { + "name": "ListKafkaVersions", + "http": { + "method": "GET", + "requestUri": "/v1/kafka-versions", + "responseCode": 200 + }, + "input": { + "shape": "ListKafkaVersionsRequest" + }, + "output": { + "shape": "ListKafkaVersionsResponse" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "\n

The request isn't valid because the input is incorrect. Correct your input and then submit it again.

\n " + }, + { + "shape": "UnauthorizedException", + "documentation": "\n

The request is not authorized. The provided credentials couldn't be validated.

\n " + }, + { + "shape": "InternalServerErrorException", + "documentation": "\n

There was an unexpected internal server error. Retrying your request might resolve the issue.

\n " + }, + { + "shape": "ForbiddenException", + "documentation": "\n

Access forbidden. Check your credentials and then retry your request.

\n " + } + ], + "documentation": "\n

Returns a list of Kafka versions.

\n " + }, "ListNodes": { "name": "ListNodes", "http": { @@ -978,6 +1011,10 @@ "locationName": "state", "documentation": "\n

The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED.

\n " }, + "StateInfo" : { + "shape" : "StateInfo", + "locationName" : "stateInfo" + }, "Tags": { "shape": "__mapOf__string", "locationName": "tags", @@ -1653,6 +1690,26 @@ "httpStatusCode": 500 } }, + "KafkaVersion": { + "type": "structure", + "members": { + "Version": { + "shape": "__string", + "locationName": "version" + }, + "Status": { + "shape": "KafkaVersionStatus", + "locationName": "status" + } + } + }, + "KafkaVersionStatus": { + "type": "string", + "enum": [ + "ACTIVE", + "DEPRECATED" + ] + }, "ListClusterOperationsRequest": { "type": "structure", "members": { @@ -1805,6 +1862,36 @@ } } }, + "ListKafkaVersionsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "\n

The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "\n

The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. To get the next batch, provide this token in your next request.

" + } + } + }, + "ListKafkaVersionsResponse": { + "type": "structure", + "members": { + "KafkaVersions": { + "shape": "__listOfKafkaVersion", + "locationName": "kafkaVersions" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken" + } + } + }, "ListNodesRequest": { "type": "structure", "members": { @@ -2093,6 +2180,19 @@ "httpStatusCode": 503 } }, + "StateInfo" : { + "type" : "structure", + "members" : { + "Code" : { + "shape" : "__string", + "locationName" : "code" + }, + "Message" : { + "shape" : "__string", + "locationName" : "message" + } + } + }, "StorageInfo": { "type": "structure", "members": { @@ -2445,6 +2545,12 @@ "shape": "ConfigurationRevision" } }, + "__listOfKafkaVersion": { + "type": "list", + "member": { + "shape": "KafkaVersion" + } + }, "__listOfNodeInfo": { "type": "list", "member": { @@ -2493,4 +2599,4 @@ } }, "documentation": "\n

The operations for managing an Amazon MSK cluster.

\n " -} \ No newline at end of file +} diff --git a/botocore/data/kinesisanalyticsv2/2018-05-23/service-2.json b/botocore/data/kinesisanalyticsv2/2018-05-23/service-2.json index 7cea8016..b4a17a61 100644 --- a/botocore/data/kinesisanalyticsv2/2018-05-23/service-2.json +++ b/botocore/data/kinesisanalyticsv2/2018-05-23/service-2.json @@ -2953,7 +2953,8 @@ "type":"string", "enum":[ "SQL-1_0", - "FLINK-1_6" + "FLINK-1_6", + "FLINK-1_8" ] }, "S3ApplicationCodeLocationDescription":{ diff --git a/botocore/data/kms/2014-11-01/service-2.json b/botocore/data/kms/2014-11-01/service-2.json index aba0fdb5..54d175e6 100644 --- a/botocore/data/kms/2014-11-01/service-2.json +++ b/botocore/data/kms/2014-11-01/service-2.json @@ -45,7 +45,7 @@ {"shape":"KMSInternalException"}, {"shape":"CloudHsmClusterInvalidConfigurationException"} ], - "documentation":"

Connects or reconnects a custom key store to its associated AWS CloudHSM cluster.

The custom key store must be connected before you can create customer master keys (CMKs) in the key store or use the CMKs it contains. You can disconnect and reconnect a custom key store at any time.

To connect a custom key store, its associated AWS CloudHSM cluster must have at least one active HSM. To get the number of active HSMs in a cluster, use the DescribeClusters operation. To add HSMs to the cluster, use the CreateHsm operation.

The connection process can take an extended amount of time to complete; up to 20 minutes. This operation starts the connection process, but it does not wait for it to complete. When it succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no properties. However, this response does not indicate that the custom key store is connected. To get the connection state of the custom key store, use the DescribeCustomKeyStores operation.

During the connection process, AWS KMS finds the AWS CloudHSM cluster that is associated with the custom key store, creates the connection infrastructure, connects to the cluster, logs into the AWS CloudHSM client as the kmsuser crypto user (CU), and rotates its password.

The ConnectCustomKeyStore operation might fail for various reasons. To find the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, correct the error, use the UpdateCustomKeyStore operation if necessary, and then use ConnectCustomKeyStore again.

If you are having trouble connecting or disconnecting a custom key store, see Troubleshooting a Custom Key Store in the AWS Key Management Service Developer Guide.

" + "documentation":"

Connects or reconnects a custom key store to its associated AWS CloudHSM cluster.

The custom key store must be connected before you can create customer master keys (CMKs) in the key store or use the CMKs it contains. You can disconnect and reconnect a custom key store at any time.

To connect a custom key store, its associated AWS CloudHSM cluster must have at least one active HSM. To get the number of active HSMs in a cluster, use the DescribeClusters operation. To add HSMs to the cluster, use the CreateHsm operation. Also, the kmsuser crypto user (CU) must not be logged into the cluster; if it is, AWS KMS cannot use this account to log in.

The connection process can take an extended amount of time to complete; up to 20 minutes. This operation starts the connection process, but it does not wait for it to complete. When it succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no properties. However, this response does not indicate that the custom key store is connected. To get the connection state of the custom key store, use the DescribeCustomKeyStores operation.

During the connection process, AWS KMS finds the AWS CloudHSM cluster that is associated with the custom key store, creates the connection infrastructure, connects to the cluster, logs into the AWS CloudHSM client as the kmsuser CU, and rotates its password.

The ConnectCustomKeyStore operation might fail for various reasons. To find the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, correct the error, use the UpdateCustomKeyStore operation if necessary, and then use ConnectCustomKeyStore again.

If you are having trouble connecting or disconnecting a custom key store, see Troubleshooting a Custom Key Store in the AWS Key Management Service Developer Guide.

" }, "CreateAlias":{ "name":"CreateAlias", @@ -124,7 +124,7 @@ {"shape":"CustomKeyStoreInvalidStateException"}, {"shape":"CloudHsmClusterInvalidConfigurationException"} ], - "documentation":"

Creates a unique customer managed customer master key (CMK) in your AWS account and Region. You cannot use this operation to create a CMK in a different AWS account.

You can use the CreateKey operation to create symmetric or asymmetric CMKs.

  • Symmetric CMKs contain a 256-bit symmetric key that never leaves AWS KMS unencrypted. To use the CMK, you must call AWS KMS. You can use a symmetric CMK to encrypt and decrypt small amounts of data, but they are typically used to generate data keys or data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

  • Asymmetric CMKs can contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric CMK never leaves AWS KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of AWS KMS. CMKs with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). CMKs with ECC key pairs can be used only to sign and verify messages.

For information about symmetric and asymmetric CMKs, see Using Symmetric and Asymmetric CMKs in the AWS Key Management Service Developer Guide.

To create different types of CMKs, use the following guidance:

Asymmetric CMKs

To create an asymmetric CMK, use the CustomerMasterKeySpec parameter to specify the type of key material in the CMK. Then, use the KeyUsage parameter to determine whether the CMK will be used to encrypt and decrypt or sign and verify. You can't change these properties after the CMK is created.

Symmetric CMKs

When creating a symmetric CMK, you don't need to specify the CustomerMasterKeySpec or KeyUsage parameters. The default value for CustomerMasterKeySpec, SYMMETRIC_DEFAULT, and the default value for KeyUsage, ENCRYPT_DECRYPT, are the only valid values for symmetric CMKs.

Imported Key Material

To import your own key material, begin by creating a symmetric CMK with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token, and use the public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the AWS Key Management Service Developer Guide . You cannot import the key material into an asymmetric CMK.

Custom Key Stores

To create a symmetric CMK in a custom key store, use the CustomKeyStoreId parameter to specify the custom key store. You must also use the Origin parameter with a value of AWS_CLOUDHSM. The AWS CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the AWS Region.

You cannot create an asymmetric CMK in a custom key store. For information about custom key stores in AWS KMS see Using Custom Key Stores in the AWS Key Management Service Developer Guide .

" + "documentation":"

Creates a unique customer managed customer master key (CMK) in your AWS account and Region. You cannot use this operation to create a CMK in a different AWS account.

You can use the CreateKey operation to create symmetric or asymmetric CMKs.

  • Symmetric CMKs contain a 256-bit symmetric key that never leaves AWS KMS unencrypted. To use the CMK, you must call AWS KMS. You can use a symmetric CMK to encrypt and decrypt small amounts of data, but they are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

  • Asymmetric CMKs can contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric CMK never leaves AWS KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of AWS KMS. CMKs with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). CMKs with ECC key pairs can be used only to sign and verify messages.

For information about symmetric and asymmetric CMKs, see Using Symmetric and Asymmetric CMKs in the AWS Key Management Service Developer Guide.

To create different types of CMKs, use the following guidance:

Asymmetric CMKs

To create an asymmetric CMK, use the CustomerMasterKeySpec parameter to specify the type of key material in the CMK. Then, use the KeyUsage parameter to determine whether the CMK will be used to encrypt and decrypt or sign and verify. You can't change these properties after the CMK is created.

Symmetric CMKs

When creating a symmetric CMK, you don't need to specify the CustomerMasterKeySpec or KeyUsage parameters. The default value for CustomerMasterKeySpec, SYMMETRIC_DEFAULT, and the default value for KeyUsage, ENCRYPT_DECRYPT, are the only valid values for symmetric CMKs.

Imported Key Material

To import your own key material, begin by creating a symmetric CMK with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use the GetParametersForImport operation to get a public key and import token, and use the public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the AWS Key Management Service Developer Guide. You cannot import the key material into an asymmetric CMK.

Custom Key Stores

To create a symmetric CMK in a custom key store, use the CustomKeyStoreId parameter to specify the custom key store. You must also use the Origin parameter with a value of AWS_CLOUDHSM. The AWS CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the AWS Region.

You cannot create an asymmetric CMK in a custom key store. For information about custom key stores in AWS KMS see Using Custom Key Stores in the AWS Key Management Service Developer Guide .

" }, "Decrypt":{ "name":"Decrypt", @@ -348,7 +348,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Generates a unique symmetric data key. This operation returns a plaintext copy of the data key and a copy that is encrypted under a customer master key (CMK) that you specify. You can use the plaintext key to encrypt your data outside of AWS KMS and store the encrypted data key with the encrypted data.

GenerateDataKey returns a unique data key for each request. The bytes in the key are not related to the caller or CMK that is used to encrypt the data key.

To generate a data key, specify the symmetric CMK that will be used to encrypt the data key. You cannot use an asymmetric CMK to generate data keys.

You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

If the operation succeeds, the plaintext copy of the data key is in the Plaintext field of the response, and the encrypted copy of the data key in the CiphertextBlob field.

To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

We recommend that you use the following pattern to encrypt data locally in your application:

  1. Use the GenerateDataKey operation to get a data encryption key.

  2. Use the plaintext data key (returned in the Plaintext field of the response) to encrypt data locally, then erase the plaintext data key from memory.

  3. Store the encrypted data key (returned in the CiphertextBlob field of the response) alongside the locally encrypted data.

To decrypt data locally:

  1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

  2. Use the plaintext data key to decrypt data locally, then erase the plaintext data key from memory.

" + "documentation":"

Generates a unique symmetric data key. This operation returns a plaintext copy of the data key and a copy that is encrypted under a customer master key (CMK) that you specify. You can use the plaintext key to encrypt your data outside of AWS KMS and store the encrypted data key with the encrypted data.

GenerateDataKey returns a unique data key for each request. The bytes in the key are not related to the caller or CMK that is used to encrypt the data key.

To generate a data key, specify the symmetric CMK that will be used to encrypt the data key. You cannot use an asymmetric CMK to generate data keys. To get the type of your CMK, use the DescribeKey operation.

You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

If the operation succeeds, the plaintext copy of the data key is in the Plaintext field of the response, and the encrypted copy of the data key in the CiphertextBlob field.

To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

We recommend that you use the following pattern to encrypt data locally in your application:

  1. Use the GenerateDataKey operation to get a data encryption key.

  2. Use the plaintext data key (returned in the Plaintext field of the response) to encrypt data locally, then erase the plaintext data key from memory.

  3. Store the encrypted data key (returned in the CiphertextBlob field of the response) alongside the locally encrypted data.

To decrypt data locally:

  1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

  2. Use the plaintext data key to decrypt data locally, then erase the plaintext data key from memory.

" }, "GenerateDataKeyPair":{ "name":"GenerateDataKeyPair", @@ -408,7 +408,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Generates a unique symmetric data key. This operation returns a data key that is encrypted under a customer master key (CMK) that you specify. To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that returns only the encrypted copy of the data key. This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

GenerateDataKeyWithoutPlaintext returns a unique data key for each request. The bytes in the keys are not related to the caller or CMK that is used to encrypt the private key.

To generate a data key, you must specify the symmetric customer master key (CMK) that is used to encrypt the data key. You cannot use an asymmetric CMK to generate a data key. To get the type of your CMK, use the KeySpec field in the DescribeKey response. You must also specify the length of the data key using either the KeySpec or NumberOfBytes field (but not both). For common key lengths (128-bit and 256-bit symmetric keys), use the KeySpec parameter.

If the operation succeeds, you will find the plaintext copy of the data key in the Plaintext field of the response, and the encrypted copy of the data key in the CiphertextBlob field.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Generates a unique symmetric data key. This operation returns a data key that is encrypted under a customer master key (CMK) that you specify. To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it returns only the encrypted copy of the data key. This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

GenerateDataKeyWithoutPlaintext returns a unique data key for each request. The bytes in the keys are not related to the caller or CMK that is used to encrypt the private key.

To generate a data key, you must specify the symmetric customer master key (CMK) that is used to encrypt the data key. You cannot use an asymmetric CMK to generate a data key. To get the type of your CMK, use the DescribeKey operation.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "GenerateRandom":{ "name":"GenerateRandom", @@ -538,7 +538,7 @@ {"shape":"InvalidArnException"}, {"shape":"NotFoundException"} ], - "documentation":"

Gets a list of aliases in the caller's AWS account and region. You cannot list aliases in other accounts. For more information about aliases, see CreateAlias.

By default, the ListAliases command returns all aliases in the account and region. To get only the aliases that point to a particular customer master key (CMK), use the KeyId parameter.

The ListAliases response can include aliases that you created and associated with your customer managed CMKs, and aliases that AWS created and associated with AWS managed CMKs in your account. You can recognize AWS aliases because their names have the format aws/<service-name>, such as aws/dynamodb.

The response might also include aliases that have no TargetKeyId field. These are predefined aliases that AWS has created but has not yet associated with a CMK. Aliases that AWS creates in your account, including predefined aliases, do not count against your AWS KMS aliases limit.

" + "documentation":"

Gets a list of aliases in the caller's AWS account and region. You cannot list aliases in other accounts. For more information about aliases, see CreateAlias.

By default, the ListAliases command returns all aliases in the account and region. To get only the aliases that point to a particular customer master key (CMK), use the KeyId parameter.

The ListAliases response can include aliases that you created and associated with your customer managed CMKs, and aliases that AWS created and associated with AWS managed CMKs in your account. You can recognize AWS aliases because their names have the format aws/<service-name>, such as aws/dynamodb.

The response might also include aliases that have no TargetKeyId field. These are predefined aliases that AWS has created but has not yet associated with a CMK. Aliases that AWS creates in your account, including predefined aliases, do not count against your AWS KMS aliases quota.

" }, "ListGrants":{ "name":"ListGrants", @@ -986,7 +986,10 @@ "NETWORK_ERRORS", "INTERNAL_ERROR", "INSUFFICIENT_CLOUDHSM_HSMS", - "USER_LOCKED_OUT" + "USER_LOCKED_OUT", + "USER_NOT_FOUND", + "USER_LOGGED_IN", + "SUBNET_NOT_FOUND" ] }, "ConnectionStateType":{ @@ -1039,7 +1042,7 @@ }, "KeyStorePassword":{ "shape":"KeyStorePasswordType", - "documentation":"

Enter the password of the kmsuser crypto user (CU) account in the specified AWS CloudHSM cluster. AWS KMS logs into the cluster as this user to manage key material on your behalf.

This parameter tells AWS KMS the kmsuser account password; it does not change the password in the AWS CloudHSM cluster.

" + "documentation":"

Enter the password of the kmsuser crypto user (CU) account in the specified AWS CloudHSM cluster. AWS KMS logs into the cluster as this user to manage key material on your behalf.

The password must be a string of 7 to 32 characters. Its value is case sensitive.

This parameter tells AWS KMS the kmsuser account password; it does not change the password in the AWS CloudHSM cluster.

" } } }, @@ -1108,7 +1111,7 @@ "members":{ "Policy":{ "shape":"PolicyType", - "documentation":"

The key policy to attach to the CMK.

If you provide a key policy, it must meet the following criteria:

  • If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy must allow the principal that is making the CreateKey request to make a subsequent PutKeyPolicy request on the CMK. This reduces the risk that the CMK becomes unmanageable. For more information, refer to the scenario in the Default Key Policy section of the AWS Key Management Service Developer Guide .

  • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to AWS KMS. When you create a new AWS principal (for example, an IAM user or role), you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to AWS KMS. For more information, see Changes that I make are not always immediately visible in the AWS Identity and Access Management User Guide.

If you do not provide a key policy, AWS KMS attaches a default key policy to the CMK. For more information, see Default Key Policy in the AWS Key Management Service Developer Guide.

The key policy size limit is 32 kilobytes (32768 bytes).

" + "documentation":"

The key policy to attach to the CMK.

If you provide a key policy, it must meet the following criteria:

  • If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy must allow the principal that is making the CreateKey request to make a subsequent PutKeyPolicy request on the CMK. This reduces the risk that the CMK becomes unmanageable. For more information, refer to the scenario in the Default Key Policy section of the AWS Key Management Service Developer Guide .

  • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to AWS KMS. When you create a new AWS principal (for example, an IAM user or role), you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to AWS KMS. For more information, see Changes that I make are not always immediately visible in the AWS Identity and Access Management User Guide.

If you do not provide a key policy, AWS KMS attaches a default key policy to the CMK. For more information, see Default Key Policy in the AWS Key Management Service Developer Guide.

The key policy size quota is 32 kilobytes (32768 bytes).

" }, "Description":{ "shape":"DescriptionType", @@ -1120,7 +1123,7 @@ }, "CustomerMasterKeySpec":{ "shape":"CustomerMasterKeySpec", - "documentation":"

Specifies the type of CMK to create. The CustomerMasterKeySpec determines whether the CMK contains a symmetric key or an asymmetric key pair. It also determines the encryption algorithms or signing algorithms that the CMK supports. You can't change the CustomerMasterKeySpec after the CMK is created. To further restrict the algorithms that can be used with the CMK, use its key policy or IAM policy.

For help with choosing a key spec for your CMK, see Selecting a Customer Master Key Spec in the AWS Key Management Service Developer Guide.

The default value, SYMMETRIC_DEFAULT, creates a CMK with a 256-bit symmetric key.

AWS KMS supports the following key specs for CMKs:

  • Symmetric key (default)

    • SYMMETRIC_DEFAULT (AES-256-GCM)

  • Asymmetric RSA key pairs

    • RSA_2048

    • RSA_3072

    • RSA_4096

  • Asymmetric NIST-recommended elliptic curve key pairs

    • ECC_NIST_P256 (secp256r1)

    • ECC_NIST_P384 (secp384r1)

    • ECC_NIST_P521 (secp521r1)

  • Other asymmetric elliptic curve key pairs

    • ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies.

" + "documentation":"

Specifies the type of CMK to create. The default value, SYMMETRIC_DEFAULT, creates a CMK with a 256-bit symmetric key for encryption and decryption. For help choosing a key spec for your CMK, see How to Choose Your CMK Configuration in the AWS Key Management Service Developer Guide.

The CustomerMasterKeySpec determines whether the CMK contains a symmetric key or an asymmetric key pair. It also determines the encryption algorithms or signing algorithms that the CMK supports. You can't change the CustomerMasterKeySpec after the CMK is created. To further restrict the algorithms that can be used with the CMK, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm or kms:SigningAlgorithm in the AWS Key Management Service Developer Guide.

AWS services that are integrated with AWS KMS use symmetric CMKs to protect your data. These services do not support asymmetric CMKs. For help determining whether a CMK is symmetric or asymmetric, see Identifying Symmetric and Asymmetric CMKs in the AWS Key Management Service Developer Guide.

AWS KMS supports the following key specs for CMKs:

  • Symmetric key (default)

    • SYMMETRIC_DEFAULT (AES-256-GCM)

  • Asymmetric RSA key pairs

    • RSA_2048

    • RSA_3072

    • RSA_4096

  • Asymmetric NIST-recommended elliptic curve key pairs

    • ECC_NIST_P256 (secp256r1)

    • ECC_NIST_P384 (secp384r1)

    • ECC_NIST_P521 (secp521r1)

  • Other asymmetric elliptic curve key pairs

    • ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies.

" }, "Origin":{ "shape":"OriginType", @@ -1216,11 +1219,11 @@ }, "ConnectionState":{ "shape":"ConnectionStateType", - "documentation":"

Indicates whether the custom key store is connected to its AWS CloudHSM cluster.

You can create and use CMKs in your custom key stores only when its connection state is CONNECTED.

The value is DISCONNECTED if the key store has never been connected or you use the DisconnectCustomKeyStore operation to disconnect it. If the value is CONNECTED but you are having trouble using the custom key store, make sure that its associated AWS CloudHSM cluster is active and contains at least one active HSM.

A value of FAILED indicates that an attempt to connect was unsuccessful. For help resolving a connection failure, see Troubleshooting a Custom Key Store in the AWS Key Management Service Developer Guide.

" + "documentation":"

Indicates whether the custom key store is connected to its AWS CloudHSM cluster.

You can create and use CMKs in your custom key stores only when its connection state is CONNECTED.

The value is DISCONNECTED if the key store has never been connected or you use the DisconnectCustomKeyStore operation to disconnect it. If the value is CONNECTED but you are having trouble using the custom key store, make sure that its associated AWS CloudHSM cluster is active and contains at least one active HSM.

A value of FAILED indicates that an attempt to connect was unsuccessful. The ConnectionErrorCode field in the response indicates the cause of the failure. For help resolving a connection failure, see Troubleshooting a Custom Key Store in the AWS Key Management Service Developer Guide.

" }, "ConnectionErrorCode":{ "shape":"ConnectionErrorCodeType", - "documentation":"

Describes the connection error. Valid values are:

  • CLUSTER_NOT_FOUND - AWS KMS cannot find the AWS CloudHSM cluster with the specified cluster ID.

  • INSUFFICIENT_CLOUDHSM_HSMS - The associated AWS CloudHSM cluster does not contain any active HSMs. To connect a custom key store to its AWS CloudHSM cluster, the cluster must contain at least one active HSM.

  • INTERNAL_ERROR - AWS KMS could not complete the request due to an internal error. Retry the request. For ConnectCustomKeyStore requests, disconnect the custom key store before trying to connect again.

  • INVALID_CREDENTIALS - AWS KMS does not have the correct password for the kmsuser crypto user in the AWS CloudHSM cluster.

  • NETWORK_ERRORS - Network errors are preventing AWS KMS from connecting to the custom key store.

  • USER_LOCKED_OUT - The kmsuser CU account is locked out of the associated AWS CloudHSM cluster due to too many failed password attempts. Before you can connect your custom key store to its AWS CloudHSM cluster, you must change the kmsuser account password and update the password value for the custom key store.

For help with connection failures, see Troubleshooting Custom Key Stores in the AWS Key Management Service Developer Guide.

" + "documentation":"

Describes the connection error. This field appears in the response only when the ConnectionState is FAILED. For help resolving these errors, see How to Fix a Connection Failure in the AWS Key Management Service Developer Guide.

Valid values are:

  • CLUSTER_NOT_FOUND - AWS KMS cannot find the AWS CloudHSM cluster with the specified cluster ID.

  • INSUFFICIENT_CLOUDHSM_HSMS - The associated AWS CloudHSM cluster does not contain any active HSMs. To connect a custom key store to its AWS CloudHSM cluster, the cluster must contain at least one active HSM.

  • INTERNAL_ERROR - AWS KMS could not complete the request due to an internal error. Retry the request. For ConnectCustomKeyStore requests, disconnect the custom key store before trying to connect again.

  • INVALID_CREDENTIALS - AWS KMS does not have the correct password for the kmsuser crypto user in the AWS CloudHSM cluster. Before you can connect your custom key store to its AWS CloudHSM cluster, you must change the kmsuser account password and update the key store password value for the custom key store.

  • NETWORK_ERRORS - Network errors are preventing AWS KMS from connecting to the custom key store.

  • SUBNET_NOT_FOUND - A subnet in the AWS CloudHSM cluster configuration was deleted. If AWS KMS cannot find all of the subnets that were configured for the cluster when the custom key store was created, attempts to connect fail. To fix this error, create a cluster from a backup and associate it with your custom key store. This process includes selecting a VPC and subnets. For details, see How to Fix a Connection Failure in the AWS Key Management Service Developer Guide.

  • USER_LOCKED_OUT - The kmsuser CU account is locked out of the associated AWS CloudHSM cluster due to too many failed password attempts. Before you can connect your custom key store to its AWS CloudHSM cluster, you must change the kmsuser account password and update the key store password value for the custom key store.

  • USER_LOGGED_IN - The kmsuser CU account is logged into the associated AWS CloudHSM cluster. This prevents AWS KMS from rotating the kmsuser account password and logging into the cluster. Before you can connect your custom key store to its AWS CloudHSM cluster, you must log the kmsuser CU out of the cluster. If you changed the kmsuser password to log into the cluster, you must also update the key store password value for the custom key store. For help, see How to Log Out and Reconnect in the AWS Key Management Service Developer Guide.

  • USER_NOT_FOUND - AWS KMS cannot find a kmsuser CU account in the associated AWS CloudHSM cluster. Before you can connect your custom key store to its AWS CloudHSM cluster, you must create a kmsuser CU account in the cluster, and then update the key store password value for the custom key store.

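A hedged boto3 sketch of inspecting ConnectionState and, when it is FAILED, the ConnectionErrorCode above; the custom key store name is a placeholder:

    import boto3

    kms = boto3.client('kms')

    # 'ExampleKeyStore' is a placeholder custom key store name.
    stores = kms.describe_custom_key_stores(CustomKeyStoreName='ExampleKeyStore')
    for store in stores['CustomKeyStores']:
        state = store['ConnectionState']
        # ConnectionErrorCode is present only when ConnectionState is FAILED.
        error = store.get('ConnectionErrorCode')
        print(store['CustomKeyStoreId'], state, error)
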
" }, "CreationDate":{ "shape":"DateType", @@ -1622,7 +1625,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

Specifies the CMK that encrypts the private key in the data key pair. You must specify a symmetric CMK. You cannot use an asymmetric CMK.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\".

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

Specifies the CMK that encrypts the private key in the data key pair. You must specify a symmetric CMK. You cannot use an asymmetric CMK. To get the type of your CMK, use the DescribeKey operation.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\".

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

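A minimal sketch of calling GenerateDataKeyPair with an alias identifier like the examples above; the alias is a placeholder and must point at a symmetric CMK:

    import boto3

    kms = boto3.client('kms')

    pair = kms.generate_data_key_pair(
        KeyId='alias/ExampleAlias',   # placeholder alias for a symmetric CMK
        KeyPairSpec='RSA_2048',
    )
    # PrivateKeyCiphertextBlob is encrypted under the symmetric CMK;
    # PrivateKeyPlaintext and PublicKey are returned for immediate use.
    public_key_der = pair['PublicKey']
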
" }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", @@ -1647,7 +1650,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

Specifies the CMK that encrypted the private key in the data key pair. You must specify a symmetric CMK. You cannot use an asymmetric CMK.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\".

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

Specifies the CMK that encrypted the private key in the data key pair. You must specify a symmetric CMK. You cannot use an asymmetric CMK. To get the type of your CMK, use the DescribeKey operation.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\".

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", @@ -1870,7 +1873,7 @@ }, "PublicKey":{ "shape":"PublicKeyType", - "documentation":"

The exported public key.

This value is returned as a binary Distinguished Encoding Rules (DER)-encoded object. To decode it, use an ASN.1 parsing tool, such as OpenSSL asn1parse.

" + "documentation":"

The exported public key.

The value is a DER-encoded X.509 public key, also known as SubjectPublicKeyInfo (SPKI), as defined in RFC 5280. When you use the HTTP API or the AWS CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.

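A hedged sketch of retrieving and parsing the DER-encoded SubjectPublicKeyInfo with the third-party cryptography package (a recent version is assumed); the key ID is a placeholder:

    import boto3
    from cryptography.hazmat.primitives.serialization import load_der_public_key

    kms = boto3.client('kms')

    # Placeholder key ID of an asymmetric CMK.
    resp = kms.get_public_key(KeyId='1234abcd-12ab-34cd-56ef-1234567890ab')

    # boto3 returns the raw DER bytes; the Base64 layer applies only to the
    # HTTP API and the AWS CLI.
    public_key = load_der_public_key(resp['PublicKey'])
    print(type(public_key).__name__, resp['CustomerMasterKeySpec'])
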
" }, "CustomerMasterKeySpec":{ "shape":"CustomerMasterKeySpec", @@ -2264,7 +2267,8 @@ }, "KeyStorePasswordType":{ "type":"string", - "min":1, + "max":32, + "min":7, "sensitive":true }, "KeyUnavailableException":{ @@ -2288,7 +2292,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

The request was rejected because a limit was exceeded. For more information, see Limits in the AWS Key Management Service Developer Guide.

", + "documentation":"

The request was rejected because a quota was exceeded. For more information, see Quotas in the AWS Key Management Service Developer Guide.

", "exception":true }, "LimitType":{ @@ -2581,7 +2585,7 @@ }, "Policy":{ "shape":"PolicyType", - "documentation":"

The key policy to attach to the CMK.

The key policy must meet the following criteria:

  • If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy must allow the principal that is making the PutKeyPolicy request to make a subsequent PutKeyPolicy request on the CMK. This reduces the risk that the CMK becomes unmanageable. For more information, refer to the scenario in the Default Key Policy section of the AWS Key Management Service Developer Guide.

  • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to AWS KMS. When you create a new AWS principal (for example, an IAM user or role), you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to AWS KMS. For more information, see Changes that I make are not always immediately visible in the AWS Identity and Access Management User Guide.

The key policy size limit is 32 kilobytes (32768 bytes).

" + "documentation":"

The key policy to attach to the CMK.

The key policy must meet the following criteria:

  • If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy must allow the principal that is making the PutKeyPolicy request to make a subsequent PutKeyPolicy request on the CMK. This reduces the risk that the CMK becomes unmanageable. For more information, refer to the scenario in the Default Key Policy section of the AWS Key Management Service Developer Guide.

  • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to AWS KMS. When you create a new AWS principal (for example, an IAM user or role), you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to AWS KMS. For more information, see Changes that I make are not always immediately visible in the AWS Identity and Access Management User Guide.

The key policy cannot exceed 32 kilobytes (32768 bytes). For more information, see Resource Quotas in the AWS Key Management Service Developer Guide.

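A minimal sketch of attaching a key policy that keeps the calling account able to administer the key, consistent with the lockout guidance above; the account ID and key ID are placeholders:

    import json
    import boto3

    kms = boto3.client('kms')

    policy = {
        'Version': '2012-10-17',
        'Statement': [
            {
                'Sid': 'Enable IAM policies',
                'Effect': 'Allow',
                'Principal': {'AWS': 'arn:aws:iam::111122223333:root'},  # placeholder account
                'Action': 'kms:*',
                'Resource': '*',
            },
        ],
    }

    kms.put_key_policy(
        KeyId='1234abcd-12ab-34cd-56ef-1234567890ab',   # placeholder
        PolicyName='default',                           # the only valid policy name
        Policy=json.dumps(policy),
    )
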
" }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BooleanType", @@ -2734,7 +2738,7 @@ }, "MessageType":{ "shape":"MessageType", - "documentation":"

Tells AWS KMS whether the value of the Message parameter is a message or message digest. To indicate a message, enter RAW. To indicate a message digest, enter DIGEST.

" + "documentation":"

Tells AWS KMS whether the value of the Message parameter is a message or message digest. The default value, RAW, indicates a message. To indicate a message digest, enter DIGEST.

" }, "GrantTokens":{ "shape":"GrantTokenList", @@ -2755,7 +2759,7 @@ }, "Signature":{ "shape":"CiphertextType", - "documentation":"

The cryptographic signature that was generated for the message.

" + "documentation":"

The cryptographic signature that was generated for the message.

  • When used with the supported RSA signing algorithms, the encoding of this value is defined by PKCS #1 in RFC 8017.

  • When used with the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing algorithms, this value is a DER-encoded object as defined by ANS X9.62–2005 and RFC 3279 Section 2.2.3. This is the most commonly used signature format and is appropriate for most uses.

When you use the HTTP API or the AWS CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.

" }, "SigningAlgorithm":{ "shape":"SigningAlgorithmSpec", @@ -2948,11 +2952,11 @@ }, "Message":{ "shape":"PlaintextType", - "documentation":"

Specifies the message that was signed, or a hash digest of that message. Messages can be 0-4096 bytes. To verify a larger message, provide a hash digest of the message.

If the digest of the message specified here is different from the message digest that was signed, the signature verification fails.

" + "documentation":"

Specifies the message that was signed. You can submit a raw message of up to 4096 bytes, or a hash digest of the message. If you submit a digest, use the MessageType parameter with a value of DIGEST.

If the message specified here is different from the message that was signed, the signature verification fails. A message and its hash digest are considered to be the same message.

" }, "MessageType":{ "shape":"MessageType", - "documentation":"

Tells AWS KMS whether the value of the Message parameter is a message or message digest. To indicate a message, enter RAW. To indicate a message digest, enter DIGEST.

" + "documentation":"

Tells AWS KMS whether the value of the Message parameter is a message or message digest. The default value, RAW, indicates a message. To indicate a message digest, enter DIGEST.

Use the DIGEST value only when the value of the Message parameter is a message digest. If you use the DIGEST value with a raw message, the security of the verification operation can be compromised.

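A minimal sketch that signs a SHA-256 digest and verifies it, following the DIGEST guidance above; the key ID and message are placeholders:

    import hashlib
    import boto3

    kms = boto3.client('kms')
    key_id = '1234abcd-12ab-34cd-56ef-1234567890ab'   # placeholder asymmetric CMK

    digest = hashlib.sha256(b'example message').digest()

    signed = kms.sign(
        KeyId=key_id,
        Message=digest,
        MessageType='DIGEST',              # a digest is passed, not the raw message
        SigningAlgorithm='ECDSA_SHA_256',
    )

    verified = kms.verify(
        KeyId=key_id,
        Message=digest,
        MessageType='DIGEST',
        Signature=signed['Signature'],
        SigningAlgorithm='ECDSA_SHA_256',
    )
    print(verified['SignatureValid'])
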
" }, "Signature":{ "shape":"CiphertextType", diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index e77e5103..6256aad4 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -84,7 +84,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Creates a mapping between an event source and an AWS Lambda function. Lambda reads items from the event source and triggers the function.

For details about each event source type, see the following topics.

The following error handling options are only available for stream sources (DynamoDB and Kinesis):

  • BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry.

  • DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic.

  • MaximumRecordAgeInSeconds - Discard records older than the specified age.

  • MaximumRetryAttempts - Discard records after the specified number of retries.

" + "documentation":"

Creates a mapping between an event source and an AWS Lambda function. Lambda reads items from the event source and triggers the function.

For details about each event source type, see the following topics.

The following error handling options are only available for stream sources (DynamoDB and Kinesis):

  • BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry.

  • DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic.

  • MaximumRecordAgeInSeconds - Discard records older than the specified age.

  • MaximumRetryAttempts - Discard records after the specified number of retries.

  • ParallelizationFactor - Process multiple batches from each shard concurrently.

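A hedged boto3 sketch of creating a stream event source mapping that sets the error handling options above; the ARNs and function name are placeholders:

    import boto3

    lambda_client = boto3.client('lambda')

    mapping = lambda_client.create_event_source_mapping(
        EventSourceArn='arn:aws:kinesis:us-east-2:111122223333:stream/example',  # placeholder
        FunctionName='example-function',                                         # placeholder
        StartingPosition='LATEST',
        BatchSize=100,
        BisectBatchOnFunctionError=True,
        MaximumRetryAttempts=2,
        MaximumRecordAgeInSeconds=3600,
        ParallelizationFactor=2,
        DestinationConfig={
            'OnFailure': {
                'Destination': 'arn:aws:sqs:us-east-2:111122223333:example-dlq'  # placeholder
            }
        },
    )
    print(mapping['UUID'], mapping['State'])
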
" }, "CreateFunction":{ "name":"CreateFunction", @@ -763,7 +763,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ], "documentation":"

Adds tags to a function.

" }, @@ -779,7 +780,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ], "documentation":"

Removes tags from a function.

" }, @@ -819,7 +821,7 @@ {"shape":"ResourceConflictException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Updates an event source mapping. You can change the function that AWS Lambda invokes, or pause invocation and resume later from the same location.

The following error handling options are only available for stream sources (DynamoDB and Kinesis):

  • BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry.

  • DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic.

  • MaximumRecordAgeInSeconds - Discard records older than the specified age.

  • MaximumRetryAttempts - Discard records after the specified number of retries.

" + "documentation":"

Updates an event source mapping. You can change the function that AWS Lambda invokes, or pause invocation and resume later from the same location.

The following error handling options are only available for stream sources (DynamoDB and Kinesis):

  • BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry.

  • DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic.

  • MaximumRecordAgeInSeconds - Discard records older than the specified age.

  • MaximumRetryAttempts - Discard records after the specified number of retries.

  • ParallelizationFactor - Process multiple batches from each shard concurrently.

" }, "UpdateFunctionCode":{ "name":"UpdateFunctionCode", @@ -1785,7 +1787,7 @@ }, "LastUpdateStatus":{ "shape":"LastUpdateStatus", - "documentation":"

The status of the last update that was performed on the function.

" + "documentation":"

The status of the last update that was performed on the function. This is first set to Successful after function creation completes.

" }, "LastUpdateStatusReason":{ "shape":"LastUpdateStatusReason", @@ -2303,7 +2305,7 @@ }, "FunctionError":{ "shape":"String", - "documentation":"

If present, indicates that an error occurred during function execution. Details about the error are included in the response payload.

  • Handled - The runtime caught an error thrown by the function and formatted it into a JSON document.

  • Unhandled - The runtime didn't handle the error. For example, the function ran out of memory or timed out.

", + "documentation":"

If present, indicates that an error occurred during function execution. Details about the error are included in the response payload.

", "location":"header", "locationName":"X-Amz-Function-Error" }, @@ -2426,7 +2428,10 @@ "EniLimitExceeded", "InsufficientRolePermissions", "InvalidConfiguration", - "InternalError" + "InternalError", + "SubnetOutOfIPAddresses", + "InvalidSubnet", + "InvalidSecurityGroup" ] }, "Layer":{ @@ -2705,7 +2710,7 @@ "members":{ "MasterRegion":{ "shape":"MasterRegion", - "documentation":"

For Lambda@Edge functions, the AWS Region of the master function. For example, us-east-2 or ALL. If specified, you must set FunctionVersion to ALL.

", + "documentation":"

For Lambda@Edge functions, the AWS Region of the master function. For example, us-east-1 filters the list of functions to only include Lambda@Edge functions replicated from a master function in US East (N. Virginia). If specified, you must set FunctionVersion to ALL.

", "location":"querystring", "locationName":"MasterRegion" }, @@ -3545,7 +3550,9 @@ "InsufficientRolePermissions", "InvalidConfiguration", "InternalError", - "SubnetOutOfIPAddresses" + "SubnetOutOfIPAddresses", + "InvalidSubnet", + "InvalidSecurityGroup" ] }, "StatementId":{ @@ -3641,7 +3648,7 @@ "documentation":"

The tracing mode.

" } }, - "documentation":"

The function's AWS X-Ray tracing configuration.

" + "documentation":"

The function's AWS X-Ray tracing configuration. To sample and record incoming requests, set Mode to Active.

" }, "TracingConfigResponse":{ "type":"structure", diff --git a/botocore/data/lex-models/2017-04-19/service-2.json b/botocore/data/lex-models/2017-04-19/service-2.json index 877da23c..b85ce4be 100644 --- a/botocore/data/lex-models/2017-04-19/service-2.json +++ b/botocore/data/lex-models/2017-04-19/service-2.json @@ -689,6 +689,10 @@ "checksum":{ "shape":"String", "documentation":"

Checksum of the bot alias.

" + }, + "conversationLogs":{ + "shape":"ConversationLogsResponse", + "documentation":"

Settings that determine how Amazon Lex uses conversation logs for the alias.

" } }, "documentation":"

Provides information about a bot alias.

" @@ -912,6 +916,38 @@ "CustomPayload" ] }, + "ConversationLogsRequest":{ + "type":"structure", + "required":[ + "logSettings", + "iamRoleArn" + ], + "members":{ + "logSettings":{ + "shape":"LogSettingsRequestList", + "documentation":"

The settings for your conversation logs. You can log the conversation text, conversation audio, or both.

" + }, + "iamRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role with permission to write to your CloudWatch Logs for text logs and your S3 bucket for audio logs. If audio encryption is enabled, this role also provides access permission for the AWS KMS key used for encrypting audio logs. For more information, see Creating an IAM Role and Policy for Conversation Logs.

" + } + }, + "documentation":"

Provides the settings needed for conversation logs.

" + }, + "ConversationLogsResponse":{ + "type":"structure", + "members":{ + "logSettings":{ + "shape":"LogSettingsResponseList", + "documentation":"

The settings for your conversation logs. You can log text, audio, or both.

" + }, + "iamRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role used to write your logs to CloudWatch Logs or an S3 bucket.

" + } + }, + "documentation":"

Contains information about conversation log settings.

" + }, "Count":{"type":"integer"}, "CreateBotVersionRequest":{ "type":"structure", @@ -1129,6 +1165,14 @@ "valueSelectionStrategy":{ "shape":"SlotValueSelectionStrategy", "documentation":"

The strategy that Amazon Lex uses to determine the value of the slot. For more information, see PutSlotType.

" + }, + "parentSlotTypeSignature":{ + "shape":"CustomOrBuiltinSlotTypeName", + "documentation":"

The built-in slot type used as the parent of the slot type.

" + }, + "slotTypeConfigurations":{ + "shape":"SlotTypeConfigurations", + "documentation":"

Configuration information that extends the parent built-in slot type.

" } } }, @@ -1312,6 +1356,13 @@ "max":200, "min":0 }, + "Destination":{ + "type":"string", + "enum":[ + "CLOUDWATCH_LOGS", + "S3" + ] + }, "EnumerationValue":{ "type":"structure", "required":["value"], @@ -1331,7 +1382,7 @@ "type":"list", "member":{"shape":"EnumerationValue"}, "max":10000, - "min":1 + "min":0 }, "ExportStatus":{ "type":"string", @@ -1439,6 +1490,10 @@ "checksum":{ "shape":"String", "documentation":"

Checksum of the bot alias.

" + }, + "conversationLogs":{ + "shape":"ConversationLogsResponse", + "documentation":"

The settings that determine how Amazon Lex uses conversation logs for the alias.

" } } }, @@ -2209,6 +2264,14 @@ "valueSelectionStrategy":{ "shape":"SlotValueSelectionStrategy", "documentation":"

The strategy that Amazon Lex uses to determine the value of the slot. For more information, see PutSlotType.

" + }, + "parentSlotTypeSignature":{ + "shape":"CustomOrBuiltinSlotTypeName", + "documentation":"

The built-in slot type used as a parent for the slot type.

" + }, + "slotTypeConfigurations":{ + "shape":"SlotTypeConfigurations", + "documentation":"

Configuration information that extends the parent built-in slot type.

" } } }, @@ -2332,6 +2395,12 @@ "max":5, "min":1 }, + "IamRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:[\\w\\-]+:iam::[\\d]{12}:role\\/[\\w+=,\\.@\\-]{1,64}$" + }, "ImportStatus":{ "type":"string", "enum":[ @@ -2414,6 +2483,12 @@ "exception":true, "fault":true }, + "KmsKeyArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:[\\w\\-]+:kms:[\\w\\-]+:[\\d]{12}:(?:key\\/[\\w\\-]+|alias\\/[a-zA-Z0-9:\\/_\\-]{1,256})$" + }, "LambdaARN":{ "type":"string", "max":2048, @@ -2454,6 +2529,74 @@ "type":"list", "member":{"shape":"Locale"} }, + "LogSettingsRequest":{ + "type":"structure", + "required":[ + "logType", + "destination", + "resourceArn" + ], + "members":{ + "logType":{ + "shape":"LogType", + "documentation":"

The type of logging to enable. Text logs are delivered to a CloudWatch Logs log group. Audio logs are delivered to an S3 bucket.

" + }, + "destination":{ + "shape":"Destination", + "documentation":"

Where the logs will be delivered. Text logs are delivered to a CloudWatch Logs log group. Audio logs are delivered to an S3 bucket.

" + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS customer managed key for encrypting audio logs delivered to an S3 bucket. The key does not apply to CloudWatch Logs and is optional for S3 buckets.

" + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch Logs log group or S3 bucket where the logs should be delivered.

" + } + }, + "documentation":"

Settings used to configure delivery mode and destination for conversation logs.
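A hedged sketch of enabling text conversation logs on a bot alias using these shapes; the bot, alias, log group, and role ARNs are placeholders:

    import boto3

    lex = boto3.client('lex-models')

    lex.put_bot_alias(
        name='prod',            # placeholder alias name
        botName='ExampleBot',   # placeholder bot name
        botVersion='1',
        conversationLogs={
            'iamRoleArn': 'arn:aws:iam::111122223333:role/ExampleLexLogsRole',   # placeholder
            'logSettings': [
                {
                    'logType': 'TEXT',
                    'destination': 'CLOUDWATCH_LOGS',
                    # Placeholder CloudWatch Logs log group ARN for text logs.
                    'resourceArn': 'arn:aws:logs:us-east-2:111122223333:log-group:example-lex-logs',
                },
            ],
        },
    )
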

" + }, + "LogSettingsRequestList":{ + "type":"list", + "member":{"shape":"LogSettingsRequest"} + }, + "LogSettingsResponse":{ + "type":"structure", + "members":{ + "logType":{ + "shape":"LogType", + "documentation":"

The type of logging that is enabled.

" + }, + "destination":{ + "shape":"Destination", + "documentation":"

The destination where logs are delivered.

" + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the key used to encrypt audio logs in an S3 bucket.

" + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch Logs log group or S3 bucket where the logs are delivered.

" + }, + "resourcePrefix":{ + "shape":"ResourcePrefix", + "documentation":"

The resource prefix is the first part of the S3 object key within the S3 bucket that you specified to contain audio logs. For CloudWatch Logs, it is the prefix of the log stream name within the log group that you specified.

" + } + }, + "documentation":"

The settings for conversation logs.

" + }, + "LogSettingsResponseList":{ + "type":"list", + "member":{"shape":"LogSettingsResponse"} + }, + "LogType":{ + "type":"string", + "enum":[ + "AUDIO", + "TEXT" + ] + }, "MaxResults":{ "type":"integer", "box":true, @@ -2522,6 +2665,13 @@ "min":1, "pattern":"[0-9]+" }, + "ObfuscationSetting":{ + "type":"string", + "enum":[ + "NONE", + "DEFAULT_OBFUSCATION" + ] + }, "PreconditionFailedException":{ "type":"structure", "members":{ @@ -2601,6 +2751,10 @@ "checksum":{ "shape":"String", "documentation":"

Identifies a specific revision of the $LATEST version.

When you create a new bot alias, leave the checksum field blank. If you specify a checksum you get a BadRequestException exception.

When you want to update a bot alias, set the checksum field to the checksum of the most recent revision of the $LATEST version. If you don't specify the checksum field, or if the checksum does not match the $LATEST version, you get a PreconditionFailedException exception.

" + }, + "conversationLogs":{ + "shape":"ConversationLogsRequest", + "documentation":"

Settings for conversation logs for the alias.

" } } }, @@ -2634,6 +2788,10 @@ "checksum":{ "shape":"String", "documentation":"

The checksum for the current version of the alias.

" + }, + "conversationLogs":{ + "shape":"ConversationLogsResponse", + "documentation":"

The settings that determine how Amazon Lex uses conversation logs for the alias.

" } } }, @@ -2932,6 +3090,14 @@ "createVersion":{ "shape":"Boolean", "documentation":"

When set to true a new numbered version of the slot type is created. This is the same as calling the CreateSlotTypeVersion operation. If you do not specify createVersion, the default is false.

" + }, + "parentSlotTypeSignature":{ + "shape":"CustomOrBuiltinSlotTypeName", + "documentation":"

The built-in slot type used as the parent of the slot type. When you define a parent slot type, the new slot type has all of the same configuration as the parent.

Only AMAZON.AlphaNumeric is supported.

" + }, + "slotTypeConfigurations":{ + "shape":"SlotTypeConfigurations", + "documentation":"

Configuration information that extends the parent built-in slot type. The configuration is added to the settings for the parent slot type.

" } } }, @@ -2973,6 +3139,14 @@ "createVersion":{ "shape":"Boolean", "documentation":"

True if a new version of the slot type was created. If the createVersion field was not specified in the request, the createVersion field is set to false in the response.

" + }, + "parentSlotTypeSignature":{ + "shape":"CustomOrBuiltinSlotTypeName", + "documentation":"

The built-in slot type used as the parent of the slot type.

" + }, + "slotTypeConfigurations":{ + "shape":"SlotTypeConfigurations", + "documentation":"

Configuration information that extends the parent built-in slot type.

" } } }, @@ -2985,6 +3159,17 @@ "BotChannel" ] }, + "RegexPattern":{ + "type":"string", + "max":100, + "min":1 + }, + "ResourceArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^arn:[\\w\\-]+:(?:logs:[\\w\\-]+:[\\d]{12}:log-group:[\\.\\-_/#A-Za-z0-9]{1,512}(?::\\*)?|s3:::[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9])$" + }, "ResourceInUseException":{ "type":"structure", "members":{ @@ -2995,6 +3180,10 @@ "error":{"httpStatusCode":400}, "exception":true }, + "ResourcePrefix":{ + "type":"string", + "max":1024 + }, "ResourceReference":{ "type":"structure", "members":{ @@ -3069,6 +3258,10 @@ "responseCard":{ "shape":"ResponseCard", "documentation":"

A set of possible responses for the slot type used by text-based clients. A user chooses an option from the response card, instead of using text to reply.

" + }, + "obfuscationSetting":{ + "shape":"ObfuscationSetting", + "documentation":"

Determines whether a slot is obfuscated in conversation logs and stored utterances. When you obfuscate a slot, the value is replaced by the slot name in curly braces ({}). For example, if the slot name is \"full_name\", obfuscated values are replaced with \"{full_name}\". For more information, see Slot Obfuscation .

" } }, "documentation":"

Identifies the version of a specific slot.

" @@ -3092,6 +3285,22 @@ "min":1, "pattern":"^([A-Za-z](-|_|.)?)+$" }, + "SlotTypeConfiguration":{ + "type":"structure", + "members":{ + "regexConfiguration":{ + "shape":"SlotTypeRegexConfiguration", + "documentation":"

A regular expression used to validate the value of a slot.

" + } + }, + "documentation":"

Provides configuration information for a slot type.

" + }, + "SlotTypeConfigurations":{ + "type":"list", + "member":{"shape":"SlotTypeConfiguration"}, + "max":10, + "min":0 + }, "SlotTypeMetadata":{ "type":"structure", "members":{ @@ -3128,6 +3337,17 @@ "min":1, "pattern":"^([A-Za-z]_?)+$" }, + "SlotTypeRegexConfiguration":{ + "type":"structure", + "required":["pattern"], + "members":{ + "pattern":{ + "shape":"RegexPattern", + "documentation":"

A regular expression used to validate the value of a slot.

Use a standard regular expression. Amazon Lex supports the following characters in the regular expression:

  • A-Z, a-z

  • 0-9

  • Unicode characters (\"\\u<Unicode>\")

Represent Unicode characters with four digits, for example \"\\u0041\" or \"\\u005A\".

The following regular expression operators are not supported:

  • Infinite repeaters: *, +, or {x,} with no upper bound.

  • Wild card (.)

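A hedged sketch of a slot type that extends AMAZON.AlphaNumeric with a regular expression that follows the constraints above; the slot type name and pattern are placeholders:

    import boto3

    lex = boto3.client('lex-models')

    lex.put_slot_type(
        name='TrackingNumber',                          # placeholder slot type name
        description='Two letters followed by ten digits',
        parentSlotTypeSignature='AMAZON.AlphaNumeric',  # the only supported parent
        slotTypeConfigurations=[
            {
                # Bounded repetition only; infinite repeaters (*, +, {x,}) and
                # the wildcard (.) are not supported.
                'regexConfiguration': {'pattern': '[A-Z]{2}[0-9]{10}'},
            },
        ],
    )
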
" + } + }, + "documentation":"

Provides a regular expression used to validate the value of a slot.

" + }, "SlotUtteranceList":{ "type":"list", "member":{"shape":"Utterance"}, diff --git a/botocore/data/lightsail/2016-11-28/service-2.json b/botocore/data/lightsail/2016-11-28/service-2.json index b0fa5749..1c998ed8 100644 --- a/botocore/data/lightsail/2016-11-28/service-2.json +++ b/botocore/data/lightsail/2016-11-28/service-2.json @@ -143,7 +143,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Copies a manual instance or disk snapshot as another manual snapshot, or copies an automatic instance or disk snapshot as a manual snapshot. This operation can also be used to copy a manual or automatic snapshot of an instance or a disk from one AWS Region to another in Amazon Lightsail.

When copying a manual snapshot, be sure to define the source region, source snapshot name, and target snapshot name parameters.

When copying an automatic snapshot, be sure to define the source region, source resource name, target snapshot name, and either the restore date or the use latest restorable auto snapshot parameters.

Database snapshots cannot be copied at this time.

" + "documentation":"

Copies a manual snapshot of an instance or disk as another manual snapshot, or copies an automatic snapshot of an instance or disk as a manual snapshot. This operation can also be used to copy a manual or automatic snapshot of an instance or a disk from one AWS Region to another in Amazon Lightsail.

When copying a manual snapshot, be sure to define the source region, source snapshot name, and target snapshot name parameters.

When copying an automatic snapshot, be sure to define the source region, source resource name, target snapshot name, and either the restore date or the use latest restorable auto snapshot parameters.

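A hedged sketch of both copy modes described above; the snapshot, resource, and region names are placeholders:

    import boto3

    lightsail = boto3.client('lightsail', region_name='us-east-2')  # target region

    # Copy a manual snapshot from another region.
    lightsail.copy_snapshot(
        sourceSnapshotName='example-manual-snapshot',   # placeholder
        targetSnapshotName='example-copy',              # placeholder
        sourceRegion='us-west-2',
    )

    # Copy the latest automatic snapshot of an instance or disk as a manual snapshot.
    lightsail.copy_snapshot(
        sourceResourceName='example-instance',          # placeholder
        useLatestRestorableAutoSnapshot=True,
        targetSnapshotName='example-auto-copy',         # placeholder
        sourceRegion='us-west-2',
    )
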
" }, "CreateCloudFormationStack":{ "name":"CreateCloudFormationStack", @@ -446,7 +446,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Deletes an automatic snapshot for an instance or disk.

" + "documentation":"

Deletes an automatic snapshot of an instance or disk. For more information, see the Lightsail Dev Guide.

" }, "DeleteDisk":{ "name":"DeleteDisk", @@ -842,7 +842,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the available automatic snapshots for the specified resource name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Returns the available automatic snapshots for an instance or disk. For more information, see the Lightsail Dev Guide.

" }, "GetBlueprints":{ "name":"GetBlueprints", @@ -2001,7 +2001,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Allows the update of one or more parameters of a database in Amazon Lightsail.

Parameter updates don't cause outages; therefore, their application is not subject to the preferred maintenance window. However, there are two ways in which paramater updates are applied: dynamic or pending-reboot. Parameters marked with a dynamic apply type are applied immediately. Parameters marked with a pending-reboot apply type are applied only after the database is rebooted using the reboot relational database operation.

The update relational database parameters operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Allows the update of one or more parameters of a database in Amazon Lightsail.

Parameter updates don't cause outages; therefore, their application is not subject to the preferred maintenance window. However, there are two ways in which parameter updates are applied: dynamic or pending-reboot. Parameters marked with a dynamic apply type are applied immediately. Parameters marked with a pending-reboot apply type are applied only after the database is rebooted using the reboot relational database operation.

The update relational database parameters operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.

" } }, "shapes":{ @@ -2524,23 +2524,23 @@ "members":{ "sourceSnapshotName":{ "shape":"ResourceName", - "documentation":"

The name of the source instance or disk snapshot to be copied.

Define this parameter only when copying a manual snapshot as another manual snapshot.

" + "documentation":"

The name of the source manual snapshot to copy.

Constraint:

  • Define this parameter only when copying a manual snapshot as another manual snapshot.

" }, "sourceResourceName":{ "shape":"string", - "documentation":"

The name of the source resource from which the automatic snapshot was created.

Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The name of the source instance or disk from which the source automatic snapshot was created.

Constraint:

  • Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" }, "restoreDate":{ "shape":"string", - "documentation":"

The date of the automatic snapshot to copy for the new manual snapshot.

Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The date of the source automatic snapshot to copy. Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

  • Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" }, "useLatestRestorableAutoSnapshot":{ "shape":"boolean", - "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot of the specified source instance or disk.

Constraints:

  • This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

  • Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.

" }, "targetSnapshotName":{ "shape":"ResourceName", - "documentation":"

The name of the new instance or disk snapshot to be created as a copy.

" + "documentation":"

The name of the new manual snapshot to be created as a copy.

" }, "sourceRegion":{ "shape":"RegionName", @@ -2590,7 +2590,7 @@ }, "diskSnapshotName":{ "shape":"ResourceName", - "documentation":"

The name of the disk snapshot (e.g., my-snapshot) from which to create the new storage disk.

This parameter cannot be defined together with the source disk name parameter. The disk snapshot name and source disk name parameters are mutually exclusive.

" + "documentation":"

The name of the disk snapshot (e.g., my-snapshot) from which to create the new storage disk.

Constraint:

  • This parameter cannot be defined together with the source disk name parameter. The disk snapshot name and source disk name parameters are mutually exclusive.

" }, "availabilityZone":{ "shape":"NonEmptyString", @@ -2610,15 +2610,15 @@ }, "sourceDiskName":{ "shape":"string", - "documentation":"

The name of the source disk from which the source automatic snapshot was created.

This parameter cannot be defined together with the disk snapshot name parameter. The source disk name and disk snapshot name parameters are mutually exclusive.

Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The name of the source disk from which the source automatic snapshot was created.

Constraints:

  • This parameter cannot be defined together with the disk snapshot name parameter. The source disk name and disk snapshot name parameters are mutually exclusive.

  • Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" }, "restoreDate":{ "shape":"string", - "documentation":"

The date of the automatic snapshot to use for the new disk.

Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The date of the automatic snapshot to use for the new disk. Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

  • Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" }, "useLatestRestorableAutoSnapshot":{ "shape":"boolean", - "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

Constraints:

  • This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

  • Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" } } }, @@ -2802,7 +2802,7 @@ }, "instanceSnapshotName":{ "shape":"ResourceName", - "documentation":"

The name of the instance snapshot on which you are basing your new instances. Use the get instance snapshots operation to return information about your existing snapshots.

This parameter cannot be defined together with the source instance name parameter. The instance snapshot name and source instance name parameters are mutually exclusive.

" + "documentation":"

The name of the instance snapshot on which you are basing your new instances. Use the get instance snapshots operation to return information about your existing snapshots.

Constraint:

  • This parameter cannot be defined together with the source instance name parameter. The instance snapshot name and source instance name parameters are mutually exclusive.

" }, "bundleId":{ "shape":"NonEmptyString", @@ -2826,15 +2826,15 @@ }, "sourceInstanceName":{ "shape":"string", - "documentation":"

The name of the source instance from which the source automatic snapshot was created.

This parameter cannot be defined together with the instance snapshot name parameter. The source instance name and instance snapshot name parameters are mutually exclusive.

Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The name of the source instance from which the source automatic snapshot was created.

Constraints:

  • This parameter cannot be defined together with the instance snapshot name parameter. The source instance name and instance snapshot name parameters are mutually exclusive.

  • Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" }, "restoreDate":{ "shape":"string", - "documentation":"

The date of the automatic snapshot to use for the new instance.

Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

The date of the automatic snapshot to use for the new instance. Use the get auto snapshots operation to identify the dates of the available automatic snapshots.

Constraints:

  • Must be specified in YYYY-MM-DD format.

  • This parameter cannot be defined together with the use latest restorable auto snapshot parameter. The restore date and use latest restorable auto snapshot parameters are mutually exclusive.

  • Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" }, "useLatestRestorableAutoSnapshot":{ "shape":"boolean", - "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

" + "documentation":"

A Boolean value to indicate whether to use the latest available automatic snapshot.

Constraints:

  • This parameter cannot be defined together with the restore date parameter. The use latest restorable auto snapshot and restore date parameters are mutually exclusive.

  • Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.

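A hedged sketch of creating an instance from the latest automatic snapshot of a source instance, per the constraints above; the names, zone, and bundle ID are placeholders:

    import boto3

    lightsail = boto3.client('lightsail')

    lightsail.create_instances_from_snapshot(
        instanceNames=['example-restored-instance'],   # placeholder
        availabilityZone='us-east-2a',                 # placeholder
        bundleId='nano_2_0',                           # placeholder bundle ID
        sourceInstanceName='example-instance',         # mutually exclusive with instanceSnapshotName
        useLatestRestorableAutoSnapshot=True,          # mutually exclusive with restoreDate
    )
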
" } } }, @@ -3179,11 +3179,11 @@ "members":{ "resourceName":{ "shape":"ResourceName", - "documentation":"

The name of the source resource from which to delete the automatic snapshot.

" + "documentation":"

The name of the source instance or disk from which to delete the automatic snapshot.

" }, "date":{ "shape":"AutoSnapshotDate", - "documentation":"

The date of the automatic snapshot to delete in YYYY-MM-DD format.

Use the get auto snapshots operation to get the available automatic snapshots for a resource.

" + "documentation":"

The date of the automatic snapshot to delete in YYYY-MM-DD format. Use the get auto snapshots operation to get the available automatic snapshots for a resource.

" } } }, @@ -3549,7 +3549,7 @@ }, "resourceName":{ "shape":"ResourceName", - "documentation":"

The name of the source resource from which to disable the add-on.

" + "documentation":"

The name of the source resource for which to disable the add-on.

" } } }, @@ -4052,7 +4052,7 @@ "members":{ "resourceName":{ "shape":"ResourceName", - "documentation":"

The name of the source resource from which to get automatic snapshot information.

" + "documentation":"

The name of the source instance or disk from which to get automatic snapshot information.

" } } }, @@ -4061,7 +4061,7 @@ "members":{ "resourceName":{ "shape":"ResourceName", - "documentation":"

The name of the source resource for the automatic snapshots.

" + "documentation":"

The name of the source instance or disk for the automatic snapshots.

" }, "resourceType":{ "shape":"ResourceType", @@ -4069,7 +4069,7 @@ }, "autoSnapshots":{ "shape":"AutoSnapshotDetailsList", - "documentation":"

An array of objects that describe the automatic snapshots that are available for the specified source resource.asdf

" + "documentation":"

An array of objects that describe the automatic snapshots that are available for the specified source instance or disk.

" } } }, @@ -6729,6 +6729,10 @@ "pendingMaintenanceActions":{ "shape":"PendingMaintenanceActionList", "documentation":"

Describes the pending maintenance actions for the database.

" + }, + "caCertificateIdentifier":{ + "shape":"string", + "documentation":"

The certificate associated with the database.

" } }, "documentation":"

Describes a database.

" @@ -7464,6 +7468,10 @@ "applyImmediately":{ "shape":"boolean", "documentation":"

When true, applies changes immediately. When false, applies changes during the preferred maintenance window. Some changes may cause an outage.

Default: false

" + }, + "caCertificateIdentifier":{ + "shape":"string", + "documentation":"

Indicates the certificate that needs to be associated with the database.

" } } }, diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index afe44957..7d69f02e 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -25,7 +25,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Associates the specified AWS Key Management Service (AWS KMS) customer master key (CMK) with the specified log group.

Associating an AWS KMS CMK with a log group overrides any existing associations between the log group and a CMK. After a CMK is associated with a log group, all newly ingested data for the log group is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.

Note that it can take up to 5 minutes for this operation to take effect.

If you attempt to associate a CMK with a log group but the CMK does not exist or the CMK is disabled, you will receive an InvalidParameterException error.

" + "documentation":"

Associates the specified AWS Key Management Service (AWS KMS) customer master key (CMK) with the specified log group.

Associating an AWS KMS CMK with a log group overrides any existing associations between the log group and a CMK. After a CMK is associated with a log group, all newly ingested data for the log group is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.

Important: CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric CMK with your log group. For more information, see Using Symmetric and Asymmetric Keys.

Note that it can take up to 5 minutes for this operation to take effect.

If you attempt to associate a CMK with a log group but the CMK does not exist or the CMK is disabled, you will receive an InvalidParameterException error.

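A minimal sketch of associating a symmetric CMK with a log group; the log group name and key ARN are placeholders:

    import boto3

    logs = boto3.client('logs')

    # The CMK must be symmetric, must already exist, and must be enabled.
    logs.associate_kms_key(
        logGroupName='/example/app',   # placeholder
        kmsKeyId='arn:aws:kms:us-east-2:111122223333:key/'
                 '1234abcd-12ab-34cd-56ef-1234567890ab',   # placeholder ARN
    )
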
" }, "CancelExportTask":{ "name":"CancelExportTask", @@ -74,7 +74,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates a log group with the specified name.

You can create up to 20,000 log groups per account.

You must use the following guidelines when naming a log group:

  • Log group names must be unique within a region for an AWS account.

  • Log group names can be between 1 and 512 characters long.

  • Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)

If you associate a AWS Key Management Service (AWS KMS) customer master key (CMK) with the log group, ingested data is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.

If you attempt to associate a CMK with the log group but the CMK does not exist or the CMK is disabled, you will receive an InvalidParameterException error.

" + "documentation":"

Creates a log group with the specified name.

You can create up to 20,000 log groups per account.

You must use the following guidelines when naming a log group:

  • Log group names must be unique within a region for an AWS account.

  • Log group names can be between 1 and 512 characters long.

  • Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)

If you associate an AWS Key Management Service (AWS KMS) customer master key (CMK) with the log group, ingested data is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.

If you attempt to associate a CMK with the log group but the CMK does not exist or the CMK is disabled, you will receive an InvalidParameterException error.

Important: CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric CMK with your log group. For more information, see Using Symmetric and Asymmetric Keys.

" }, "CreateLogStream":{ "name":"CreateLogStream", @@ -89,7 +89,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates a log stream for the specified log group.

There is no limit on the number of log streams that you can create for a log group.

You must use the following guidelines when naming a log stream:

  • Log stream names must be unique within the log group.

  • Log stream names can be between 1 and 512 characters long.

  • The ':' (colon) and '*' (asterisk) characters are not allowed.

" + "documentation":"

Creates a log stream for the specified log group.

There is no limit on the number of log streams that you can create for a log group. There is a limit of 50 TPS on CreateLogStream operations, after which transactions are throttled.

You must use the following guidelines when naming a log stream:

  • Log stream names must be unique within the log group.

  • Log stream names can be between 1 and 512 characters long.

  • The ':' (colon) and '*' (asterisk) characters are not allowed.

" }, "DeleteDestination":{ "name":"DeleteDestination", @@ -462,7 +462,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"UnrecognizedClientException"} ], - "documentation":"

Uploads a batch of log events to the specified log stream.

You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token using DescribeLogStreams. If you call PutLogEvents twice within a narrow time period using the same value for sequenceToken, both calls may be successful, or one may be rejected.

The batch of events must satisfy the following constraints:

  • The maximum batch size is 1,048,576 bytes, and this size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.

  • None of the log events in the batch can be more than 2 hours in the future.

  • None of the log events in the batch can be older than 14 days or older than the retention period of the log group.

  • The log events in the batch must be in chronological ordered by their timestamp. The timestamp is the time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In AWS Tools for PowerShell and the AWS SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.)

  • The maximum number of log events in a batch is 10,000.

  • A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.

If a call to PutLogEvents returns \"UnrecognizedClientException\" the most likely cause is an invalid AWS access key ID or secret key.

" + "documentation":"

Uploads a batch of log events to the specified log stream.

You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token in the expectedSequenceToken field from InvalidSequenceTokenException. If you call PutLogEvents twice within a narrow time period using the same value for sequenceToken, both calls may be successful, or one may be rejected.

The batch of events must satisfy the following constraints:

  • The maximum batch size is 1,048,576 bytes, and this size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.

  • None of the log events in the batch can be more than 2 hours in the future.

  • None of the log events in the batch can be older than 14 days or older than the retention period of the log group.

  • The log events in the batch must be in chronological order by their timestamp. The timestamp is the time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In AWS Tools for PowerShell and the AWS SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.)

  • A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.

  • The maximum number of log events in a batch is 10,000.

  • There is a quota of 5 requests per second per log stream. Additional requests are throttled. This quota can't be changed.

If a call to PutLogEvents returns \"UnrecognizedClientException\", the most likely cause is an invalid AWS access key ID or secret key.
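
A hedged sketch of the sequence-token handshake described above, retrying once with the token reported by InvalidSequenceTokenException. The group and stream names are placeholders, and reading expectedSequenceToken directly from the parsed error response is an assumption about how the modeled error member is surfaced:

import time
import botocore.session

logs = botocore.session.get_session().create_client("logs", region_name="us-east-1")

params = {
    "logGroupName": "/my-app/example",    # hypothetical log group
    "logStreamName": "host-0",            # hypothetical log stream
    "logEvents": [                        # must be in chronological order by timestamp
        {"timestamp": int(time.time() * 1000), "message": "hello"},
    ],
}

try:
    resp = logs.put_log_events(**params)
except logs.exceptions.InvalidSequenceTokenException as err:
    # Retry with the sequence token the service expected for this stream.
    params["sequenceToken"] = err.response["expectedSequenceToken"]
    resp = logs.put_log_events(**params)

next_token = resp["nextSequenceToken"]    # pass this on the next PutLogEvents call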

" }, "PutMetricFilter":{ "name":"PutMetricFilter", @@ -617,7 +617,7 @@ }, "kmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. For more information, see Amazon Resource Names - AWS Key Management Service (AWS KMS).

" + "documentation":"

The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. This must be a symmetric CMK. For more information, see Amazon Resource Names - AWS Key Management Service (AWS KMS) and Using Symmetric and Asymmetric Keys.
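
As a hedged illustration, the same kmsKeyId value can be supplied when creating a log group (AssociateKmsKey accepts it as well); the key ARN below is a placeholder:

import botocore.session

logs = botocore.session.get_session().create_client("logs", region_name="us-east-1")

# The key must be a symmetric CMK that CloudWatch Logs is allowed to use.
logs.create_log_group(
    logGroupName="/my-app/encrypted",                                   # hypothetical log group
    kmsKeyId="arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID",   # placeholder ARN
)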

" } } }, @@ -1530,7 +1530,7 @@ "members":{ "expectedSequenceToken":{"shape":"SequenceToken"} }, - "documentation":"

The sequence token is not valid.

", + "documentation":"

The sequence token is not valid. You can get the correct sequence token from the expectedSequenceToken field of the InvalidSequenceTokenException message.

", "exception":true }, "KmsKeyId":{ diff --git a/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json b/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json index 17f866f0..bafc0b6a 100644 --- a/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json +++ b/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json @@ -101,7 +101,7 @@ "members":{ "dataSetType":{ "shape":"DataSetType", - "documentation":"

The desired data set type.

  • customer_subscriber_hourly_monthly_subscriptions

    From 2014-07-21 to present: Available daily by 24:00 UTC.

  • customer_subscriber_annual_subscriptions

    From 2014-07-21 to present: Available daily by 24:00 UTC.

  • daily_business_usage_by_instance_type

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • daily_business_fees

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • daily_business_free_trial_conversions

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • daily_business_new_instances

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • daily_business_new_product_subscribers

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • daily_business_canceled_product_subscribers

    From 2015-01-26 to present: Available daily by 24:00 UTC.

  • monthly_revenue_billing_and_revenue_data

    From 2015-02 to 2017-06: Available monthly on the 4th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from two months prior.

    From 2017-07 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior.

  • monthly_revenue_annual_subscriptions

    From 2015-02 to 2017-06: Available monthly on the 4th day of the month by 24:00 UTC. Data includes up-front software charges (e.g. annual) from one month prior.

    From 2017-07 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes up-front software charges (e.g. annual) from one month prior.

  • monthly_revenue_field_demonstration_usage

    From 2018-03-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • monthly_revenue_flexible_payment_schedule

    From 2018-11-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • disbursed_amount_by_product

    From 2015-01-26 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_product_with_uncollected_funds

    From 2012-04-19 to 2015-01-25: Available every 30 days by 24:00 UTC.

    From 2015-01-26 to present: This data set was split into three data sets: disbursed_amount_by_product, disbursed_amount_by_age_of_uncollected_funds, and disbursed_amount_by_age_of_disbursed_funds.

  • disbursed_amount_by_instance_hours

    From 2012-09-04 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_customer_geo

    From 2012-04-19 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_uncollected_funds

    From 2015-01-26 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_disbursed_funds

    From 2015-01-26 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_past_due_funds

    From 2018-04-07 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_uncollected_funds_breakdown

    From 2019-10-04 to present: Available every 30 days by 24:00 UTC.

  • customer_profile_by_industry

    From 2015-10-01 to 2017-06-29: Available daily by 24:00 UTC.

    From 2017-06-30 to present: This data set is no longer available.

  • customer_profile_by_revenue

    From 2015-10-01 to 2017-06-29: Available daily by 24:00 UTC.

    From 2017-06-30 to present: This data set is no longer available.

  • customer_profile_by_geography

    From 2015-10-01 to 2017-06-29: Available daily by 24:00 UTC.

    From 2017-06-30 to present: This data set is no longer available.

  • sales_compensation_billed_revenue

    From 2016-12 to 2017-06: Available monthly on the 4th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from two months prior, and up-front software charges (e.g. annual) from one month prior.

    From 2017-06 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior, and up-front software charges (e.g. annual) from one month prior.

  • us_sales_and_use_tax_records

    From 2017-02-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

" + "documentation":"

The desired data set type.

  • customer_subscriber_hourly_monthly_subscriptions

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • customer_subscriber_annual_subscriptions

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_usage_by_instance_type

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_fees

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_free_trial_conversions

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_new_instances

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_new_product_subscribers

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_canceled_product_subscribers

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • monthly_revenue_billing_and_revenue_data

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior.

  • monthly_revenue_annual_subscriptions

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes up-front software charges (e.g. annual) from one month prior.

  • monthly_revenue_field_demonstration_usage

    From 2018-03-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • monthly_revenue_flexible_payment_schedule

    From 2018-11-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • disbursed_amount_by_product

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_instance_hours

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_customer_geo

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_uncollected_funds

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_disbursed_funds

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_past_due_funds

    From 2018-04-07 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_uncollected_funds_breakdown

    From 2019-10-04 to present: Available every 30 days by 24:00 UTC.

  • sales_compensation_billed_revenue

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior, and up-front software charges (e.g. annual) from one month prior.

  • us_sales_and_use_tax_records

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.
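
The identifiers above are the values accepted by GenerateDataSet's dataSetType parameter. A hedged sketch; the IAM role, S3 bucket, and SNS topic are placeholders:

import datetime
import botocore.session

mca = botocore.session.get_session().create_client(
    "marketplacecommerceanalytics", region_name="us-east-1"
)

resp = mca.generate_data_set(
    dataSetType="daily_business_fees",                             # one of the values listed above
    dataSetPublicationDate=datetime.datetime(2020, 2, 1),          # which day's report to generate
    roleNameArn="arn:aws:iam::111122223333:role/MarketplaceRole",  # placeholder IAM role
    destinationS3BucketName="my-analytics-bucket",                 # placeholder bucket
    destinationS3Prefix="mca/",                                    # optional key prefix
    snsTopicArn="arn:aws:sns:us-east-1:111122223333:mca-ready",    # placeholder topic
)
print(resp["dataSetRequestId"])                                    # completion is announced on the SNS topic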

" }, "dataSetPublicationDate":{ "shape":"DataSetPublicationDate", diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 98e5f16b..d9ec070e 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -1311,7 +1311,7 @@ }, "AlphaBehavior": { "type": "string", - "documentation": "Ignore this setting unless this input is a QuickTime animation. Specify which part of this input MediaConvert uses for your outputs. Leave this setting set to DISCARD in order to delete the alpha channel and preserve the video. Use REMAP_TO_LUMA for this setting to delete the video and map the alpha channel to the luma channel of your outputs.", + "documentation": "Ignore this setting unless this input is a QuickTime animation with an alpha channel. Use this setting to create separate Key and Fill outputs. In each output, specify which part of the input MediaConvert uses. Leave this setting at the default value DISCARD to delete the alpha channel and preserve the video. Set it to REMAP_TO_LUMA to delete the video and map the alpha channel to the luma channel of your outputs.", "enum": [ "DISCARD", "REMAP_TO_LUMA" @@ -1386,6 +1386,7 @@ "enum": [ "AAC", "MP2", + "MP3", "WAV", "AIFF", "AC3", @@ -1432,13 +1433,18 @@ "locationName": "mp2Settings", "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value MP2." }, + "Mp3Settings": { + "shape": "Mp3Settings", + "locationName": "mp3Settings", + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value MP3." + }, "WavSettings": { "shape": "WavSettings", "locationName": "wavSettings", "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value WAV." } }, - "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings" + "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings" }, "AudioDefaultSelection": { "type": "string", @@ -1474,7 +1480,7 @@ "CodecSettings": { "shape": "AudioCodecSettings", "locationName": "codecSettings", - "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. 
* AAC, AacSettings * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings" + "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings" }, "CustomLanguageCode": { "shape": "__stringPatternAZaZ23AZaZ", @@ -1562,7 +1568,7 @@ "CorrectionGateLevel": { "shape": "__integerMinNegative70Max0", "locationName": "correctionGateLevel", - "documentation": "Content measuring above this level will be corrected to the target level. Content measuring below this level will not be corrected. Gating only applies when not using real_time_correction." + "documentation": "Content measuring above this level will be corrected to the target level. Content measuring below this level will not be corrected." }, "LoudnessLogging": { "shape": "AudioNormalizationLoudnessLogging", @@ -1596,7 +1602,7 @@ "documentation": "Enable this setting on one audio selector to set it as the default for the job. The service uses this default for outputs where it can't find the specified input audio. If you don't set a default, those outputs have no audio." }, "ExternalAudioFileInput": { - "shape": "__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", + "shape": "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", "locationName": "externalAudioFileInput", "documentation": "Specifies audio data from an external file source." }, @@ -1670,7 +1676,7 @@ "type": "structure", "members": { "AvailBlankingImage": { - "shape": "__stringMin14PatternHttpHttpsS3BmpBMPPngPNG", + "shape": "__stringMin14PatternS3BmpBMPPngPNGHttpsBmpBMPPngPNG", "locationName": "availBlankingImage", "documentation": "Blanking image to be used. Leave empty for solid black. Only bmp and png images are supported." } @@ -2249,6 +2255,11 @@ "shape": "CmafWriteHLSManifest", "locationName": "writeHlsManifest", "documentation": "When set to ENABLED, an Apple HLS manifest will be generated for this output." + }, + "WriteSegmentTimelineInRepresentation": { + "shape": "CmafWriteSegmentTimelineInRepresentation", + "locationName": "writeSegmentTimelineInRepresentation", + "documentation": "When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. 
The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element." } }, "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to CMAF_GROUP_SETTINGS. Each output in a CMAF Output Group may only contain a single video, audio, or caption output." @@ -2325,6 +2336,46 @@ "ENABLED" ] }, + "CmafWriteSegmentTimelineInRepresentation": { + "type": "string", + "documentation": "When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, + "CmfcScte35Esam": { + "type": "string", + "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).", + "enum": [ + "INSERT", + "NONE" + ] + }, + "CmfcScte35Source": { + "type": "string", + "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output.", + "enum": [ + "PASSTHROUGH", + "NONE" + ] + }, + "CmfcSettings": { + "type": "structure", + "members": { + "Scte35Esam": { + "shape": "CmfcScte35Esam", + "locationName": "scte35Esam", + "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + }, + "Scte35Source": { + "shape": "CmfcScte35Source", + "locationName": "scte35Source", + "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output." + } + }, + "documentation": "Settings for MP4 segments in CMAF" + }, "ColorCorrector": { "type": "structure", "members": { @@ -2423,6 +2474,11 @@ "ContainerSettings": { "type": "structure", "members": { + "CmfcSettings": { + "shape": "CmfcSettings", + "locationName": "cmfcSettings", + "documentation": "Settings for MP4 segments in CMAF" + }, "Container": { "shape": "ContainerType", "locationName": "container", @@ -3900,7 +3956,7 @@ "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." 
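
As a hedged illustration of the new CmfcSettings shape above, a fragment of an output's ContainerSettings for a CMAF output group; the CMFC container value is assumed from MediaConvert's container enum and does not appear in this hunk:

# Fragment of a MediaConvert output's ContainerSettings (not a complete CreateJob request).
container_settings = {
    "Container": "CMFC",                 # assumed container value for CMAF outputs
    "CmfcSettings": {
        "Scte35Source": "PASSTHROUGH",   # carry SCTE-35 markers from the input through
        "Scte35Esam": "NONE",            # not inserting markers from an ESAM XML document
    },
}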
}, "SourceFile": { - "shape": "__stringMin14PatternHttpHttpsS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI", + "shape": "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMIHttpsSccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI", "locationName": "sourceFile", "documentation": "External caption file used for loading captions. Accepted file extensions are 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', and 'smi'." }, @@ -4207,7 +4263,12 @@ "QvbrQualityLevel": { "shape": "__integerMin1Max10", "locationName": "qvbrQualityLevel", - "documentation": "Required when you use QVBR rate control mode. That is, when you specify qvbrSettings within h264Settings. Specify the target quality level for this output, from 1 to 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9." + "documentation": "Required when you use QVBR rate control mode. That is, when you specify qvbrSettings within h264Settings. Specify the general target quality level for this output, from 1 to 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." + }, + "QvbrQualityLevelFineTune": { + "shape": "__doubleMin0Max1", + "locationName": "qvbrQualityLevelFineTune", + "documentation": "Optional. Specify a value here to set the QVBR quality to a level that is between whole numbers. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. MediaConvert rounds your QVBR quality level to the nearest third of a whole number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33." } }, "documentation": "Settings for quality-defined variable bitrate encoding with the H.264 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode." @@ -4629,7 +4690,12 @@ "QvbrQualityLevel": { "shape": "__integerMin1Max10", "locationName": "qvbrQualityLevel", - "documentation": "Required when you use QVBR rate control mode. That is, when you specify qvbrSettings within h265Settings. Specify the target quality level for this output, from 1 to 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9." + "documentation": "Required when you use QVBR rate control mode. That is, when you specify qvbrSettings within h265Settings. Specify the general target quality level for this output, from 1 to 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." 
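
The qvbrQualityLevel / qvbrQualityLevelFineTune pairing documented above composes like this inside a job's video settings. A hedged fragment only; a real CreateJob request also needs the account-specific endpoint, an IAM role, inputs, and output groups:

# Fragment of a MediaConvert VideoDescription targeting an effective QVBR level of 7.33.
video_description = {
    "CodecSettings": {
        "Codec": "H_264",
        "H264Settings": {
            "RateControlMode": "QVBR",
            "QvbrSettings": {
                "QvbrQualityLevel": 7,             # whole-number part of the target
                "QvbrQualityLevelFineTune": 0.33,  # rounded to the nearest third of a whole number
            },
        },
    },
}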
+ }, + "QvbrQualityLevelFineTune": { + "shape": "__doubleMin0Max1", + "locationName": "qvbrQualityLevelFineTune", + "documentation": "Optional. Specify a value here to set the QVBR quality to a level that is between whole numbers. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. MediaConvert rounds your QVBR quality level to the nearest third of a whole number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33." } }, "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode." @@ -5376,7 +5442,7 @@ "SegmentModifier": { "shape": "__string", "locationName": "segmentModifier", - "documentation": "String concatenated to end of segment filenames. Accepts \"Format Identifiers\":#format_identifier_parameters." + "documentation": "Use this setting to add an identifying string to the filename of each segment. The service adds this string between the name modifier and segment index number. You can use format identifiers in the string. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html" } }, "documentation": "Settings for HLS output groups" @@ -5483,7 +5549,7 @@ "documentation": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." }, "FileInput": { - "shape": "__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL", + "shape": "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL", "locationName": "fileInput", "documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs that contain assets referenced by the CPL." }, @@ -5750,7 +5816,7 @@ "documentation": "Specify the height of the inserted image in pixels. If you specify a value that's larger than the video resolution height, the service will crop your overlaid image to fit. To use the native height of the image, keep this setting blank." 
}, "ImageInserterInput": { - "shape": "__stringMin14PatternHttpHttpsS3BmpBMPPngPNGTgaTGA", + "shape": "__stringMin14PatternS3BmpBMPPngPNGTgaTGAHttpsBmpBMPPngPNGTgaTGA", "locationName": "imageInserterInput", "documentation": "Specify the HTTP, HTTPS, or Amazon S3 location of the image that you want to overlay on the video. Use a PNG or TGA file." }, @@ -6974,7 +7040,7 @@ "documentation": "If your motion graphic asset is a .mov file, keep this setting unspecified. If your motion graphic asset is a series of .png files, specify the frame rate of the overlay in frames per second, as a fraction. For example, specify 24 fps as 24/1. Make sure that the number of images in your series matches the frame rate and your intended overlay duration. For example, if you want a 30-second overlay at 30 fps, you should have 900 .png images. This overlay frame rate doesn't need to match the frame rate of the underlying video." }, "Input": { - "shape": "__stringMin14Max1285PatternHttpHttpsS3Mov09Png", + "shape": "__stringMin14Max1285PatternS3Mov09PngHttpsMov09Png", "locationName": "input", "documentation": "Specify the .mov file or series of .png files that you want to overlay on your video. For .png files, provide the file name of the first file in the series. Make sure that the names of the .png files end with sequential numbers that specify the order that they are played in. For example, overlay_000.png, overlay_001.png, overlay_002.png, and so on. The sequence must start at zero, and each image file name must have the same number of digits. Pad your initial file names with enough zeros to complete the sequence. For example, if the first image is overlay_0.png, there can be only 10 images in the sequence, with the last image being overlay_9.png. But if the first image is overlay_00.png, there can be 100 images in the sequence." }, @@ -7141,6 +7207,45 @@ }, "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value MP2." }, + "Mp3RateControlMode": { + "type": "string", + "documentation": "Specify whether the service encodes this MP3 audio output with a constant bitrate (CBR) or a variable bitrate (VBR).", + "enum": [ + "CBR", + "VBR" + ] + }, + "Mp3Settings": { + "type": "structure", + "members": { + "Bitrate": { + "shape": "__integerMin16000Max320000", + "locationName": "bitrate", + "documentation": "Specify the average bitrate in bits per second." + }, + "Channels": { + "shape": "__integerMin1Max2", + "locationName": "channels", + "documentation": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2." + }, + "RateControlMode": { + "shape": "Mp3RateControlMode", + "locationName": "rateControlMode", + "documentation": "Specify whether the service encodes this MP3 audio output with a constant bitrate (CBR) or a variable bitrate (VBR)." + }, + "SampleRate": { + "shape": "__integerMin22050Max48000", + "locationName": "sampleRate", + "documentation": "Sample rate in hz." + }, + "VbrQuality": { + "shape": "__integerMin0Max9", + "locationName": "vbrQuality", + "documentation": "Required when you set Bitrate control mode (rateControlMode) to VBR. Specify the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest quality)." + } + }, + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value MP3." 
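
A hedged fragment of an AudioDescription exercising the new MP3 codec entry above; the values are illustrative, and with rateControlMode set to VBR the vbrQuality value drives quality rather than bitrate:

# Fragment of a MediaConvert AudioDescription using the newly added MP3 codec.
audio_description = {
    "CodecSettings": {
        "Codec": "MP3",
        "Mp3Settings": {
            "RateControlMode": "VBR",
            "VbrQuality": 2,          # 0 = highest quality, 9 = lowest
            "Channels": 2,            # 1 = mono, 2 = stereo
            "SampleRate": 48000,      # hz, within the 22050-48000 range
        },
    },
}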
+ }, "Mp4CslgAtom": { "type": "string", "documentation": "When enabled, file composition times will start at zero, composition times in the 'ctts' (composition time to sample) box for B-frames will be negative, and a 'cslg' (composition shift least greatest) box will be included per 14496-1 amendment 1. This improves compatibility with Apple players and tools.", @@ -7173,6 +7278,11 @@ "locationName": "cslgAtom", "documentation": "When enabled, file composition times will start at zero, composition times in the 'ctts' (composition time to sample) box for B-frames will be negative, and a 'cslg' (composition shift least greatest) box will be included per 14496-1 amendment 1. This improves compatibility with Apple players and tools." }, + "CttsVersion": { + "shape": "__integerMin0Max1", + "locationName": "cttsVersion", + "documentation": "Ignore this setting unless compliance to the CTTS box version specification matters in your workflow. Specify a value of 1 to set your CTTS box version to 1 and make your output compliant with the specification. When you specify a value of 1, you must also set CSLG atom (cslgAtom) to the value INCLUDE. Keep the default value 0 to set your CTTS box version to 0. This can provide backward compatibility for some players and packagers." + }, "FreeSpaceBox": { "shape": "Mp4FreeSpaceBox", "locationName": "freeSpaceBox", @@ -9024,7 +9134,7 @@ "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value PRORES." } }, - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, FrameCaptureSettings" + "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings" }, "VideoDescription": { "type": "structure", @@ -9042,7 +9152,7 @@ "CodecSettings": { "shape": "VideoCodecSettings", "locationName": "codecSettings", - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, FrameCaptureSettings" + "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. 
* FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings" }, "ColorMetadata": { "shape": "ColorMetadata", @@ -9165,7 +9275,7 @@ "AlphaBehavior": { "shape": "AlphaBehavior", "locationName": "alphaBehavior", - "documentation": "Ignore this setting unless this input is a QuickTime animation. Specify which part of this input MediaConvert uses for your outputs. Leave this setting set to DISCARD in order to delete the alpha channel and preserve the video. Use REMAP_TO_LUMA for this setting to delete the video and map the alpha channel to the luma channel of your outputs." + "documentation": "Ignore this setting unless this input is a QuickTime animation with an alpha channel. Use this setting to create separate Key and Fill outputs. In each output, specify which part of the input MediaConvert uses. Leave this setting at the default value DISCARD to delete the alpha channel and preserve the video. Set it to REMAP_TO_LUMA to delete the video and map the alpha channel to the luma channel of your outputs." }, "ColorSpace": { "shape": "ColorSpace", @@ -9251,6 +9361,9 @@ "__doubleMin0": { "type": "double" }, + "__doubleMin0Max1": { + "type": "double" + }, "__doubleMin0Max2147483647": { "type": "double" }, @@ -9274,6 +9387,11 @@ "min": 0, "max": 0 }, + "__integerMin0Max1": { + "type": "integer", + "min": 0, + "max": 1 + }, "__integerMin0Max10": { "type": "integer", "min": 0, @@ -9384,6 +9502,11 @@ "min": 0, "max": 8 }, + "__integerMin0Max9": { + "type": "integer", + "min": 0, + "max": 9 + }, "__integerMin0Max96": { "type": "integer", "min": 0, @@ -9424,6 +9547,11 @@ "min": 10, "max": 48 }, + "__integerMin16000Max320000": { + "type": "integer", + "min": 16000, + "max": 320000 + }, "__integerMin16Max24": { "type": "integer", "min": 16, @@ -9504,6 +9632,11 @@ "min": 1, "max": 64 }, + "__integerMin22050Max48000": { + "type": "integer", + "min": 22050, + "max": 48000 + }, "__integerMin24Max60000": { "type": "integer", "min": 24, @@ -9875,26 +10008,26 @@ "max": 11, "pattern": "^((([0-1]\\d)|(2[0-3]))(:[0-5]\\d){2}([:;][0-5]\\d))$" }, - "__stringMin14Max1285PatternHttpHttpsS3Mov09Png": { + "__stringMin14Max1285PatternS3Mov09PngHttpsMov09Png": { "type": "string", "min": 14, "max": 1285, - "pattern": "^(http|https|s3)://(.*)(\\.mov|[0-9]+\\.png)$" + "pattern": "^((s3://(.*)(\\.mov|[0-9]+\\.png))|(https?://(.*)(\\.mov|[0-9]+\\.png)(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, - "__stringMin14PatternHttpHttpsS3BmpBMPPngPNG": { + "__stringMin14PatternS3BmpBMPPngPNGHttpsBmpBMPPngPNG": { "type": "string", "min": 14, - "pattern": "^(http|https|s3)://(.*?)\\.(bmp|BMP|png|PNG)$" + "pattern": "^((s3://(.*?)\\.(bmp|BMP|png|PNG))|(https?://(.*?)\\.(bmp|BMP|png|PNG)(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, - "__stringMin14PatternHttpHttpsS3BmpBMPPngPNGTgaTGA": { + "__stringMin14PatternS3BmpBMPPngPNGTgaTGAHttpsBmpBMPPngPNGTgaTGA": { "type": "string", "min": 14, - "pattern": "^(http|https|s3)://(.*?)\\.(bmp|BMP|png|PNG|tga|TGA)$" + "pattern": "^((s3://(.*?)\\.(bmp|BMP|png|PNG|tga|TGA))|(https?://(.*?)\\.(bmp|BMP|png|PNG|tga|TGA)(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, - "__stringMin14PatternHttpHttpsS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI": { + "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMIHttpsSccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI": { "type": "string", "min": 14, - "pattern": "^(http|https|s3)://(.*?)\\.(scc|SCC|ttml|TTML|dfxp|DFXP|stl|STL|srt|SRT|xml|XML|smi|SMI)$" + "pattern": 
"^((s3://(.*?)\\.(scc|SCC|ttml|TTML|dfxp|DFXP|stl|STL|srt|SRT|xml|XML|smi|SMI))|(https?://(.*?)\\.(scc|SCC|ttml|TTML|dfxp|DFXP|stl|STL|srt|SRT|xml|XML|smi|SMI)(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, "__stringMin16Max24PatternAZaZ0922AZaZ0916": { "type": "string", @@ -9987,14 +10120,6 @@ "type": "string", "pattern": "^(\\d+(\\/\\d+)*)$" }, - "__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { - "type": "string", - "pattern": "^(http|https|s3)://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))$" - }, - "__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { - "type": "string", - "pattern": "^(http|https|s3)://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))$" - }, "__stringPatternHttps": { "type": "string", "pattern": "^https:\\/\\/" @@ -10011,6 +10136,14 @@ "type": "string", "pattern": "^s3:\\/\\/.*\\/(ASSETMAP.xml)?$" }, + "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { + "type": "string", + "pattern": 
"^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" + }, + "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { + "type": "string", + "pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" + }, "__stringPatternSNManifestConfirmConditionNotificationNS": { "type": "string", "pattern": "^\\s*<(.|\\n)*ManifestConfirmConditionNotification(.|\\n)*>\\s*$" diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index 1108678c..9086b7cc 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ 
b/botocore/data/medialive/2017-10-14/service-2.json @@ -2463,6 +2463,14 @@ }, "documentation": "Audio Normalization Settings" }, + "AudioOnlyHlsSegmentType": { + "type": "string", + "documentation": "Audio Only Hls Segment Type", + "enum": [ + "AAC", + "FMP4" + ] + }, "AudioOnlyHlsSettings": { "type": "structure", "members": { @@ -2480,6 +2488,11 @@ "shape": "AudioOnlyHlsTrackType", "locationName": "audioTrackType", "documentation": "Four types of audio-only tracks are supported:\n\nAudio-Only Variant Stream\nThe client can play back this audio-only stream instead of video in low-bandwidth scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest.\n\nAlternate Audio, Auto Select, Default\nAlternate rendition that the client should try to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with DEFAULT=YES, AUTOSELECT=YES\n\nAlternate Audio, Auto Select, Not Default\nAlternate rendition that the client may try to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YES\n\nAlternate Audio, not Auto Select\nAlternate rendition that the client will not try to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=NO" + }, + "SegmentType": { + "shape": "AudioOnlyHlsSegmentType", + "locationName": "segmentType", + "documentation": "Specifies the segment type." } }, "documentation": "Audio Only Hls Settings" @@ -5473,6 +5486,17 @@ "Time" ] }, + "Fmp4HlsSettings": { + "type": "structure", + "members": { + "AudioRenditionSets": { + "shape": "__string", + "locationName": "audioRenditionSets", + "documentation": "List all the audio groups that are used with the video output stream. Input all the audio GROUP-IDs that are associated to the video, separate by ','." + } + }, + "documentation": "Fmp4 Hls Settings" + }, "FollowModeScheduleActionStartSettings": { "type": "structure", "members": { @@ -5529,6 +5553,14 @@ "Destination" ] }, + "FrameCaptureIntervalUnit": { + "type": "string", + "documentation": "Frame Capture Interval Unit", + "enum": [ + "MILLISECONDS", + "SECONDS" + ] + }, "FrameCaptureOutputSettings": { "type": "structure", "members": { @@ -5547,6 +5579,11 @@ "shape": "__integerMin1Max3600000", "locationName": "captureInterval", "documentation": "The frequency at which to capture frames for inclusion in the output. May be specified in either seconds or milliseconds, as specified by captureIntervalUnits." + }, + "CaptureIntervalUnits": { + "shape": "FrameCaptureIntervalUnit", + "locationName": "captureIntervalUnits", + "documentation": "Unit for the frame capture interval." } }, "documentation": "Frame Capture Settings", @@ -5938,7 +5975,7 @@ "RateControlMode": { "shape": "H264RateControlMode", "locationName": "rateControlMode", - "documentation": "Rate control mode.\n\nQVBR: Quality will match the specified quality level except when it is constrained by the\nmaximum bitrate. Recommended if you or your viewers pay for bandwidth.\n\nVBR: Quality and bitrate vary, depending on the video complexity. Recommended instead of QVBR\nif you want to maintain a specific average bitrate over the duration of the channel.\n\nCBR: Quality varies, depending on the video complexity. Recommended only if you distribute\nyour assets to devices that cannot handle variable bitrates." + "documentation": "Rate control mode.\n\nQVBR: Quality will match the specified quality level except when it is constrained by the\nmaximum bitrate. 
Recommended if you or your viewers pay for bandwidth.\n\nVBR: Quality and bitrate vary, depending on the video complexity. Recommended instead of QVBR\nif you want to maintain a specific average bitrate over the duration of the channel.\n\nCBR: Quality varies, depending on the video complexity. Recommended only if you distribute\nyour assets to devices that cannot handle variable bitrates.\n\nMultiplex: This rate control mode is only supported (and is required) when the video is being\ndelivered to a MediaLive Multiplex in which case the rate control configuration is controlled\nby the properties within the Multiplex Program." }, "ScanType": { "shape": "H264ScanType", @@ -6497,11 +6534,21 @@ "locationName": "baseUrlContent", "documentation": "A partial URI prefix that will be prepended to each output in the media .m3u8 file. Can be used if base manifest is delivered from a different URL than the main .m3u8 file." }, + "BaseUrlContent1": { + "shape": "__string", + "locationName": "baseUrlContent1", + "documentation": "Optional. One value per output group.\n\nThis field is required only if you are completing Base URL content A, and the downstream system has notified you that the media files for pipeline 1 of all outputs are in a location different from the media files for pipeline 0." + }, "BaseUrlManifest": { "shape": "__string", "locationName": "baseUrlManifest", "documentation": "A partial URI prefix that will be prepended to each output in the media .m3u8 file. Can be used if base manifest is delivered from a different URL than the main .m3u8 file." }, + "BaseUrlManifest1": { + "shape": "__string", + "locationName": "baseUrlManifest1", + "documentation": "Optional. One value per output group.\n\nComplete this field only if you are completing Base URL manifest A, and the downstream system has notified you that the child manifest files for pipeline 1 of all outputs are in a location different from the child manifest files for pipeline 0." + }, "CaptionLanguageMappings": { "shape": "__listOfCaptionLanguageMapping", "locationName": "captionLanguageMappings", @@ -6547,6 +6594,11 @@ "locationName": "hlsCdnSettings", "documentation": "Parameters that control interactions with the CDN." }, + "HlsId3SegmentTagging": { + "shape": "HlsId3SegmentTaggingState", + "locationName": "hlsId3SegmentTagging", + "documentation": "State of HLS ID3 Segment Tagging" + }, "IFrameOnlyPlaylists": { "shape": "IFrameOnlyPlaylistType", "locationName": "iFrameOnlyPlaylists", @@ -6678,6 +6730,36 @@ "Destination" ] }, + "HlsH265PackagingType": { + "type": "string", + "documentation": "Hls H265 Packaging Type", + "enum": [ + "HEV1", + "HVC1" + ] + }, + "HlsId3SegmentTaggingScheduleActionSettings": { + "type": "structure", + "members": { + "Tag": { + "shape": "__string", + "locationName": "tag", + "documentation": "ID3 tag to insert into each segment. 
Supports special keyword identifiers to substitute in segment-related values.\\nSupported keyword identifiers: https://docs.aws.amazon.com/medialive/latest/ug/variable-data-identifiers.html" + } + }, + "documentation": "Settings for the action to insert a user-defined ID3 tag in each HLS segment", + "required": [ + "Tag" + ] + }, + "HlsId3SegmentTaggingState": { + "type": "string", + "documentation": "State of HLS ID3 Segment Tagging", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, "HlsInputSettings": { "type": "structure", "members": { @@ -6793,6 +6875,11 @@ "HlsOutputSettings": { "type": "structure", "members": { + "H265PackagingType": { + "shape": "HlsH265PackagingType", + "locationName": "h265PackagingType", + "documentation": "Only applicable when this output is referencing an H.265 video description.\nSpecifies whether MP4 segments should be packaged as HEV1 or HVC1." + }, "HlsSettings": { "shape": "HlsSettings", "locationName": "hlsSettings", @@ -6845,6 +6932,10 @@ "shape": "AudioOnlyHlsSettings", "locationName": "audioOnlyHlsSettings" }, + "Fmp4HlsSettings": { + "shape": "Fmp4HlsSettings", + "locationName": "fmp4HlsSettings" + }, "StandardHlsSettings": { "shape": "StandardHlsSettings", "locationName": "standardHlsSettings" @@ -10175,6 +10266,11 @@ "ScheduleActionSettings": { "type": "structure", "members": { + "HlsId3SegmentTaggingSettings": { + "shape": "HlsId3SegmentTaggingScheduleActionSettings", + "locationName": "hlsId3SegmentTaggingSettings", + "documentation": "Action to insert HLS ID3 segment tagging" + }, "HlsTimedMetadataSettings": { "shape": "HlsTimedMetadataScheduleActionSettings", "locationName": "hlsTimedMetadataSettings", @@ -11309,11 +11405,10 @@ "members": { "Message": { "shape": "__string", - "locationName": "message", - "documentation": "The error message." + "locationName": "message" }, "ValidationErrors": { - "shape": "__listOfMultiplexValidationError", + "shape": "__listOfValidationError", "locationName": "validationErrors", "documentation": "A collection of validation error responses." 
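
A hedged sketch of scheduling the new HLS ID3 segment tagging action through BatchUpdateSchedule; the channel ID and tag text are placeholders, and immediate-mode start settings are assumed to suit the channel:

import botocore.session

medialive = botocore.session.get_session().create_client("medialive", region_name="us-east-1")

medialive.batch_update_schedule(
    ChannelId="1234567",                     # placeholder channel id
    Creates={
        "ScheduleActions": [
            {
                "ActionName": "tag-hls-segments",
                "ScheduleActionStartSettings": {
                    "ImmediateModeScheduleActionStartSettings": {}   # apply as soon as received
                },
                "ScheduleActionSettings": {
                    # Keyword identifiers from the linked variable-data documentation
                    # can be embedded in the tag string.
                    "HlsId3SegmentTaggingSettings": {"Tag": "my-id3-tag"}
                },
            }
        ]
    },
)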
} diff --git a/botocore/data/mediapackage/2017-10-12/service-2.json b/botocore/data/mediapackage/2017-10-12/service-2.json index e8f590b1..a1fb2c18 100644 --- a/botocore/data/mediapackage/2017-10-12/service-2.json +++ b/botocore/data/mediapackage/2017-10-12/service-2.json @@ -622,6 +622,26 @@ ], "type": "string" }, + "Authorization": { + "documentation": "CDN Authorization credentials", + "members": { + "CdnIdentifierSecret": { + "documentation": "The Amazon Resource Name (ARN) for the secret in Secrets Manager that your Content Distribution Network (CDN) uses for authorization to access your endpoint.\n", + "locationName": "cdnIdentifierSecret", + "shape": "__string" + }, + "SecretsRoleArn": { + "documentation": "The Amazon Resource Name (ARN) for the IAM role that allows MediaPackage to communicate with AWS Secrets Manager.\n", + "locationName": "secretsRoleArn", + "shape": "__string" + } + }, + "required": [ + "SecretsRoleArn", + "CdnIdentifierSecret" + ], + "type": "structure" + }, "Channel": { "documentation": "A Channel resource configuration.", "members": { @@ -917,6 +937,10 @@ "CreateOriginEndpointRequest": { "documentation": "Configuration parameters used to create a new OriginEndpoint.", "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "ChannelId": { "documentation": "The ID of the Channel that the OriginEndpoint will be associated with.\nThis cannot be changed after the OriginEndpoint is created.\n", "locationName": "channelId", @@ -991,6 +1015,10 @@ "locationName": "arn", "shape": "__string" }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "ChannelId": { "documentation": "The ID of the Channel the OriginEndpoint is associated with.", "locationName": "channelId", @@ -1306,6 +1334,10 @@ "locationName": "arn", "shape": "__string" }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "ChannelId": { "documentation": "The ID of the Channel the OriginEndpoint is associated with.", "locationName": "channelId", @@ -1947,6 +1979,10 @@ "locationName": "arn", "shape": "__string" }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "ChannelId": { "documentation": "The ID of the Channel the OriginEndpoint is associated with.", "locationName": "channelId", @@ -2018,6 +2054,10 @@ "OriginEndpointCreateParameters": { "documentation": "Configuration parameters for a new OriginEndpoint.", "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "ChannelId": { "documentation": "The ID of the Channel that the OriginEndpoint will be associated with.\nThis cannot be changed after the OriginEndpoint is created.\n", "locationName": "channelId", @@ -2080,8 +2120,8 @@ } }, "required": [ - "Id", - "ChannelId" + "ChannelId", + "Id" ], "type": "structure" }, @@ -2104,6 +2144,10 @@ "OriginEndpointUpdateParameters": { "documentation": "Configuration parameters for updating an existing OriginEndpoint.", "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "CmafPackage": { "locationName": "cmafPackage", "shape": "CmafPackageCreateOrUpdateParameters" @@ -2529,6 +2573,10 @@ "UpdateOriginEndpointRequest": { "documentation": "Configuration parameters used to update an existing OriginEndpoint.", "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "CmafPackage": { "locationName": "cmafPackage", "shape": 
"CmafPackageCreateOrUpdateParameters" @@ -2594,6 +2642,10 @@ "locationName": "arn", "shape": "__string" }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "ChannelId": { "documentation": "The ID of the Channel the OriginEndpoint is associated with.", "locationName": "channelId", @@ -2754,4 +2806,4 @@ "type": "string" } } -} \ No newline at end of file +} diff --git a/botocore/data/mgh/2017-05-31/paginators-1.json b/botocore/data/mgh/2017-05-31/paginators-1.json index 4a1efc37..97efd0a5 100644 --- a/botocore/data/mgh/2017-05-31/paginators-1.json +++ b/botocore/data/mgh/2017-05-31/paginators-1.json @@ -23,6 +23,12 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "ProgressUpdateStreamSummaryList" + }, + "ListApplicationStates": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ApplicationStateList" } } } diff --git a/botocore/data/mgh/2017-05-31/service-2.json b/botocore/data/mgh/2017-05-31/service-2.json index 9e8fe8ac..b744e2f7 100644 --- a/botocore/data/mgh/2017-05-31/service-2.json +++ b/botocore/data/mgh/2017-05-31/service-2.json @@ -189,6 +189,23 @@ ], "documentation":"

Registers a new migration task which represents a server, database, etc., being migrated to AWS by a migration tool.

This API is a prerequisite to calling the NotifyMigrationTaskState API as the migration tool must first register the migration task with Migration Hub.
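
The operation name falls outside this hunk; in the Migration Hub API this documentation belongs to ImportMigrationTask, which the following hedged sketch assumes. Stream and task names are placeholders:

import botocore.session

mgh = botocore.session.get_session().create_client("mgh", region_name="us-west-2")

# The progress update stream must exist before tasks can be registered against it.
mgh.create_progress_update_stream(ProgressUpdateStreamName="my-migration-tool")
mgh.import_migration_task(
    ProgressUpdateStream="my-migration-tool",   # placeholder stream name
    MigrationTaskName="db-server-01",           # placeholder task name
)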

" }, + "ListApplicationStates":{ + "name":"ListApplicationStates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApplicationStatesRequest"}, + "output":{"shape":"ListApplicationStatesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidInputException"}, + {"shape":"HomeRegionNotSetException"} + ], + "documentation":"

Lists all the migration statuses for your applications. If you use the optional ApplicationIds parameter, only the migration statuses for those applications will be returned.

" + }, "ListCreatedArtifacts":{ "name":"ListCreatedArtifacts", "http":{ @@ -337,6 +354,36 @@ "max":1600, "min":1 }, + "ApplicationIds":{ + "type":"list", + "member":{"shape":"ApplicationId"}, + "max":100, + "min":1 + }, + "ApplicationState":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"ApplicationId", + "documentation":"

The configurationId from the Application Discovery Service that uniquely identifies an application.

" + }, + "ApplicationStatus":{ + "shape":"ApplicationStatus", + "documentation":"

The current status of an application.

" + }, + "LastUpdatedTime":{ + "shape":"UpdateDateTime", + "documentation":"

The timestamp when the application status was last updated.

" + } + }, + "documentation":"

The state of an application discovered through Migration Hub import, the AWS Agentless Discovery Connector, or the AWS Application Discovery Agent.

" + }, + "ApplicationStateList":{ + "type":"list", + "member":{"shape":"ApplicationState"}, + "max":1000, + "min":0 + }, "ApplicationStatus":{ "type":"string", "enum":[ @@ -681,6 +728,36 @@ "max":100, "min":0 }, + "ListApplicationStatesRequest":{ + "type":"structure", + "members":{ + "ApplicationIds":{ + "shape":"ApplicationIds", + "documentation":"

The configurationIds from the Application Discovery Service that uniquely identifies your applications.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

If a NextToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in NextToken.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Maximum number of results to be returned per page.

" + } + } + }, + "ListApplicationStatesResult":{ + "type":"structure", + "members":{ + "ApplicationStateList":{ + "shape":"ApplicationStateList", + "documentation":"

A list of Applications that exist in Application Discovery Service.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

If a NextToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in NextToken.

" + } + } + }, "ListCreatedArtifactsRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/mq/2017-11-27/service-2.json b/botocore/data/mq/2017-11-27/service-2.json index 98259235..2def4c65 100644 --- a/botocore/data/mq/2017-11-27/service-2.json +++ b/botocore/data/mq/2017-11-27/service-2.json @@ -753,6 +753,16 @@ "locationName" : "hostInstanceType", "documentation" : "The type of broker instance." }, + "StorageType" : { + "shape" : "BrokerStorageType", + "locationName" : "storageType", + "documentation" : "The broker's storage type." + }, + "SupportedDeploymentModes" : { + "shape" : "__listOfDeploymentMode", + "locationName" : "supportedDeploymentModes", + "documentation" : "The list of supported deployment modes." + }, "SupportedEngineVersions" : { "shape" : "__listOf__string", "locationName" : "supportedEngineVersions", @@ -787,6 +797,11 @@ "documentation" : "The status of the broker.", "enum" : [ "CREATION_IN_PROGRESS", "CREATION_FAILED", "DELETION_IN_PROGRESS", "RUNNING", "REBOOT_IN_PROGRESS" ] }, + "BrokerStorageType" : { + "type" : "string", + "documentation" : "The storage type of the broker.", + "enum" : [ "EBS", "EFS" ] + }, "BrokerSummary" : { "type" : "structure", "members" : { @@ -1029,7 +1044,12 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers." + }, + "StorageType" : { + "shape" : "BrokerStorageType", + "locationName" : "storageType", + "documentation" : "The broker's storage type." }, "SubnetIds" : { "shape" : "__listOf__string", @@ -1047,7 +1067,7 @@ "documentation" : "Required. The list of ActiveMQ users (persons or applications) who can access queues and topics. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." } }, - "documentation" : "Required. The time period during which Amazon MQ applies pending updates or patches to the broker." + "documentation" : "Required. The version of the broker engine. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" }, "CreateBrokerOutput" : { "type" : "structure", @@ -1132,7 +1152,12 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers." + }, + "StorageType" : { + "shape" : "BrokerStorageType", + "locationName" : "storageType", + "documentation" : "The broker's storage type." }, "SubnetIds" : { "shape" : "__listOf__string", @@ -1508,6 +1533,12 @@ "location" : "querystring", "locationName" : "nextToken", "documentation" : "The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty." + }, + "StorageType" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "storageType", + "documentation" : "Filter response by storage type." } } }, @@ -1614,16 +1645,16 @@ "locationName" : "pendingEngineVersion", "documentation" : "The version of the broker engine to upgrade to. 
For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" }, - "PendingSecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "pendingSecurityGroups", - "documentation" : "The list of pending security groups to authorize connections to brokers." - }, "PendingHostInstanceType" : { "shape" : "__string", "locationName" : "pendingHostInstanceType", "documentation" : "The host instance type of the broker to upgrade to. For a list of supported instance types, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide//broker.html#broker-instance-types" }, + "PendingSecurityGroups" : { + "shape" : "__listOf__string", + "locationName" : "pendingSecurityGroups", + "documentation" : "The list of pending security groups to authorize connections to brokers." + }, "PubliclyAccessible" : { "shape" : "__boolean", "locationName" : "publiclyAccessible", @@ -1632,7 +1663,12 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers." + }, + "StorageType" : { + "shape" : "BrokerStorageType", + "locationName" : "storageType", + "documentation" : "The broker's storage type." }, "SubnetIds" : { "shape" : "__listOf__string", @@ -1747,16 +1783,16 @@ "locationName" : "pendingEngineVersion", "documentation" : "The version of the broker engine to upgrade to. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" }, - "PendingSecurityGroups" : { - "shape" : "__listOf__string", - "locationName" : "pendingSecurityGroups", - "documentation" : "The list of pending security groups to authorize connections to brokers." - }, "PendingHostInstanceType" : { "shape" : "__string", "locationName" : "pendingHostInstanceType", "documentation" : "The host instance type of the broker to upgrade to. For a list of supported instance types, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide//broker.html#broker-instance-types" }, + "PendingSecurityGroups" : { + "shape" : "__listOf__string", + "locationName" : "pendingSecurityGroups", + "documentation" : "The list of pending security groups to authorize connections to brokers." + }, "PubliclyAccessible" : { "shape" : "__boolean", "locationName" : "publiclyAccessible", @@ -1765,7 +1801,12 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers." + }, + "StorageType" : { + "shape" : "BrokerStorageType", + "locationName" : "storageType", + "documentation" : "The broker's storage type." }, "SubnetIds" : { "shape" : "__listOf__string", @@ -2553,7 +2594,7 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers." } }, "documentation" : "Updates the broker using the specified properties." 
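The Amazon MQ hunks above introduce a BrokerStorageType enum (EBS or EFS), a StorageType member on the broker create/describe shapes, and a storageType query-string filter on DescribeBrokerInstanceOptions, while the Migration Hub changes earlier in this diff add a paginated ListApplicationStates operation. A minimal sketch of how these model additions might be exercised through boto3 clients built on this botocore release follows; the client names ('mq', 'mgh'), method names, and response keys are derived mechanically from the JSON models shown above and are illustrative assumptions, not an official example.

import boto3

# Assumes the 'mq' and 'mgh' service models shipped in this release; names are
# taken from the JSON above and used for illustration only.
mq = boto3.client('mq')
mgh = boto3.client('mgh')

# Filter available broker instance options by the new storage type field.
# create_broker(...) would similarly accept StorageType='EBS' or 'EFS'.
options = mq.describe_broker_instance_options(StorageType='EBS')
for option in options.get('BrokerInstanceOptions', []):
    print(option.get('HostInstanceType'),
          option.get('StorageType'),
          option.get('SupportedDeploymentModes'))

# Use the ListApplicationStates paginator added in mgh paginators-1.json;
# the result key ApplicationStateList comes from that paginator definition.
paginator = mgh.get_paginator('list_application_states')
for page in paginator.paginate():
    for state in page.get('ApplicationStateList', []):
        print(state.get('ApplicationId'), state.get('ApplicationStatus'))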
@@ -2594,7 +2635,7 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers." } }, "documentation" : "Returns information about the updated broker." @@ -2636,7 +2677,7 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers." } }, "documentation" : "Updates the broker using the specified properties.", @@ -2678,7 +2719,7 @@ "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", - "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorize connections to brokers." + "documentation" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers." } } }, @@ -2997,6 +3038,12 @@ "shape" : "ConfigurationRevision" } }, + "__listOfDeploymentMode" : { + "type" : "list", + "member" : { + "shape" : "DeploymentMode" + } + }, "__listOfEngineVersion" : { "type" : "list", "member" : { @@ -3062,4 +3109,4 @@ } }, "documentation" : "Amazon MQ is a managed message broker service for Apache ActiveMQ that makes it easy to set up and operate message brokers in the cloud. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols." -} +} \ No newline at end of file diff --git a/botocore/data/neptune/2014-10-31/service-2.json b/botocore/data/neptune/2014-10-31/service-2.json index cf3ebb8e..c9ea716e 100644 --- a/botocore/data/neptune/2014-10-31/service-2.json +++ b/botocore/data/neptune/2014-10-31/service-2.json @@ -1269,7 +1269,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, Amazon Neptune encrypts the target DB cluster snapshot using the specified KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.

" + "documentation":"

The AWS AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.

You cannot encrypt an unencrypted DB cluster snapshot when you copy it. If you try to copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, an error is returned.

" }, "PreSignedUrl":{ "shape":"String", @@ -1340,7 +1340,7 @@ }, "CharacterSetName":{ "shape":"String", - "documentation":"

A value that indicates that the DB cluster should be associated with the specified CharacterSet.

" + "documentation":"

(Not supported by Neptune)

" }, "DatabaseName":{ "shape":"String", @@ -1368,7 +1368,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to use.

Example: 1.0.1

" + "documentation":"

The version number of the database engine to use. Currently, setting this parameter has no effect.

Example: 1.0.1

" }, "Port":{ "shape":"IntegerOptional", @@ -1384,7 +1384,7 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"

A value that indicates that the DB cluster should be associated with the specified option group.

Permanent options can't be removed from an option group. The option group can't be removed from a DB cluster once it is associated with a DB cluster.

" + "documentation":"

(Not supported by Neptune)

" }, "PreferredBackupWindow":{ "shape":"String", @@ -1421,6 +1421,10 @@ "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled.

" } } }, @@ -1567,7 +1571,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to use.

" + "documentation":"

The version number of the database engine to use. Currently, setting this parameter has no effect.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -1583,11 +1587,11 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"

Indicates that the DB instance should be associated with the specified option group.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance

" + "documentation":"

(Not supported by Neptune)

" }, "CharacterSetName":{ "shape":"String", - "documentation":"

Indicates that the DB instance should be associated with the specified CharacterSet.

Not applicable. The character set is managed by the DB cluster. For more information, see CreateDBCluster.

" + "documentation":"

(Not supported by Neptune)

" }, "PubliclyAccessible":{ "shape":"BooleanOptional", @@ -1656,15 +1660,19 @@ }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

True to enable Performance Insights for the DB instance, and otherwise false.

" + "documentation":"

(Not supported by Neptune)

" }, "PerformanceInsightsKMSKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" + "documentation":"

(Not supported by Neptune)

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled.

You can enable or disable deletion protection for the DB cluster. For more information, see CreateDBCluster. DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.

" } } }, @@ -1798,7 +1806,7 @@ }, "CharacterSetName":{ "shape":"String", - "documentation":"

If present, specifies the name of the character set that this cluster is associated with.

" + "documentation":"

(Not supported by Neptune)

" }, "DatabaseName":{ "shape":"String", @@ -1862,7 +1870,7 @@ }, "DBClusterOptionGroupMemberships":{ "shape":"DBClusterOptionGroupMemberships", - "documentation":"

Provides the list of option group memberships for this DB cluster.

" + "documentation":"

(Not supported by Neptune)

" }, "PreferredBackupWindow":{ "shape":"String", @@ -1927,6 +1935,10 @@ "EnabledCloudwatchLogsExports":{ "shape":"LogTypeList", "documentation":"

A list of log types that this DB cluster is configured to export to CloudWatch Logs.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled.

" } }, "documentation":"

Contains the details of an Amazon Neptune DB cluster.

This data type is used as a response element in the DescribeDBClusters action.

", @@ -2365,11 +2377,11 @@ }, "DefaultCharacterSet":{ "shape":"CharacterSet", - "documentation":"

The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

" + "documentation":"

(Not supported by Neptune)

" }, "SupportedCharacterSets":{ "shape":"SupportedCharacterSetsList", - "documentation":"

A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance action.

" + "documentation":"

(Not supported by Neptune)

" }, "ValidUpgradeTarget":{ "shape":"ValidUpgradeTargetList", @@ -2527,11 +2539,11 @@ }, "OptionGroupMemberships":{ "shape":"OptionGroupMembershipList", - "documentation":"

Provides the list of option group memberships for this DB instance.

" + "documentation":"

(Not supported by Neptune)

" }, "CharacterSetName":{ "shape":"String", - "documentation":"

If present, specifies the name of the character set that this instance is associated with.

" + "documentation":"

(Not supported by Neptune)

" }, "SecondaryAvailabilityZone":{ "shape":"String", @@ -2616,15 +2628,19 @@ }, "PerformanceInsightsEnabled":{ "shape":"BooleanOptional", - "documentation":"

True if Performance Insights is enabled for the DB instance, and otherwise false.

" + "documentation":"

(Not supported by Neptune)

" }, "PerformanceInsightsKMSKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" + "documentation":"

(Not supported by Neptune)

" }, "EnabledCloudwatchLogsExports":{ "shape":"LogTypeList", "documentation":"

A list of log types that this DB instance is configured to export to CloudWatch Logs.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled.

" } }, "documentation":"

Contains the details of an Amazon Neptune DB instance.

This data type is used as a response element in the DescribeDBInstances action.

", @@ -4164,7 +4180,7 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"

A value that indicates that the DB cluster should be associated with the specified option group. Changing this parameter doesn't result in an outage except in the following case, and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

Permanent options can't be removed from an option group. The option group can't be removed from a DB cluster once it is associated with a DB cluster.

" + "documentation":"

(Not supported by Neptune)

" }, "PreferredBackupWindow":{ "shape":"String", @@ -4184,7 +4200,11 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true.

For a list of valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

" + "documentation":"

The version number of the database engine. Currently, setting this parameter has no effect. To upgrade your database engine to the most recent release, use the ApplyPendingMaintenanceAction API.

For a list of valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled.

" } } }, @@ -4300,11 +4320,11 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

" + "documentation":"

The version number of the database engine to upgrade to. Currently, setting this parameter has no effect. To upgrade your database engine to the most recent release, use the ApplyPendingMaintenanceAction API.

" }, "AllowMajorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

Indicates that major version upgrades are allowed. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.

Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.

" + "documentation":"

Indicates that major version upgrades are allowed. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -4320,7 +4340,7 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"

Indicates that the DB instance should be associated with the specified option group. Changing this parameter doesn't result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance

" + "documentation":"

(Not supported by Neptune)

" }, "NewDBInstanceIdentifier":{ "shape":"String", @@ -4381,15 +4401,19 @@ }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

Not supported.

" + "documentation":"

(Not supported by Neptune)

" }, "PerformanceInsightsKMSKeyId":{ "shape":"String", - "documentation":"

Not supported.

" + "documentation":"

(Not supported by Neptune)

" }, "CloudwatchLogsExportConfiguration":{ "shape":"CloudwatchLogsExportConfiguration", "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled.

" } } }, @@ -4565,7 +4589,7 @@ }, "SupportsPerformanceInsights":{ "shape":"Boolean", - "documentation":"

True if a DB instance supports Performance Insights, otherwise false.

" + "documentation":"

(Not supported by Neptune)

" }, "MinStorageSize":{ "shape":"IntegerOptional", @@ -4796,7 +4820,7 @@ }, "PendingCloudwatchLogsExports":{ "shape":"PendingCloudwatchLogsExports", - "documentation":"

Specifies the CloudWatch logs to be exported.

" + "documentation":"

This PendingCloudwatchLogsExports structure specifies pending changes to which CloudWatch logs are enabled and which are disabled.

" } }, "documentation":"

This data type is used as a response element in the ModifyDBInstance action.

" @@ -5057,7 +5081,7 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"

The name of the option group to use for the restored DB cluster.

" + "documentation":"

(Not supported by Neptune)

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", @@ -5082,6 +5106,10 @@ "DBClusterParameterGroupName":{ "shape":"String", "documentation":"

The name of the DB cluster parameter group to associate with the new DB cluster.

Constraints:

  • If supplied, must match the name of an existing DBClusterParameterGroup.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled.

" } } }, @@ -5128,7 +5156,7 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"

The name of the option group for the new DB cluster.

" + "documentation":"

(Not supported by Neptune)

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", @@ -5153,6 +5181,10 @@ "DBClusterParameterGroupName":{ "shape":"String", "documentation":"

The name of the DB cluster parameter group to associate with the new DB cluster.

Constraints:

  • If supplied, must match the name of an existing DBClusterParameterGroup.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled.

" } } }, diff --git a/botocore/data/opsworkscm/2016-11-01/service-2.json b/botocore/data/opsworkscm/2016-11-01/service-2.json index 82e715d4..1f6448fe 100644 --- a/botocore/data/opsworkscm/2016-11-01/service-2.json +++ b/botocore/data/opsworkscm/2016-11-01/service-2.json @@ -190,6 +190,20 @@ ], "documentation":"

Exports a specified server engine attribute as a base64-encoded string. For example, you can export user data that you can use in EC2 to associate nodes with a server.

This operation is synchronous.

A ValidationException is raised when parameters of the request are not valid. A ResourceNotFoundException is thrown when the server does not exist. An InvalidStateException is thrown when the server is in any of the following states: CREATING, TERMINATED, FAILED or DELETING.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of tags that are applied to the specified AWS OpsWorks for Chef Automate or AWS OpsWorks for Puppet Enterprise servers or backups.

" + }, "RestoreServer":{ "name":"RestoreServer", "http":{ @@ -203,7 +217,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Restores a backup to a server that is in a CONNECTION_LOST, HEALTHY, RUNNING, UNHEALTHY, or TERMINATED state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of the server's client devices (nodes) should continue to work.

This operation is asynchronous.

An InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are not valid.

" + "documentation":"

Restores a backup to a server that is in a CONNECTION_LOST, HEALTHY, RUNNING, UNHEALTHY, or TERMINATED state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of the server's client devices (nodes) should continue to work.

Restoring from a backup is performed by creating a new EC2 instance. If restoration is successful, and the server is in a HEALTHY state, AWS OpsWorks CM switches traffic over to the new instance. After restoration is finished, the old EC2 instance is maintained in a Running or Stopped state, but is eventually terminated.

This operation is asynchronous.

An InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are not valid.

" }, "StartMaintenance":{ "name":"StartMaintenance", @@ -220,6 +234,36 @@ ], "documentation":"

Manually starts server maintenance. This command can be useful if an earlier maintenance attempt failed, and the underlying cause of maintenance failure has been resolved. The server is in an UNDER_MAINTENANCE state while maintenance is in progress.

Maintenance can only be started on servers in HEALTHY and UNHEALTHY states. Otherwise, an InvalidStateException is thrown. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are not valid.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidStateException"} + ], + "documentation":"

Applies tags to an AWS OpsWorks for Chef Automate or AWS OpsWorks for Puppet Enterprise server, or to server backups.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidStateException"} + ], + "documentation":"

Removes specified tags from an AWS OpsWorks-CM server or backup.

" + }, "UpdateServer":{ "name":"UpdateServer", "http":{ @@ -252,6 +296,10 @@ } }, "shapes":{ + "AWSOpsWorksCMResourceArn":{ + "type":"string", + "pattern":"arn:aws.*:opsworks-cm:.*:[0-9]{12}:.*" + }, "AccountAttribute":{ "type":"structure", "members":{ @@ -462,6 +510,10 @@ "Description":{ "shape":"String", "documentation":"

A user-defined description of the backup.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A map that contains tag keys and tag values to attach to an AWS OpsWorks-CM server backup.

  • The key cannot be empty.

  • The key can be a maximum of 127 characters, and can contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /

  • The value can be a maximum 255 characters, and contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /

  • Leading and trailing white spaces are trimmed from both the key and value.

  • A maximum of 50 user-applied tags is allowed for tag-supported AWS OpsWorks-CM resources.

" } } }, @@ -489,15 +541,15 @@ }, "CustomDomain":{ "shape":"CustomDomain", - "documentation":"

An optional public endpoint of a server, such as https://aws.my-company.com. To access the server, create a CNAME DNS record in your preferred DNS service that points the custom domain to the endpoint that is generated when the server is created (the value of the CreateServer Endpoint attribute). You cannot access the server by using the generated Endpoint value if the server is using a custom domain. If you specify a custom domain, you must also specify values for CustomCertificate and CustomPrivateKey.

" + "documentation":"

Supported on servers running Chef Automate 2. An optional public endpoint of a server, such as https://aws.my-company.com. To access the server, create a CNAME DNS record in your preferred DNS service that points the custom domain to the endpoint that is generated when the server is created (the value of the CreateServer Endpoint attribute). You cannot access the server by using the generated Endpoint value if the server is using a custom domain. If you specify a custom domain, you must also specify values for CustomCertificate and CustomPrivateKey.

" }, "CustomCertificate":{ "shape":"CustomCertificate", - "documentation":"

A PEM-formatted HTTPS certificate. The value can be be a single, self-signed certificate, or a certificate chain. If you specify a custom certificate, you must also specify values for CustomDomain and CustomPrivateKey. The following are requirements for the CustomCertificate value:

  • You can provide either a self-signed, custom certificate, or the full certificate chain.

  • The certificate must be a valid X509 certificate, or a certificate chain in PEM format.

  • The certificate must be valid at the time of upload. A certificate can't be used before its validity period begins (the certificate's NotBefore date), or after it expires (the certificate's NotAfter date).

  • The certificate’s common name or subject alternative names (SANs), if present, must match the value of CustomDomain.

  • The certificate must match the value of CustomPrivateKey.

" + "documentation":"

Supported on servers running Chef Automate 2. A PEM-formatted HTTPS certificate. The value can be be a single, self-signed certificate, or a certificate chain. If you specify a custom certificate, you must also specify values for CustomDomain and CustomPrivateKey. The following are requirements for the CustomCertificate value:

  • You can provide either a self-signed, custom certificate, or the full certificate chain.

  • The certificate must be a valid X509 certificate, or a certificate chain in PEM format.

  • The certificate must be valid at the time of upload. A certificate can't be used before its validity period begins (the certificate's NotBefore date), or after it expires (the certificate's NotAfter date).

  • The certificate’s common name or subject alternative names (SANs), if present, must match the value of CustomDomain.

  • The certificate must match the value of CustomPrivateKey.

" }, "CustomPrivateKey":{ "shape":"CustomPrivateKey", - "documentation":"

A private key in PEM format for connecting to the server by using HTTPS. The private key must not be encrypted; it cannot be protected by a password or passphrase. If you specify a custom private key, you must also specify values for CustomDomain and CustomCertificate.

" + "documentation":"

Supported on servers running Chef Automate 2. A private key in PEM format for connecting to the server by using HTTPS. The private key must not be encrypted; it cannot be protected by a password or passphrase. If you specify a custom private key, you must also specify values for CustomDomain and CustomCertificate.

" }, "DisableAutomatedBackup":{ "shape":"Boolean", @@ -559,6 +611,10 @@ "shape":"Strings", "documentation":"

The IDs of subnets in which to launch the server EC2 instance.

Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have \"Auto Assign Public IP\" enabled.

EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have \"Auto Assign Public IP\" enabled.

For more information about supported Amazon EC2 platforms, see Supported Platforms.

" }, + "Tags":{ + "shape":"TagList", + "documentation":"

A map that contains tag keys and tag values to attach to an AWS OpsWorks for Chef Automate or AWS OpsWorks for Puppet Enterprise server.

  • The key cannot be empty.

  • The key can be a maximum of 127 characters, and can contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /

  • The value can be a maximum 255 characters, and contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /

  • Leading and trailing white spaces are trimmed from both the key and value.

  • A maximum of 50 user-applied tags is allowed for any AWS OpsWorks-CM server.

" + }, "BackupId":{ "shape":"BackupId", "documentation":"

If you specify this field, AWS OpsWorks CM creates the server by using the backup represented by BackupId.

" @@ -751,7 +807,7 @@ "members":{ "Servers":{ "shape":"Servers", - "documentation":"

Contains the response to a DescribeServers request.

For Puppet Server: DescribeServersResponse$Servers$EngineAttributes contains PUPPET_API_CA_CERT. This is the PEM-encoded CA certificate that is used by the Puppet API over TCP port number 8140. The CA certificate is also used to sign node certificates.

" + "documentation":"

Contains the response to a DescribeServers request.

For Chef Automate servers: If DescribeServersResponse$Servers$EngineAttributes includes CHEF_MAJOR_UPGRADE_AVAILABLE, you can upgrade the Chef Automate server to Chef Automate 2. To be eligible for upgrade, a server running Chef Automate 1 must have had at least one successful maintenance run after November 1, 2019.

For Puppet Server: DescribeServersResponse$Servers$EngineAttributes contains PUPPET_API_CA_CERT. This is the PEM-encoded CA certificate that is used by the Puppet API over TCP port number 8140. The CA certificate is also used to sign node certificates.

" }, "NextToken":{ "shape":"String", @@ -896,6 +952,37 @@ "documentation":"

The limit of servers or backups has been reached.

", "exception":true }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"AWSOpsWorksCMResourceArn", + "documentation":"

The Amazon Resource Number (ARN) of an AWS OpsWorks for Chef Automate or AWS OpsWorks for Puppet Enterprise server for which you want to show applied tags. For example, arn:aws:opsworks-cm:us-west-2:123456789012:server/test-owcm-server/EXAMPLE-66b0-4196-8274-d1a2bEXAMPLE.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call ListTagsForResource again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null. Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

Tags that have been applied to the resource.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token that you can use as the value of NextToken in subsequent calls to the API to show more results.

" + } + } + }, "MaintenanceStatus":{ "type":"string", "enum":[ @@ -971,7 +1058,7 @@ }, "InstanceType":{ "shape":"String", - "documentation":"

The type of the instance to create. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, m5.large. Valid values are m5.large, r5.xlarge, and r5.2xlarge. If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.

" + "documentation":"

The type of instance to restore. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, m5.large. Valid values are m5.large, r5.xlarge, and r5.2xlarge. If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.

" }, "KeyPair":{ "shape":"KeyPair", @@ -1155,7 +1242,7 @@ }, "EngineAttributes":{ "shape":"EngineAttributes", - "documentation":"

Engine attributes that are specific to the server on which you want to run maintenance.

" + "documentation":"

Engine attributes that are specific to the server on which you want to run maintenance.

Attributes accepted in a StartMaintenance request for Chef

" } } }, @@ -1177,6 +1264,70 @@ "type":"list", "member":{"shape":"String"} }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

A tag key, such as Stage or Name. A tag key cannot be empty. The key can be a maximum of 127 characters, and can contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

An optional tag value, such as Production or test-owcm-server. The value can be a maximum of 255 characters, and contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /

" + } + }, + "documentation":"

A map that contains tag keys and tag values to attach to an AWS OpsWorks for Chef Automate or AWS OpsWorks for Puppet Enterprise server. Leading and trailing white spaces are trimmed from both the key and value. A maximum of 50 user-applied tags is allowed for tag-supported AWS OpsWorks-CM resources.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\\\\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"AWSOpsWorksCMResourceArn", + "documentation":"

The Amazon Resource Number (ARN) of a resource to which you want to apply tags. For example, arn:aws:opsworks-cm:us-west-2:123456789012:server/test-owcm-server/EXAMPLE-66b0-4196-8274-d1a2bEXAMPLE.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A map that contains tag keys and tag values to attach to AWS OpsWorks-CM servers or backups.

  • The key cannot be empty.

  • The key can be a maximum of 127 characters, and can contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /

  • The value can be a maximum 255 characters, and contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /

  • Leading and trailing white spaces are trimmed from both the key and value.

  • A maximum of 50 user-applied tags is allowed for any AWS OpsWorks-CM server or backup.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\\\\\-@]*)$" + }, "TimeWindowDefinition":{ "type":"string", "documentation":"

DDD:HH:MM (weekly start time) or HH:MM (daily start time).

Time windows always use coordinated universal time (UTC). Valid strings for day of week (DDD) are: Mon, Tue, Wed, Thr, Fri, Sat, or Sun.

", @@ -1184,6 +1335,28 @@ "pattern":"^((Mon|Tue|Wed|Thu|Fri|Sat|Sun):)?([0-1][0-9]|2[0-3]):[0-5][0-9]$" }, "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"AWSOpsWorksCMResourceArn", + "documentation":"

The Amazon Resource Number (ARN) of a resource from which you want to remove tags. For example, arn:aws:opsworks-cm:us-west-2:123456789012:server/test-owcm-server/EXAMPLE-66b0-4196-8274-d1a2bEXAMPLE.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The keys of tags that you want to remove.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateServerEngineAttributesRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/organizations/2016-11-28/service-2.json b/botocore/data/organizations/2016-11-28/service-2.json index fec21810..7387f4df 100644 --- a/botocore/data/organizations/2016-11-28/service-2.json +++ b/botocore/data/organizations/2016-11-28/service-2.json @@ -1090,7 +1090,7 @@ "members":{ "PolicyId":{ "shape":"PolicyId", - "documentation":"

The unique identifier (ID) of the policy that you want to attach to the target. You can get the ID for the policy by calling the ListPolicies operation.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase letters or digits.

" + "documentation":"

The unique identifier (ID) of the policy that you want to attach to the target. You can get the ID for the policy by calling the ListPolicies operation.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase or uppercase letters, digits, or the underscore character (_).

" }, "TargetId":{ "shape":"PolicyTargetId", @@ -1169,7 +1169,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"ConstraintViolationExceptionReason"} }, - "documentation":"

Performing this operation violates a minimum or maximum value limit. Examples include attempting to remove the last service control policy (SCP) from an OU or root, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you get receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide contact a valid address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity, which would cause the entity to have fewer than the minimum number of policies of the required type.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of policies that you can have in an organization.

", + "documentation":"

Performing this operation violates a minimum or maximum value limit. Examples include attempting to remove the last service control policy (SCP) from an OU or root, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you get receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide contact a valid address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity, which would cause the entity to have fewer than the minimum number of policies of the required type.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

  • TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant with the tag policy that’s in effect for the account. For more information, see Tag Policies in the AWS Organizations User Guide.

", "exception":true }, "ConstraintViolationExceptionReason":{ @@ -1458,7 +1458,7 @@ "members":{ "PolicyId":{ "shape":"PolicyId", - "documentation":"

The unique identifier (ID) of the policy that you want to delete. You can get the ID from the ListPolicies or ListPoliciesForTarget operations.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase letters or digits.

" + "documentation":"

The unique identifier (ID) of the policy that you want to delete. You can get the ID from the ListPolicies or ListPoliciesForTarget operations.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase or uppercase letters, digits, or the underscore character (_).

" } } }, @@ -1576,7 +1576,7 @@ "members":{ "PolicyId":{ "shape":"PolicyId", - "documentation":"

The unique identifier (ID) of the policy that you want details about. You can get the ID from the ListPolicies or ListPoliciesForTarget operations.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase letters or digits.

" + "documentation":"

The unique identifier (ID) of the policy that you want details about. You can get the ID from the ListPolicies or ListPoliciesForTarget operations.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase or uppercase letters, digits, or the underscore character (_).

" } } }, @@ -1606,7 +1606,7 @@ "members":{ "PolicyId":{ "shape":"PolicyId", - "documentation":"

The unique identifier (ID) of the policy you want to detach. You can get the ID from the ListPolicies or ListPoliciesForTarget operations.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase letters or digits.

" + "documentation":"

The unique identifier (ID) of the policy you want to detach. You can get the ID from the ListPolicies or ListPoliciesForTarget operations.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase or uppercase letters, digits, or the underscore character (_).

" }, "TargetId":{ "shape":"PolicyTargetId", @@ -2481,7 +2481,7 @@ "members":{ "PolicyId":{ "shape":"PolicyId", - "documentation":"

The unique identifier (ID) of the policy whose attachments you want to know.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase letters or digits.

" + "documentation":"

The unique identifier (ID) of the policy whose attachments you want to know.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase or uppercase letters, digits, or the underscore character (_).

" }, "NextToken":{ "shape":"NextToken", @@ -3112,7 +3112,7 @@ "members":{ "PolicyId":{ "shape":"PolicyId", - "documentation":"

The unique identifier (ID) of the policy that you want to update.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase letters or digits.

" + "documentation":"

The unique identifier (ID) of the policy that you want to update.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase or uppercase letters, digits, or the underscore character (_).
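A minimal client-side sketch of the documented ID format, in case you want to pre-validate IDs before calling the API (the service performs the authoritative check; the example IDs below are hypothetical):

    import re

    # Documented format: "p-" followed by 8 to 128 letters (either case),
    # digits, or underscores.
    POLICY_ID_RE = re.compile(r"p-[0-9A-Za-z_]{8,128}")

    def looks_like_policy_id(policy_id):
        # fullmatch ensures the whole string conforms, not just a prefix.
        return POLICY_ID_RE.fullmatch(policy_id) is not None

    print(looks_like_policy_id("p-FullAWSAccess"))  # True (hypothetical ID)
    print(looks_like_policy_id("p-short"))          # False: only 5 characters after "p-"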

" }, "Name":{ "shape":"PolicyName", diff --git a/botocore/data/personalize-runtime/2018-05-22/service-2.json b/botocore/data/personalize-runtime/2018-05-22/service-2.json index 3c29c4a2..bee26269 100644 --- a/botocore/data/personalize-runtime/2018-05-22/service-2.json +++ b/botocore/data/personalize-runtime/2018-05-22/service-2.json @@ -49,6 +49,22 @@ "max":256, "pattern":"arn:([a-z\\d-]+):personalize:.*:.*:.+" }, + "AttributeName":{ + "type":"string", + "max":150, + "pattern":"[A-Za-z\\d_]+" + }, + "AttributeValue":{ + "type":"string", + "max":1000, + "sensitive":true + }, + "Context":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"}, + "max":150 + }, "ErrorMessage":{"type":"string"}, "GetPersonalizedRankingRequest":{ "type":"structure", @@ -64,11 +80,15 @@ }, "inputList":{ "shape":"InputList", - "documentation":"

A list of items (itemId's) to rank. If an item was not included in the training dataset, the item is appended to the end of the reranked list.

" + "documentation":"

A list of items (itemId's) to rank. If an item was not included in the training dataset, the item is appended to the end of the reranked list. The maximum is 500.

" }, "userId":{ "shape":"UserID", "documentation":"

The user for which you want the campaign to provide a personalized ranking.

" + }, + "context":{ + "shape":"Context", + "documentation":"

The contextual metadata to use when getting recommendations. Contextual metadata includes any interaction information that might be relevant when getting a user's recommendations, such as the user's current location or device type. For more information, see Contextual Metadata.

" } } }, @@ -77,7 +97,7 @@ "members":{ "personalizedRanking":{ "shape":"ItemList", - "documentation":"

A list of items in order of most likely interest to the user.

" + "documentation":"

A list of items in order of most likely interest to the user. The maximum is 500.

" } } }, @@ -99,7 +119,11 @@ }, "numResults":{ "shape":"NumResults", - "documentation":"

The number of results to return. The default is 25. The maximum is 100.

" + "documentation":"

The number of results to return. The default is 25. The maximum is 500.

" + }, + "context":{ + "shape":"Context", + "documentation":"

The contextual metadata to use when getting recommendations. Contextual metadata includes any interaction information that might be relevant when getting a user's recommendations, such as the user's current location or device type. For more information, see Contextual Metadata.

" } } }, @@ -108,7 +132,7 @@ "members":{ "itemList":{ "shape":"ItemList", - "documentation":"

A list of recommendations.

" + "documentation":"

A list of recommendations sorted in ascending order by prediction score. There can be a maximum of 500 items in the list.
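A minimal boto3 sketch of how the new context map and the raised result limits might be exercised; the campaign ARN, user ID, and item IDs below are placeholders, not values from this model:

    import boto3

    personalize_rt = boto3.client("personalize-runtime")

    # Contextual metadata: keys must match the AttributeName pattern
    # [A-Za-z\d_]+ and values are free-form strings (up to 150 entries).
    context = {"DEVICE": "mobile", "TIME_OF_DAY": "evening"}

    recs = personalize_rt.get_recommendations(
        campaignArn="arn:aws:personalize:us-east-1:111122223333:campaign/example",
        userId="user-1",
        numResults=50,          # default 25, maximum now 500
        context=context,
    )
    for item in recs["itemList"]:           # at most 500 recommendations
        print(item["itemId"])

    # The same context map is accepted by get_personalized_ranking.
    ranking = personalize_rt.get_personalized_ranking(
        campaignArn="arn:aws:personalize:us-east-1:111122223333:campaign/example",
        userId="user-1",
        inputList=["item-1", "item-2", "item-3"],   # up to 500 itemIds
        context=context,
    )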

" } } }, diff --git a/botocore/data/pinpoint/2016-12-01/service-2.json b/botocore/data/pinpoint/2016-12-01/service-2.json index 4c1f84b9..94f9c861 100644 --- a/botocore/data/pinpoint/2016-12-01/service-2.json +++ b/botocore/data/pinpoint/2016-12-01/service-2.json @@ -132,7 +132,7 @@ "documentation": "

The request was denied because access to the specified resource is forbidden (ForbiddenException).

" } ], - "documentation": "

Creates a message template that you can use in messages that are sent through the email channel.

" + "documentation": "

Creates a message template for messages that are sent through the email channel.

" }, "CreateExportJob": { "name": "CreateExportJob", @@ -296,7 +296,7 @@ "documentation": "

The request was denied because access to the specified resource is forbidden (ForbiddenException).

" } ], - "documentation": "

Creates a message template that you can use in messages that are sent through a push notification channel.

" + "documentation": "

Creates a message template for messages that are sent through a push notification channel.

" }, "CreateSegment": { "name": "CreateSegment", @@ -376,7 +376,7 @@ "documentation": "

The request was denied because access to the specified resource is forbidden (ForbiddenException).

" } ], - "documentation": "

Creates a message template that you can use in messages that are sent through the SMS channel.

" + "documentation": "

Creates a message template for messages that are sent through the SMS channel.

" }, "CreateVoiceTemplate": { "name": "CreateVoiceTemplate", @@ -414,7 +414,7 @@ "documentation": "

The request was denied because access to the specified resource is forbidden (ForbiddenException).

" } ], - "documentation": "

Creates a message template that you can use in messages that are sent through the voice channel.

" + "documentation": "

Creates a message template for messages that are sent through the voice channel.

" }, "DeleteAdmChannel": { "name": "DeleteAdmChannel", @@ -834,7 +834,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Deletes a message template that was designed for use in messages that were sent through the email channel.

" + "documentation": "

Deletes a message template for messages that were sent through the email channel.

" }, "DeleteEndpoint": { "name": "DeleteEndpoint", @@ -1044,7 +1044,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Deletes a message template that was designed for use in messages that were sent through a push notification channel.

" + "documentation": "

Deletes a message template for messages that were sent through a push notification channel.

" }, "DeleteSegment": { "name": "DeleteSegment", @@ -1170,7 +1170,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Deletes a message template that was designed for use in messages that were sent through the SMS channel.

" + "documentation": "

Deletes a message template for messages that were sent through the SMS channel.

" }, "DeleteUserEndpoints": { "name": "DeleteUserEndpoints", @@ -1296,7 +1296,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Deletes a message template that was designed for use in messages that were sent through the voice channel.

" + "documentation": "

Deletes a message template for messages that were sent through the voice channel.

" }, "GetAdmChannel": { "name": "GetAdmChannel", @@ -1674,7 +1674,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Retrieves information about all of your applications.

" + "documentation": "

Retrieves information about all the applications that are associated with your Amazon Pinpoint account.

" }, "GetBaiduChannel": { "name": "GetBaiduChannel", @@ -2094,7 +2094,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Retrieves the content and settings for a message template that you can use in messages that are sent through the email channel.

" + "documentation": "

Retrieves the content and settings of a message template for messages that are sent through the email channel.

" }, "GetEndpoint": { "name": "GetEndpoint", @@ -2598,7 +2598,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Retrieves the content and settings for a message template that you can use in messages that are sent through a push notification channel.

" + "documentation": "

Retrieves the content and settings of a message template for messages that are sent through a push notification channel.

" }, "GetSegment": { "name": "GetSegment", @@ -2808,7 +2808,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Retrieves information about the configuration, dimension, and other settings for all versions of a specific segment that's associated with an application.

" + "documentation": "

Retrieves information about the configuration, dimension, and other settings for all the versions of a specific segment that's associated with an application.

" }, "GetSegments": { "name": "GetSegments", @@ -2934,7 +2934,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Retrieves the content and settings for a message template that you can use in messages that are sent through the SMS channel.

" + "documentation": "

Retrieves the content and settings of a message template for messages that are sent through the SMS channel.

" }, "GetUserEndpoints": { "name": "GetUserEndpoints", @@ -3060,7 +3060,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Retrieves the content and settings for a message template that you can use in messages that are sent through the voice channel.

" + "documentation": "

Retrieves the content and settings of a message template for messages that are sent through the voice channel.

" }, "ListJourneys": { "name": "ListJourneys", @@ -3118,11 +3118,51 @@ "shape": "ListTagsForResourceResponse", "documentation": "

The request succeeded.

" }, - "errors": [ - - ], + "errors": [], "documentation": "

Retrieves all the tags (keys and values) that are associated with an application, campaign, journey, message template, or segment.

" }, + "ListTemplateVersions": { + "name": "ListTemplateVersions", + "http": { + "method": "GET", + "requestUri": "/v1/templates/{template-name}/{template-type}/versions", + "responseCode": 200 + }, + "input": { + "shape": "ListTemplateVersionsRequest" + }, + "output": { + "shape": "ListTemplateVersionsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

The request contains a syntax error (BadRequestException).

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure (InternalServerErrorException).

" + }, + { + "shape": "ForbiddenException", + "documentation": "

The request was denied because access to the specified resource is forbidden (ForbiddenException).

" + }, + { + "shape": "NotFoundException", + "documentation": "

The request failed because the specified resource was not found (NotFoundException).

" + }, + { + "shape": "MethodNotAllowedException", + "documentation": "

The request failed because the method is not allowed for the specified resource (MethodNotAllowedException).

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" + } + ], + "documentation": "

Retrieves information about all the versions of a specific message template.

" + }, "ListTemplates": { "name": "ListTemplates", "http": { @@ -3423,9 +3463,7 @@ "input": { "shape": "TagResourceRequest" }, - "errors": [ - - ], + "errors": [], "documentation": "

Adds one or more tags (keys and values) to an application, campaign, journey, message template, or segment.

" }, "UntagResource": { @@ -3438,9 +3476,7 @@ "input": { "shape": "UntagResourceRequest" }, - "errors": [ - - ], + "errors": [], "documentation": "

Removes one or more tags (keys and values) from an application, campaign, journey, message template, or segment.

" }, "UpdateAdmChannel": { @@ -3861,7 +3897,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Updates an existing message template that you can use in messages that are sent through the email channel.

" + "documentation": "

Updates an existing message template for messages that are sent through the email channel.

" }, "UpdateEndpoint": { "name": "UpdateEndpoint", @@ -4071,7 +4107,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Cancels an active journey.

" + "documentation": "

Cancels (stops) an active journey.

" }, "UpdatePushTemplate": { "name": "UpdatePushTemplate", @@ -4113,7 +4149,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Updates an existing message template that you can use in messages that are sent through a push notification channel.

" + "documentation": "

Updates an existing message template for messages that are sent through a push notification channel.

" }, "UpdateSegment": { "name": "UpdateSegment", @@ -4239,7 +4275,49 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Updates an existing message template that you can use in messages that are sent through the SMS channel.

" + "documentation": "

Updates an existing message template for messages that are sent through the SMS channel.

" + }, + "UpdateTemplateActiveVersion": { + "name": "UpdateTemplateActiveVersion", + "http": { + "method": "PUT", + "requestUri": "/v1/templates/{template-name}/{template-type}/active-version", + "responseCode": 200 + }, + "input": { + "shape": "UpdateTemplateActiveVersionRequest" + }, + "output": { + "shape": "UpdateTemplateActiveVersionResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

The request contains a syntax error (BadRequestException).

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure (InternalServerErrorException).

" + }, + { + "shape": "ForbiddenException", + "documentation": "

The request was denied because access to the specified resource is forbidden (ForbiddenException).

" + }, + { + "shape": "NotFoundException", + "documentation": "

The request failed because the specified resource was not found (NotFoundException).

" + }, + { + "shape": "MethodNotAllowedException", + "documentation": "

The request failed because the method is not allowed for the specified resource (MethodNotAllowedException).

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" + } + ], + "documentation": "

Changes the status of a specific version of a message template to active.
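A minimal boto3 sketch of the new version-management operations, assuming an existing email template named MyTemplate (a hypothetical name) and version identifier:

    import boto3

    pinpoint = boto3.client("pinpoint")

    # Enumerate every version of the template; PageSize is a string in this API.
    versions = pinpoint.list_template_versions(
        TemplateName="MyTemplate",
        TemplateType="EMAIL",
        PageSize="20",
    )
    for item in versions["TemplateVersionsResponse"]["Item"]:
        print(item["Version"], item["LastModifiedDate"])

    # Promote version "3" to be the active version of the template.
    pinpoint.update_template_active_version(
        TemplateName="MyTemplate",
        TemplateType="EMAIL",
        TemplateActiveVersionRequest={"Version": "3"},
    )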

" }, "UpdateVoiceChannel": { "name": "UpdateVoiceChannel", @@ -4323,7 +4401,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Updates an existing message template that you can use in messages that are sent through the voice channel.

" + "documentation": "

Updates an existing message template for messages that are sent through the voice channel.

" } }, "shapes": { @@ -5006,7 +5084,7 @@ }, "MultiCondition": { "shape": "MultiConditionalSplitActivity", - "documentation": "

The settings for a multivariate split activity. This type of activity sends participants down one of as many as five paths in a journey, based on conditions that you specify.

" + "documentation": "

The settings for a multivariate split activity. This type of activity sends participants down one of as many as five paths (including a default Else path) in a journey, based on conditions that you specify.

" }, "RandomSplit": { "shape": "RandomSplitActivity", @@ -5553,7 +5631,7 @@ }, "FilterType": { "shape": "FilterType", - "documentation": "

The type of event that causes the campaign to be sent. Valid values are: SYSTEM, sends the campaign when a system event occurs; and, ENDPOINT, sends the campaign when an endpoint event (Events resource) occurs.

" + "documentation": "

The type of event that causes the campaign to be sent. Valid values are: SYSTEM, sends the campaign when a system event occurs; and, ENDPOINT, sends the campaign when an endpoint event (Events resource) occurs.

" } }, "documentation": "

Specifies the settings for events that cause a campaign to be sent.

", @@ -5619,7 +5697,7 @@ }, "CreationDate": { "shape": "__string", - "documentation": "

The date, ISO 8601 format, when the campaign was created.

" + "documentation": "

The date, in ISO 8601 format, when the campaign was created.

" }, "DefaultState": { "shape": "CampaignState", @@ -5864,11 +5942,11 @@ }, "FalseActivity": { "shape": "__string", - "documentation": "

The unique identifier for the activity to perform if the condition isn't met.

" + "documentation": "

The unique identifier for the activity to perform if the conditions aren't met.

" }, "TrueActivity": { "shape": "__string", - "documentation": "

The unique identifier for the activity to perform if the condition is met.

" + "documentation": "

The unique identifier for the activity to perform if the conditions are met.

" } }, "documentation": "

Specifies the settings for a yes/no split activity in a journey. This type of activity sends participants down one of two paths in a journey, based on conditions that you specify.

" @@ -5950,8 +6028,7 @@ "type": "structure", "members": { "EmailTemplateRequest": { - "shape": "EmailTemplateRequest", - "documentation": "

Creates a new message template that you can use in messages that are sent through the email channel.

" + "shape": "EmailTemplateRequest" }, "TemplateName": { "shape": "__string", @@ -6075,8 +6152,7 @@ "type": "structure", "members": { "PushNotificationTemplateRequest": { - "shape": "PushNotificationTemplateRequest", - "documentation": "

Creates a message template that you can use in messages that are sent through a push notification channel.

" + "shape": "PushNotificationTemplateRequest" }, "TemplateName": { "shape": "__string", @@ -6138,8 +6214,7 @@ "type": "structure", "members": { "SMSTemplateRequest": { - "shape": "SMSTemplateRequest", - "documentation": "

Creates a message template that you can use in messages that are sent through the SMS channel.

" + "shape": "SMSTemplateRequest" }, "TemplateName": { "shape": "__string", @@ -6538,6 +6613,12 @@ "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" } }, "required": [ @@ -6682,6 +6763,12 @@ "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" } }, "required": [ @@ -6767,6 +6854,12 @@ "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" } }, "required": [ @@ -6852,6 +6945,12 @@ "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" } }, "required": [ @@ -7087,6 +7186,10 @@ "TemplateName": { "shape": "__string", "documentation": "

The name of the email template to use for the message.

" + }, + "TemplateVersion": { + "shape": "__string", + "documentation": "

The unique identifier for the version of the email template to use for the message. If specified, this value must match the identifier for an existing template version. To retrieve a list of versions and version identifiers for a template, use the Template Versions resource.

If you don't specify a value for this property, Amazon Pinpoint uses the active version of the template. The active version is typically the version of a template that's been most recently reviewed and approved for use, depending on your workflow. It isn't necessarily the latest version of a template.

" } }, "documentation": "

Specifies the settings for an email activity in a journey. This type of activity sends an email message to participants.

" @@ -7131,7 +7234,7 @@ }, "CreationDate": { "shape": "__string", - "documentation": "

The date when the message template was created.

" + "documentation": "

The date, in ISO 8601 format, when the message template was created.

" }, "DefaultSubstitutions": { "shape": "__string", @@ -7143,7 +7246,7 @@ }, "LastModifiedDate": { "shape": "__string", - "documentation": "

The date when the message template was last modified.

" + "documentation": "

The date, in ISO 8601 format, when the message template was last modified.

" }, "Subject": { "shape": "__string", @@ -7169,6 +7272,10 @@ "TextPart": { "shape": "__string", "documentation": "

The message body, in plain text format, that's used in email messages that are based on the message template.

" + }, + "Version": { + "shape": "__string", + "documentation": "

The unique identifier, as an integer, for the active version of the message template, or the version of the template that you specified by using the version parameter in your request.

" } }, "documentation": "

Provides information about the content and settings for a message template that can be used in messages that are sent through the email channel.

", @@ -7259,11 +7366,11 @@ }, "Make": { "shape": "__string", - "documentation": "

The manufacturer of the endpoint device, such as Apple or Samsung.

" + "documentation": "

The manufacturer of the endpoint device, such as apple or samsung.

" }, "Model": { "shape": "__string", - "documentation": "

The model name or number of the endpoint device, such as iPhone.

" + "documentation": "

The model name or number of the endpoint device, such as iPhone or SM-G900F.

" }, "ModelVersion": { "shape": "__string", @@ -7271,7 +7378,7 @@ }, "Platform": { "shape": "__string", - "documentation": "

The platform of the endpoint device, such as iOS or Android.

" + "documentation": "

The platform of the endpoint device, such as ios.

" }, "PlatformVersion": { "shape": "__string", @@ -8233,7 +8340,7 @@ "shape": "__timestampIso8601", "location": "querystring", "locationName": "end-time", - "documentation": "

The last date and time to retrieve data for, as part of an inclusive date range that filters the query results. This value should be in extended ISO 8601 format, for example: 2019-07-19T00:00:00Z for July 19, 2019 and 2019-07-19T20:00:00Z for 8:00 PM July 19, 2019.

" + "documentation": "

The last date and time to retrieve data for, as part of an inclusive date range that filters the query results. This value should be in extended ISO 8601 format and use Coordinated Universal Time (UTC), for example: 2019-07-26T20:00:00Z for 8:00 PM UTC July 26, 2019.

" }, "KpiName": { "shape": "__string", @@ -8257,7 +8364,7 @@ "shape": "__timestampIso8601", "location": "querystring", "locationName": "start-time", - "documentation": "

The first date and time to retrieve data for, as part of an inclusive date range that filters the query results. This value should be in extended ISO 8601 format, for example: 2019-07-15T00:00:00Z for July 15, 2019 and 2019-07-15T16:00:00Z for 4:00 PM July 15, 2019.

" + "documentation": "

The first date and time to retrieve data for, as part of an inclusive date range that filters the query results. This value should be in extended ISO 8601 format and use Coordinated Universal Time (UTC), for example: 2019-07-19T20:00:00Z for 8:00 PM UTC July 19, 2019. This value should also be fewer than 90 days from the current day.

" } }, "required": [ @@ -8422,7 +8529,7 @@ "shape": "__timestampIso8601", "location": "querystring", "locationName": "end-time", - "documentation": "

The last date and time to retrieve data for, as part of an inclusive date range that filters the query results. This value should be in extended ISO 8601 format, for example: 2019-07-19T00:00:00Z for July 19, 2019 and 2019-07-19T20:00:00Z for 8:00 PM July 19, 2019.

" + "documentation": "

The last date and time to retrieve data for, as part of an inclusive date range that filters the query results. This value should be in extended ISO 8601 format and use Coordinated Universal Time (UTC), for example: 2019-07-26T20:00:00Z for 8:00 PM UTC July 26, 2019.

" }, "KpiName": { "shape": "__string", @@ -8446,7 +8553,7 @@ "shape": "__timestampIso8601", "location": "querystring", "locationName": "start-time", - "documentation": "

The first date and time to retrieve data for, as part of an inclusive date range that filters the query results. This value should be in extended ISO 8601 format, for example: 2019-07-15T00:00:00Z for July 15, 2019 and 2019-07-15T16:00:00Z for 4:00 PM July 15, 2019.

" + "documentation": "

The first date and time to retrieve data for, as part of an inclusive date range that filters the query results. This value should be in extended ISO 8601 format and use Coordinated Universal Time (UTC), for example: 2019-07-19T20:00:00Z for 8:00 PM UTC July 19, 2019. This value should also be fewer than 90 days from the current day.
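A minimal boto3 sketch of one of these date-range KPI calls; boto3 serializes datetime arguments as extended ISO 8601 UTC timestamps, and the application ID and metric name below are placeholders:

    import boto3
    from datetime import datetime, timedelta, timezone

    pinpoint = boto3.client("pinpoint")

    end_time = datetime.now(timezone.utc)
    start_time = end_time - timedelta(days=7)   # must be fewer than 90 days in the past

    kpi = pinpoint.get_application_date_range_kpi(
        ApplicationId="1234567890abcdef1234567890abcdef",  # placeholder app ID
        KpiName="unique-deliveries",                        # example standard metric
        StartTime=start_time,
        EndTime=end_time,
    )
    print(kpi["ApplicationDateRangeKpiResponse"]["KpiResult"]["Rows"])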

" } }, "required": [ @@ -8683,6 +8790,12 @@ "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" } }, "required": [ @@ -9130,6 +9243,12 @@ "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" } }, "required": [ @@ -9428,6 +9547,12 @@ "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" } }, "required": [ @@ -9513,6 +9638,12 @@ "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" } }, "required": [ @@ -9540,7 +9671,7 @@ }, "Percentage": { "shape": "__integer", - "documentation": "

The percentage of participants who shouldn't continue the journey.

" + "documentation": "

The percentage of participants who shouldn't continue the journey.

To determine which participants are held out, Amazon Pinpoint applies a probability-based algorithm to the percentage that you specify. Therefore, the actual percentage of participants who are held out may not be equal to the percentage that you specify.

" } }, "documentation": "

Specifies the settings for a holdout activity in a journey. This type of activity stops a journey for a specified percentage of participants.

", @@ -9912,7 +10043,7 @@ "members": { "Activities": { "shape": "MapOfActivity", - "documentation": "

The configuration and other settings for the activities that comprise the journey.

" + "documentation": "

A map that contains a set of Activity objects, one object for each activity in the journey. For each Activity object, the key is the unique identifier (string) for an activity and the value is the settings for the activity.

" }, "ApplicationId": { "shape": "__string", @@ -9964,7 +10095,7 @@ }, "State": { "shape": "State", - "documentation": "

The current status of the journey. Possible values are:

  • DRAFT - The journey is being developed and hasn't been published yet.

  • ACTIVE - The journey has been developed and published. Depending on the journey's schedule, the journey may currently be running or scheduled to start running at a later time. If a journey's status is ACTIVE, you can't add, change, or remove activities from it.

  • COMPLETED - The journey has been published and has finished running. All participants have entered the journey and no participants are waiting to complete the journey or any activities in the journey.

  • CANCELLED - The journey has been stopped. If a journey's status is CANCELLED, you can't add, change, or remove activities or segment settings from the journey.

  • CLOSED - The journey has been published and has started running. It may have also passed its scheduled end time, or passed its scheduled start time and a refresh frequency hasn't been specified for it. If a journey's status is CLOSED, you can't add participants to it, and no existing participants can enter the journey for the first time. However, any existing participants who are currently waiting to start an activity may resume the journey.

" + "documentation": "

The current status of the journey. Possible values are:

  • DRAFT - The journey is being developed and hasn't been published yet.

  • ACTIVE - The journey has been developed and published. Depending on the journey's schedule, the journey may currently be running or scheduled to start running at a later time. If a journey's status is ACTIVE, you can't add, change, or remove activities from it.

  • COMPLETED - The journey has been published and has finished running. All participants have entered the journey and no participants are waiting to complete the journey or any activities in the journey.

  • CANCELLED - The journey has been stopped. If a journey's status is CANCELLED, you can't add, change, or remove activities or segment settings from the journey.

  • CLOSED - The journey has been published and has started running. It may have also passed its scheduled end time, or passed its scheduled start time and a refresh frequency hasn't been specified for it. If a journey's status is CLOSED, you can't add participants to it, and no existing participants can enter the journey for the first time. However, any existing participants who are currently waiting to start an activity may continue the journey.

" }, "tags": { "shape": "MapOf__string", @@ -10088,6 +10219,51 @@ ], "payload": "TagsModel" }, + "ListTemplateVersionsRequest": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "next-token", + "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + }, + "PageSize": { + "shape": "__string", + "location": "querystring", + "locationName": "page-size", + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + }, + "TemplateName": { + "shape": "__string", + "location": "uri", + "locationName": "template-name", + "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "TemplateType": { + "shape": "__string", + "location": "uri", + "locationName": "template-type", + "documentation": "

The type of channel that the message template is designed for. Valid values are: EMAIL, PUSH, SMS, and VOICE.

" + } + }, + "required": [ + "TemplateName", + "TemplateType" + ] + }, + "ListTemplateVersionsResponse": { + "type": "structure", + "members": { + "TemplateVersionsResponse": { + "shape": "TemplateVersionsResponse" + } + }, + "required": [ + "TemplateVersionsResponse" + ], + "payload": "TemplateVersionsResponse" + }, "ListTemplatesRequest": { "type": "structure", "members": { @@ -10113,7 +10289,7 @@ "shape": "__string", "location": "querystring", "locationName": "template-type", - "documentation": "

The type of message template to include in the results. Valid values are: EMAIL, SMS, PUSH, and VOICE. To include all types of templates in the results, don't include this parameter in your request.

" + "documentation": "

The type of message template to include in the results. Valid values are: EMAIL, PUSH, SMS, and VOICE. To include all types of templates in the results, don't include this parameter in your request.

" } } }, @@ -10392,14 +10568,14 @@ }, "DefaultActivity": { "shape": "__string", - "documentation": "

The activity to perform by default for any path in the activity.

" + "documentation": "

The unique identifier for the activity to perform for participants who don't meet any of the conditions specified for other paths in the activity.

" }, "EvaluationWaitTime": { "shape": "WaitTime", "documentation": "

The amount of time to wait or the date and time when Amazon Pinpoint determines whether the conditions are met.

" } }, - "documentation": "

Specifies the settings for a multivariate split activity in a journey. This type of activity sends participants down one of as many as five paths in a journey, based on conditions that you specify.

" + "documentation": "

Specifies the settings for a multivariate split activity in a journey. This type of activity sends participants down one of as many as five paths (including a default Else path) in a journey, based on conditions that you specify.

" }, "NotFoundException": { "type": "structure", @@ -10636,7 +10812,7 @@ }, "CreationDate": { "shape": "__string", - "documentation": "

The date when the message template was created.

" + "documentation": "

The date, in ISO 8601 format, when the message template was created.

" }, "Default": { "shape": "DefaultPushNotificationTemplate", @@ -10652,7 +10828,7 @@ }, "LastModifiedDate": { "shape": "__string", - "documentation": "

The date when the message template was last modified.

" + "documentation": "

The date, in ISO 8601 format, when the message template was last modified.

" }, "tags": { "shape": "MapOf__string", @@ -10670,14 +10846,18 @@ "TemplateType": { "shape": "TemplateType", "documentation": "

The type of channel that the message template is designed for. For a push notification template, this value is PUSH.

" + }, + "Version": { + "shape": "__string", + "documentation": "

The unique identifier, as an integer, for the active version of the message template, or the version of the template that you specified by using the version parameter in your request.

" } }, "documentation": "

Provides information about the content and settings for a message template that can be used in messages that are sent through a push notification channel.

", "required": [ "LastModifiedDate", "CreationDate", - "TemplateName", - "TemplateType" + "TemplateType", + "TemplateName" ] }, "PutEventStreamRequest": { @@ -10775,7 +10955,7 @@ }, "Percentage": { "shape": "__integer", - "documentation": "

The percentage of participants to send down the activity path.

" + "documentation": "

The percentage of participants to send down the activity path.

To determine which participants are sent down each path, Amazon Pinpoint applies a probability-based algorithm to the percentages that you specify for the paths. Therefore, the actual percentage of participants who are sent down a path may not be equal to the percentage that you specify.

" } }, "documentation": "

Specifies the settings for a path in a random split activity in a journey.

" @@ -11046,7 +11226,7 @@ }, "CreationDate": { "shape": "__string", - "documentation": "

The date when the message template was created.

" + "documentation": "

The date, in ISO 8601 format, when the message template was created.

" }, "DefaultSubstitutions": { "shape": "__string", @@ -11054,7 +11234,7 @@ }, "LastModifiedDate": { "shape": "__string", - "documentation": "

The date when the message template was last modified.

" + "documentation": "

The date, in ISO 8601 format, when the message template was last modified.

" }, "tags": { "shape": "MapOf__string", @@ -11072,6 +11252,10 @@ "TemplateType": { "shape": "TemplateType", "documentation": "

The type of channel that the message template is designed for. For an SMS template, this value is SMS.

" + }, + "Version": { + "shape": "__string", + "documentation": "

The unique identifier, as an integer, for the active version of the message template, or the version of the template that you specified by using the version parameter in your request.

" } }, "documentation": "

Provides information about the content and settings for a message template that can be used in text messages that are sent through the SMS channel.

", @@ -11670,9 +11854,23 @@ "Name": { "shape": "__string", "documentation": "

The name of the message template to use for the message. If specified, this value must match the name of an existing message template.

" + }, + "Version": { + "shape": "__string", + "documentation": "

The unique identifier for the version of the message template to use for the message. If specified, this value must match the identifier for an existing template version. To retrieve a list of versions and version identifiers for a template, use the Template Versions resource.

If you don't specify a value for this property, Amazon Pinpoint uses the active version of the template. The active version is typically the version of a template that's been most recently reviewed and approved for use, depending on your workflow. It isn't necessarily the latest version of a template.

" } }, - "documentation": "

Specifies the name of the message template to use for the message.

" + "documentation": "

Specifies the name and version of the message template to use for the message.

" + }, + "TemplateActiveVersionRequest": { + "type": "structure", + "members": { + "Version": { + "shape": "__string", + "documentation": "

The unique identifier for the version of the message template to use as the active version of the template. If specified, this value must match the identifier for an existing template version. To retrieve a list of versions and version identifiers for a template, use the Template Versions resource.

" + } + }, + "documentation": "

Specifies which version of a message template to use as the active version of the template.

" }, "TemplateConfiguration": { "type": "structure", @@ -11705,7 +11903,7 @@ }, "CreationDate": { "shape": "__string", - "documentation": "

The date when the message template was created.

" + "documentation": "

The date, in ISO 8601 format, when the message template was created.

" }, "DefaultSubstitutions": { "shape": "__string", @@ -11713,7 +11911,7 @@ }, "LastModifiedDate": { "shape": "__string", - "documentation": "

The date when the message template was last modified.

" + "documentation": "

The date, in ISO 8601 format, when the message template was last modified.

" }, "tags": { "shape": "MapOf__string", @@ -11730,7 +11928,11 @@ }, "TemplateType": { "shape": "TemplateType", - "documentation": "

The type of channel that the message template is designed for.

" + "documentation": "

The type of channel that the message template is designed for. Possible values are: EMAIL, PUSH, SMS, and VOICE.

" + }, + "Version": { + "shape": "__string", + "documentation": "

The unique identifier, as an integer, for the active version of the message template.

" } }, "documentation": "

Provides information about a message template that's associated with your Amazon Pinpoint account.

", @@ -11750,6 +11952,71 @@ "PUSH" ] }, + "TemplateVersionResponse": { + "type": "structure", + "members": { + "CreationDate": { + "shape": "__string", + "documentation": "

The date, in ISO 8601 format, when the version of the message template was created.

" + }, + "DefaultSubstitutions": { + "shape": "__string", + "documentation": "

A JSON object that specifies the default values that are used for message variables in the version of the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.

" + }, + "LastModifiedDate": { + "shape": "__string", + "documentation": "

The date, in ISO 8601 format, when the version of the message template was last modified.

" + }, + "TemplateDescription": { + "shape": "__string", + "documentation": "

The custom description of the version of the message template.

" + }, + "TemplateName": { + "shape": "__string", + "documentation": "

The name of the message template.

" + }, + "TemplateType": { + "shape": "__string", + "documentation": "

The type of channel that the message template is designed for. Possible values are: EMAIL, PUSH, SMS, and VOICE.

" + }, + "Version": { + "shape": "__string", + "documentation": "

The unique identifier for the version of the message template. This value is an integer that Amazon Pinpoint automatically increments and assigns to each new version of a template.

" + } + }, + "documentation": "

Provides information about a specific version of a message template.

", + "required": [ + "LastModifiedDate", + "CreationDate", + "TemplateName", + "TemplateType" + ] + }, + "TemplateVersionsResponse": { + "type": "structure", + "members": { + "Item": { + "shape": "ListOfTemplateVersionResponse", + "documentation": "

An array of responses, one for each version of the message template.

" + }, + "Message": { + "shape": "__string", + "documentation": "

The message that's returned from the API for the request to retrieve information about all the versions of the message template.

" + }, + "NextToken": { + "shape": "__string", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + }, + "RequestID": { + "shape": "__string", + "documentation": "

The unique identifier for the request to retrieve information about all the versions of the message template.

" + } + }, + "documentation": "

Provides information about all the versions of a specific message template.

", + "required": [ + "Item" + ] + }, "TemplatesResponse": { "type": "structure", "members": { @@ -12155,6 +12422,12 @@ "UpdateEmailTemplateRequest": { "type": "structure", "members": { + "CreateNewVersion": { + "shape": "__boolean", + "location": "querystring", + "locationName": "create-new-version", + "documentation": "

Specifies whether to save the updates as a new version of the message template. Valid values are: true, save the updates as a new version; and, false, save the updates to the latest existing version of the template.

If you don't specify a value for this parameter, Amazon Pinpoint saves the updates to the latest existing version of the template. If you specify a value of true for this parameter, don't specify a value for the version parameter. Otherwise, an error will occur.
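A minimal boto3 sketch of the two update modes described above, using a hypothetical template name and version identifier:

    import boto3

    pinpoint = boto3.client("pinpoint")

    # Save the edit as a brand-new version (don't pass Version together with this).
    pinpoint.update_email_template(
        TemplateName="MyTemplate",
        CreateNewVersion=True,
        EmailTemplateRequest={
            "Subject": "Hello",
            "TextPart": "Plain-text body of the new version.",
        },
    )

    # Overwrite the latest existing version in place; passing its identifier
    # guards against racing with another writer.
    pinpoint.update_email_template(
        TemplateName="MyTemplate",
        Version="4",
        EmailTemplateRequest={"TextPart": "Revised body."},
    )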

" + }, "EmailTemplateRequest": { "shape": "EmailTemplateRequest" }, @@ -12163,6 +12436,12 @@ "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.
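
For illustration, a hedged boto3 sketch of the two update modes these query parameters describe. The EmailTemplateRequest members (Subject, TextPart) are assumptions taken from a shape that isn't reproduced in this hunk, and the template name and version are hypothetical.

```python
import boto3

pinpoint = boto3.client("pinpoint")

# Save the edits as a brand-new version. When CreateNewVersion is true,
# the version query parameter must be omitted.
pinpoint.update_email_template(
    TemplateName="WelcomeEmail",
    CreateNewVersion=True,
    EmailTemplateRequest={
        "Subject": "Welcome!",            # assumed EmailTemplateRequest member
        "TextPart": "Hello and welcome",  # assumed EmailTemplateRequest member
    },
)

# Or overwrite the latest existing version, guarding against races by
# passing that version's identifier explicitly.
pinpoint.update_email_template(
    TemplateName="WelcomeEmail",
    CreateNewVersion=False,
    Version="3",  # must match the latest existing version
    EmailTemplateRequest={"Subject": "Welcome back!"},
)
```

The same CreateNewVersion/Version pair appears on the push, SMS, and voice template update requests later in this hunk.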

" } }, "required": [ @@ -12362,15 +12641,26 @@ "UpdatePushTemplateRequest": { "type": "structure", "members": { + "CreateNewVersion": { + "shape": "__boolean", + "location": "querystring", + "locationName": "create-new-version", + "documentation": "

Specifies whether to save the updates as a new version of the message template. Valid values are: true, save the updates as a new version; and, false, save the updates to the latest existing version of the template.

If you don't specify a value for this parameter, Amazon Pinpoint saves the updates to the latest existing version of the template. If you specify a value of true for this parameter, don't specify a value for the version parameter. Otherwise, an error will occur.

" + }, "PushNotificationTemplateRequest": { - "shape": "PushNotificationTemplateRequest", - "documentation": "

Updates an existing message template that you can use in messages that are sent through a push notification channel.

" + "shape": "PushNotificationTemplateRequest" }, "TemplateName": { "shape": "__string", "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" } }, "required": [ @@ -12463,6 +12753,12 @@ "UpdateSmsTemplateRequest": { "type": "structure", "members": { + "CreateNewVersion": { + "shape": "__boolean", + "location": "querystring", + "locationName": "create-new-version", + "documentation": "

Specifies whether to save the updates as a new version of the message template. Valid values are: true, save the updates as a new version; and, false, save the updates to the latest existing version of the template.

If you don't specify a value for this parameter, Amazon Pinpoint saves the updates to the latest existing version of the template. If you specify a value of true for this parameter, don't specify a value for the version parameter. Otherwise, an error will occur.

" + }, "SMSTemplateRequest": { "shape": "SMSTemplateRequest" }, @@ -12471,6 +12767,12 @@ "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" } }, "required": [ @@ -12491,6 +12793,44 @@ ], "payload": "MessageBody" }, + "UpdateTemplateActiveVersionRequest": { + "type": "structure", + "members": { + "TemplateActiveVersionRequest": { + "shape": "TemplateActiveVersionRequest" + }, + "TemplateName": { + "shape": "__string", + "location": "uri", + "locationName": "template-name", + "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" + }, + "TemplateType": { + "shape": "__string", + "location": "uri", + "locationName": "template-type", + "documentation": "

The type of channel that the message template is designed for. Valid values are: EMAIL, PUSH, SMS, and VOICE.
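
For illustration, a minimal sketch of promoting a specific version with this request, assuming the TemplateActiveVersionRequest payload carries the version identifier to activate (that member isn't shown in this hunk); the names are hypothetical.

```python
import boto3

pinpoint = boto3.client("pinpoint")

# Make version "2" the active version of the EMAIL template "WelcomeEmail".
pinpoint.update_template_active_version(
    TemplateName="WelcomeEmail",
    TemplateType="EMAIL",
    TemplateActiveVersionRequest={"Version": "2"},  # assumed member name
)
```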

" + } + }, + "required": [ + "TemplateName", + "TemplateType", + "TemplateActiveVersionRequest" + ], + "payload": "TemplateActiveVersionRequest" + }, + "UpdateTemplateActiveVersionResponse": { + "type": "structure", + "members": { + "MessageBody": { + "shape": "MessageBody" + } + }, + "required": [ + "MessageBody" + ], + "payload": "MessageBody" + }, "UpdateVoiceChannelRequest": { "type": "structure", "members": { @@ -12525,12 +12865,24 @@ "UpdateVoiceTemplateRequest": { "type": "structure", "members": { + "CreateNewVersion": { + "shape": "__boolean", + "location": "querystring", + "locationName": "create-new-version", + "documentation": "

Specifies whether to save the updates as a new version of the message template. Valid values are: true, save the updates as a new version; and, false, save the updates to the latest existing version of the template.

If you don't specify a value for this parameter, Amazon Pinpoint saves the updates to the latest existing version of the template. If you specify a value of true for this parameter, don't specify a value for the version parameter. Otherwise, an error will occur.

" + }, "TemplateName": { "shape": "__string", "location": "uri", "locationName": "template-name", "documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

" }, + "Version": { + "shape": "__string", + "location": "querystring", + "locationName": "version", + "documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier of an existing template version. If specified for an update operation, this value must match the identifier of the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

  • For a get operation, retrieves information about the active version of the template.

  • For an update operation, saves the updates to the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.

  • For a delete operation, deletes the template, including all versions of the template.

" + }, "VoiceTemplateRequest": { "shape": "VoiceTemplateRequest" } @@ -12682,7 +13034,7 @@ }, "CreationDate": { "shape": "__string", - "documentation": "

The date when the message template was created.

" + "documentation": "

The date, in ISO 8601 format, when the message template was created.

" }, "DefaultSubstitutions": { "shape": "__string", @@ -12694,7 +13046,7 @@ }, "LastModifiedDate": { "shape": "__string", - "documentation": "

The date when the message template was last modified.

" + "documentation": "

The date, in ISO 8601 format, when the message template was last modified.

" }, "tags": { "shape": "MapOf__string", @@ -12713,6 +13065,10 @@ "shape": "TemplateType", "documentation": "

The type of channel that the message template is designed for. For a voice template, this value is VOICE.

" }, + "Version": { + "shape": "__string", + "documentation": "

The unique identifier, as an integer, for the active version of the message template, or the version of the template that you specified by using the version parameter in your request.

" + }, "VoiceId": { "shape": "__string", "documentation": "

The name of the voice that's used when delivering messages that are based on the message template. For a list of supported voices, see the Amazon Polly Developer Guide.

" @@ -12866,7 +13222,7 @@ "members": { "Activities": { "shape": "MapOfActivity", - "documentation": "

The configuration and other settings for the activities that comprise the journey.

" + "documentation": "

A map that contains a set of Activity objects, one object for each activity in the journey. For each Activity object, the key is the unique identifier (string) for an activity and the value is the settings for the activity. An activity identifier can contain a maximum of 128 characters. The characters must be alphanumeric characters.

" }, "CreationDate": { "shape": "__string", @@ -12902,7 +13258,7 @@ }, "StartActivity": { "shape": "__string", - "documentation": "

The unique identifier for the first activity in the journey.

" + "documentation": "

The unique identifier for the first activity in the journey. An activity identifier can contain a maximum of 128 characters. The characters must be alphanumeric characters.

" }, "StartCondition": { "shape": "StartCondition", @@ -13091,6 +13447,12 @@ "shape": "TemplateResponse" } }, + "ListOfTemplateVersionResponse": { + "type": "list", + "member": { + "shape": "TemplateVersionResponse" + } + }, "ListOfTreatmentResource": { "type": "list", "member": { diff --git a/botocore/data/rds/2014-10-31/paginators-1.json b/botocore/data/rds/2014-10-31/paginators-1.json index 3f9bdcb8..af1667ff 100644 --- a/botocore/data/rds/2014-10-31/paginators-1.json +++ b/botocore/data/rds/2014-10-31/paginators-1.json @@ -204,6 +204,12 @@ "limit_key": "MaxRecords", "output_token": "Marker", "result_key": "Targets" + }, + "DescribeExportTasks": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "ExportTasks" } } } diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index ddfba0a9..6e38e052 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -127,6 +127,23 @@ ], "documentation":"

Backtracks a DB cluster to a specific time, without creating a new DB cluster.

For more information on backtracking, see Backtracking an Aurora DB Cluster in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" }, + "CancelExportTask":{ + "name":"CancelExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelExportTaskMessage"}, + "output":{ + "shape":"ExportTask", + "resultWrapper":"CancelExportTaskResult" + }, + "errors":[ + {"shape":"ExportTaskNotFoundFault"}, + {"shape":"InvalidExportTaskStateFault"} + ], + "documentation":"

Cancels an export task in progress that is exporting a snapshot to Amazon S3. Any data that has already been written to the S3 bucket isn't removed.
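
For illustration, a one-call boto3 sketch; the export-task identifier is hypothetical.

```python
import boto3

rds = boto3.client("rds")

# Stop an in-progress snapshot export; data already written to S3 stays put.
task = rds.cancel_export_task(ExportTaskIdentifier="orders-snapshot-export")
print(task["Status"])
```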

" + }, "CopyDBClusterParameterGroup":{ "name":"CopyDBClusterParameterGroup", "http":{ @@ -164,7 +181,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:

  • KmsKeyId - The AWS Key Management System (AWS KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region.

  • PreSignedUrl - A URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot action to be called in the source AWS Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied.

    The pre-signed URL request must contain the following parameter values:

    • KmsKeyId - The KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

    • DestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.

    • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

    To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

    If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.

  • TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted DB cluster snapshots from one AWS Region to another, see Copying a Snapshot in the Amazon Aurora User Guide.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" + "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:

  • KmsKeyId - The AWS Key Management System (AWS KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region.

  • PreSignedUrl - A URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot action to be called in the source AWS Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied.

    The pre-signed URL request must contain the following parameter values:

    • KmsKeyId - The KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

    • DestinationRegion - The name of the AWS Region that the DB cluster snapshot is to be created in.

    • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

    To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

    If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.

  • TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted DB cluster snapshots from one AWS Region to another, see Copying a Snapshot in the Amazon Aurora User Guide.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.
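
As the note about SDK tools above says, boto3 can generate the pre-signed URL itself when SourceRegion is supplied; a hedged sketch with hypothetical identifiers and KMS alias.

```python
import boto3

# Call the API in the destination Region; passing SourceRegion makes boto3
# build the PreSignedUrl for the source Region automatically.
rds = boto3.client("rds", region_name="us-east-1")

rds.copy_db_cluster_snapshot(
    SourceDBClusterSnapshotIdentifier=(
        "arn:aws:rds:us-west-2:123456789012:"
        "cluster-snapshot:aurora-cluster1-snapshot-20161115"
    ),
    TargetDBClusterSnapshotIdentifier="aurora-cluster1-snapshot-copy",
    KmsKeyId="alias/destination-region-key",  # hypothetical KMS alias
    SourceRegion="us-west-2",
)
```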

" }, "CopyDBParameterGroup":{ "name":"CopyDBParameterGroup", @@ -202,7 +219,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Copies the specified DB snapshot. The source DB snapshot must be in the \"available\" state.

You can copy a snapshot from one AWS Region to another. In that case, the AWS Region where you call the CopyDBSnapshot action is the destination AWS Region for the DB snapshot copy.

For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.

" + "documentation":"

Copies the specified DB snapshot. The source DB snapshot must be in the \"available\" state.

You can copy a snapshot from one AWS Region to another. In that case, the AWS Region where you call the CopyDBSnapshot action is the destination AWS Region for the DB snapshot copy.

For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.

" }, "CopyOptionGroup":{ "name":"CopyOptionGroup", @@ -511,7 +528,7 @@ {"shape":"SubscriptionCategoryNotFoundFault"}, {"shape":"SourceNotFoundFault"} ], - "documentation":"

Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType nor the SourceIdentifier, you are notified of events generated from all RDS sources belonging to your customer account.

RDS event notification is only available for unencrypted SNS topics. If you specify an encrypted SNS topic, event notifications aren't sent for the topic.

" + "documentation":"

Creates an RDS event notification subscription. This action requires a topic Amazon Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you receive notice of the events for that source type for all your RDS sources. If you don't specify either the SourceType or the SourceIdentifier, you are notified of events generated from all RDS sources belonging to your customer account.

RDS event notification is only available for unencrypted SNS topics. If you specify an encrypted SNS topic, event notifications aren't sent for the topic.
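
For illustration, a hedged boto3 sketch of the SourceType/SourceIds/EventCategories combination described above; the topic ARN and instance identifiers are hypothetical.

```python
import boto3

rds = boto3.client("rds")

# Notify an (unencrypted) SNS topic about availability and backup events
# for two specific DB instances.
rds.create_event_subscription(
    SubscriptionName="my-instance-events",
    SnsTopicArn="arn:aws:sns:us-east-1:123456789012:rds-events",
    SourceType="db-instance",
    SourceIds=["mydbinstance1", "mydbinstance2"],
    EventCategories=["availability", "backup"],
    Enabled=True,
)
```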

" }, "CreateGlobalCluster":{ "name":"CreateGlobalCluster", @@ -653,7 +670,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBInstanceAutomatedBackupQuotaExceededFault"} ], - "documentation":"

The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.

If you request a final DB snapshot the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.

Note that when a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter.

If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following conditions are true:

  • The DB cluster is a Read Replica of another Amazon Aurora DB cluster.

  • The DB instance is the only instance in the DB cluster.

To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no longer a Read Replica. After the promotion completes, then call the DeleteDBInstance API action to delete the final instance in the DB cluster.

" + "documentation":"

The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.

If you request a final DB snapshot the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.

When a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter.

If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following conditions are true:

  • The DB cluster is a Read Replica of another Amazon Aurora DB cluster.

  • The DB instance is the only instance in the DB cluster.

To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no longer a Read Replica. After the promotion completes, then call the DeleteDBInstance API action to delete the final instance in the DB cluster.
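
For illustration, a minimal boto3 sketch of the final-snapshot behavior described above; the identifiers are hypothetical.

```python
import boto3

rds = boto3.client("rds")

# Take a final snapshot before deleting; the instance reports "deleting"
# until the snapshot has been created.
rds.delete_db_instance(
    DBInstanceIdentifier="mydbinstance",
    SkipFinalSnapshot=False,
    FinalDBSnapshotIdentifier="mydbinstance-final",
)

# For a Read Replica, or an instance stuck in a failure state, skip the
# final snapshot instead.
rds.delete_db_instance(
    DBInstanceIdentifier="my-read-replica",
    SkipFinalSnapshot=True,
)
```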

" }, "DeleteDBInstanceAutomatedBackup":{ "name":"DeleteDBInstanceAutomatedBackup", @@ -983,7 +1000,7 @@ "errors":[ {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

Returns information about provisioned Aurora DB clusters. This API supports pagination.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" + "documentation":"

Returns information about provisioned Aurora DB clusters. This API supports pagination.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances.

" }, "DescribeDBEngineVersions":{ "name":"DescribeDBEngineVersions", @@ -1028,7 +1045,7 @@ "errors":[ {"shape":"DBInstanceNotFoundFault"} ], - "documentation":"

Returns information about provisioned RDS instances. This API supports pagination.

" + "documentation":"

Returns information about provisioned RDS instances. This API supports pagination.

This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances.
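
Because the operation is paginated, the botocore paginator can drive Marker and MaxRecords for you; a minimal sketch.

```python
import boto3

rds = boto3.client("rds")

# Iterate every page of DescribeDBInstances without handling Marker manually.
for page in rds.get_paginator("describe_db_instances").paginate():
    for instance in page["DBInstances"]:
        print(instance["DBInstanceIdentifier"], instance["Engine"])
```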

" }, "DescribeDBLogFiles":{ "name":"DescribeDBLogFiles", @@ -1260,6 +1277,22 @@ }, "documentation":"

Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

" }, + "DescribeExportTasks":{ + "name":"DescribeExportTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportTasksMessage"}, + "output":{ + "shape":"ExportTasksMessage", + "resultWrapper":"DescribeExportTasksResult" + }, + "errors":[ + {"shape":"ExportTaskNotFoundFault"} + ], + "documentation":"

Returns information about a snapshot export to Amazon S3. This API operation supports pagination.
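
This release also registers a DescribeExportTasks paginator (see the paginators-1.json hunk above), so the same pattern applies; a minimal sketch.

```python
import boto3

rds = boto3.client("rds")

# List every snapshot export task, page by page.
for page in rds.get_paginator("describe_export_tasks").paginate():
    for task in page["ExportTasks"]:
        print(task["ExportTaskIdentifier"], task["Status"])
```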

" + }, "DescribeGlobalClusters":{ "name":"DescribeGlobalClusters", "http":{ @@ -1482,6 +1515,22 @@ ], "documentation":"

Lists all tags on an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" }, + "ModifyCertificates":{ + "name":"ModifyCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyCertificatesMessage"}, + "output":{ + "shape":"ModifyCertificatesResult", + "resultWrapper":"ModifyCertificatesResult" + }, + "errors":[ + {"shape":"CertificateNotFoundFault"} + ], + "documentation":"

Override the system-default Secure Sockets Layer/Transport Layer Security (SSL/TLS) certificate for Amazon RDS for new DB instances temporarily, or remove the override.

By using this operation, you can specify an RDS-approved SSL/TLS certificate for new DB instances that is different from the default certificate provided by RDS. You can also use this operation to remove the override, so that new DB instances use the default certificate provided by RDS.

You might need to override the default certificate in the following situations:

  • You already migrated your applications to support the latest certificate authority (CA) certificate, but the new CA certificate is not yet the RDS default CA certificate for the specified AWS Region.

  • RDS has already moved to a new default CA certificate for the specified AWS Region, but you are still in the process of supporting the new CA certificate. In this case, you temporarily need additional time to finish your application changes.

For more information about rotating your SSL/TLS certificate for RDS DB engines, see Rotating Your SSL/TLS Certificate in the Amazon RDS User Guide.

For more information about rotating your SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS Certificate in the Amazon Aurora User Guide.
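
For illustration, a hedged sketch of setting and then removing the override. The CertificateIdentifier and RemoveCustomerOverride parameter names are assumed from the ModifyCertificatesMessage shape, which isn't reproduced in this hunk, and the certificate name is hypothetical.

```python
import boto3

rds = boto3.client("rds")

# Pin new DB instances in this Region to a specific CA certificate.
rds.modify_certificates(CertificateIdentifier="rds-ca-2019")  # assumed parameter name

# The override shows up on the certificate via the new fields added below.
cert = rds.describe_certificates(CertificateIdentifier="rds-ca-2019")["Certificates"][0]
print(cert.get("CustomerOverride"), cert.get("CustomerOverrideValidTill"))

# Later, remove the override so new instances use the RDS default again.
rds.modify_certificates(RemoveCustomerOverride=True)  # assumed parameter name
```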

" + }, "ModifyCurrentDBClusterCapacity":{ "name":"ModifyCurrentDBClusterCapacity", "http":{ @@ -1739,7 +1788,7 @@ {"shape":"SNSTopicArnNotFoundFault"}, {"shape":"SubscriptionCategoryNotFoundFault"} ], - "documentation":"

Modifies an existing RDS event notification subscription. Note that you can't modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

" + "documentation":"

Modifies an existing RDS event notification subscription. You can't modify the source identifiers using this call. To change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

" }, "ModifyGlobalCluster":{ "name":"ModifyGlobalCluster", @@ -2038,7 +2087,7 @@ {"shape":"DomainNotFoundFault"}, {"shape":"DBClusterParameterGroupNotFoundFault"} ], - "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster. If you don't specify a security group, the new DB cluster is associated with the default security group.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" + "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot. This action only applies to Aurora DB clusters.

The target DB cluster is created from the source snapshot with a default configuration. If you don't specify a security group, the new DB cluster is associated with the default security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterFromSnapshot action has completed and the DB cluster is available.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.
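
For illustration, a hedged two-step sketch of the restore-then-create flow called out above; identifiers, engine, and instance class are hypothetical.

```python
import boto3

rds = boto3.client("rds")

# Step 1: restore the DB cluster itself from the snapshot.
rds.restore_db_cluster_from_snapshot(
    DBClusterIdentifier="restored-cluster",
    SnapshotIdentifier="aurora-cluster1-snapshot-20161115",
    Engine="aurora-mysql",
)

# Step 2: the restore creates no DB instances, so once the cluster is
# available, attach at least one instance to it.
rds.create_db_instance(
    DBInstanceIdentifier="restored-cluster-instance-1",
    DBInstanceClass="db.r5.large",
    Engine="aurora-mysql",
    DBClusterIdentifier="restored-cluster",
)
```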

" }, "RestoreDBClusterToPointInTime":{ "name":"RestoreDBClusterToPointInTime", @@ -2260,6 +2309,30 @@ ], "documentation":"

Starts an Amazon RDS DB instance that was stopped using the AWS console, the stop-db-instance AWS CLI command, or the StopDBInstance action.

For more information, see Starting an Amazon RDS DB instance That Was Previously Stopped in the Amazon RDS User Guide.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora DB clusters, use StartDBCluster instead.

" }, + "StartExportTask":{ + "name":"StartExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartExportTaskMessage"}, + "output":{ + "shape":"ExportTask", + "resultWrapper":"StartExportTaskResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"ExportTaskAlreadyExistsFault"}, + {"shape":"InvalidS3BucketFault"}, + {"shape":"IamRoleNotFoundFault"}, + {"shape":"IamRoleMissingPermissionsFault"}, + {"shape":"InvalidExportOnlyFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"InvalidExportSourceStateFault"} + ], + "documentation":"

Starts an export of a snapshot to Amazon S3. The provided IAM role must have access to the S3 bucket.
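
For illustration, a hedged boto3 sketch. The request parameter names, in particular S3BucketName and S3Prefix, are assumed from the StartExportTaskMessage shape, which isn't reproduced in this hunk; the ARNs, bucket, and key are hypothetical.

```python
import boto3

rds = boto3.client("rds")

task = rds.start_export_task(
    ExportTaskIdentifier="orders-snapshot-export",
    SourceArn="arn:aws:rds:us-east-1:123456789012:snapshot:orders-2020-02-01",
    S3BucketName="my-export-bucket",  # assumed parameter name
    S3Prefix="rds-exports/orders",    # assumed parameter name
    IamRoleArn="arn:aws:iam::123456789012:role/rds-s3-export-role",
    KmsKeyId="alias/rds-export-key",
)
print(task["Status"])
```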

" + }, "StopActivityStream":{ "name":"StopActivityStream", "http":{ @@ -2346,7 +2419,7 @@ "documentation":"

The maximum allowed value for the quota.

" } }, - "documentation":"

Describes a quota for an AWS account.

The following are account quotas:

  • AllocatedStorage - The total allocated storage per account, in GiB. The used value is the total allocated storage in the account, in GiB.

  • AuthorizationsPerDBSecurityGroup - The number of ingress rules per DB security group. The used value is the highest number of ingress rules in a DB security group in the account. Other DB security groups in the account might have a lower number of ingress rules.

  • CustomEndpointsPerDBCluster - The number of custom endpoints per DB cluster. The used value is the highest number of custom endpoints in a DB clusters in the account. Other DB clusters in the account might have a lower number of custom endpoints.

  • DBClusterParameterGroups - The number of DB cluster parameter groups per account, excluding default parameter groups. The used value is the count of nondefault DB cluster parameter groups in the account.

  • DBClusterRoles - The number of associated AWS Identity and Access Management (IAM) roles per DB cluster. The used value is the highest number of associated IAM roles for a DB cluster in the account. Other DB clusters in the account might have a lower number of associated IAM roles.

  • DBClusters - The number of DB clusters per account. The used value is the count of DB clusters in the account.

  • DBInstanceRoles - The number of associated IAM roles per DB instance. The used value is the highest number of associated IAM roles for a DB instance in the account. Other DB instances in the account might have a lower number of associated IAM roles.

  • DBInstances - The number of DB instances per account. The used value is the count of the DB instances in the account.

  • DBParameterGroups - The number of DB parameter groups per account, excluding default parameter groups. The used value is the count of nondefault DB parameter groups in the account.

  • DBSecurityGroups - The number of DB security groups (not VPC security groups) per account, excluding the default security group. The used value is the count of nondefault DB security groups in the account.

  • DBSubnetGroups - The number of DB subnet groups per account. The used value is the count of the DB subnet groups in the account.

  • EventSubscriptions - The number of event subscriptions per account. The used value is the count of the event subscriptions in the account.

  • ManualSnapshots - The number of manual DB snapshots per account. The used value is the count of the manual DB snapshots in the account.

  • OptionGroups - The number of DB option groups per account, excluding default option groups. The used value is the count of nondefault DB option groups in the account.

  • ReadReplicasPerMaster - The number of Read Replicas per DB instance. The used value is the highest number of Read Replicas for a DB instance in the account. Other DB instances in the account might have a lower number of Read Replicas.

  • ReservedDBInstances - The number of reserved DB instances per account. The used value is the count of the active reserved DB instances in the account.

  • SubnetsPerDBSubnetGroup - The number of subnets per DB subnet group. The used value is highest number of subnets for a DB subnet group in the account. Other DB subnet groups in the account might have a lower number of subnets.

For more information, see Limits in the Amazon RDS User Guide and Limits in the Amazon Aurora User Guide.

", + "documentation":"

Describes a quota for an AWS account.

The following are account quotas:

  • AllocatedStorage - The total allocated storage per account, in GiB. The used value is the total allocated storage in the account, in GiB.

  • AuthorizationsPerDBSecurityGroup - The number of ingress rules per DB security group. The used value is the highest number of ingress rules in a DB security group in the account. Other DB security groups in the account might have a lower number of ingress rules.

  • CustomEndpointsPerDBCluster - The number of custom endpoints per DB cluster. The used value is the highest number of custom endpoints in a DB cluster in the account. Other DB clusters in the account might have a lower number of custom endpoints.

  • DBClusterParameterGroups - The number of DB cluster parameter groups per account, excluding default parameter groups. The used value is the count of nondefault DB cluster parameter groups in the account.

  • DBClusterRoles - The number of associated AWS Identity and Access Management (IAM) roles per DB cluster. The used value is the highest number of associated IAM roles for a DB cluster in the account. Other DB clusters in the account might have a lower number of associated IAM roles.

  • DBClusters - The number of DB clusters per account. The used value is the count of DB clusters in the account.

  • DBInstanceRoles - The number of associated IAM roles per DB instance. The used value is the highest number of associated IAM roles for a DB instance in the account. Other DB instances in the account might have a lower number of associated IAM roles.

  • DBInstances - The number of DB instances per account. The used value is the count of the DB instances in the account.

    Amazon RDS DB instances, Amazon Aurora DB instances, Amazon Neptune instances, and Amazon DocumentDB instances all count toward this quota.

  • DBParameterGroups - The number of DB parameter groups per account, excluding default parameter groups. The used value is the count of nondefault DB parameter groups in the account.

  • DBSecurityGroups - The number of DB security groups (not VPC security groups) per account, excluding the default security group. The used value is the count of nondefault DB security groups in the account.

  • DBSubnetGroups - The number of DB subnet groups per account. The used value is the count of the DB subnet groups in the account.

  • EventSubscriptions - The number of event subscriptions per account. The used value is the count of the event subscriptions in the account.

  • ManualSnapshots - The number of manual DB snapshots per account. The used value is the count of the manual DB snapshots in the account.

  • OptionGroups - The number of DB option groups per account, excluding default option groups. The used value is the count of nondefault DB option groups in the account.

  • ReadReplicasPerMaster - The number of Read Replicas per DB instance. The used value is the highest number of Read Replicas for a DB instance in the account. Other DB instances in the account might have a lower number of Read Replicas.

  • ReservedDBInstances - The number of reserved DB instances per account. The used value is the count of the active reserved DB instances in the account.

  • SubnetsPerDBSubnetGroup - The number of subnets per DB subnet group. The used value is the highest number of subnets for a DB subnet group in the account. Other DB subnet groups in the account might have a lower number of subnets.

For more information, see Quotas for Amazon RDS in the Amazon RDS User Guide and Quotas for Amazon Aurora in the Amazon Aurora User Guide.
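
For illustration, a hedged sketch of reading these quotas. It assumes the DescribeAccountAttributes operation (not part of this hunk) returns AccountQuota entries with AccountQuotaName, Used, and Max members.

```python
import boto3

rds = boto3.client("rds")

# Print how close the account is to each quota described above.
for quota in rds.describe_account_attributes()["AccountQuotas"]:
    print("{0}: {1}/{2}".format(
        quota["AccountQuotaName"], quota["Used"], quota["Max"]))
```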

", "wrapper":true }, "AccountQuotaList":{ @@ -2664,6 +2737,16 @@ }, "Boolean":{"type":"boolean"}, "BooleanOptional":{"type":"boolean"}, + "CancelExportTaskMessage":{ + "type":"structure", + "required":["ExportTaskIdentifier"], + "members":{ + "ExportTaskIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the snapshot export task to cancel.

" + } + } + }, "Certificate":{ "type":"structure", "members":{ @@ -2690,6 +2773,14 @@ "CertificateArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) for the certificate.

" + }, + "CustomerOverride":{ + "shape":"BooleanOptional", + "documentation":"

Whether there is an override for the default certificate identifier.

" + }, + "CustomerOverrideValidTill":{ + "shape":"TStamp", + "documentation":"

If there is an override for the default certificate identifier, when the override expires.

" } }, "documentation":"

A CA certificate for an AWS account.

", @@ -2854,11 +2945,11 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

To copy an encrypted DB cluster snapshot to another AWS Region, you must set KmsKeyId to the KMS key ID you want to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, an error is returned.

" + "documentation":"

The AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

To copy an encrypted DB cluster snapshot to another AWS Region, you must set KmsKeyId to the KMS key ID you want to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, an error is returned.

" }, "PreSignedUrl":{ "shape":"String", - "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source DB cluster snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another AWS Region. Don't specify PreSignedUrl when you are copying an encrypted DB cluster snapshot in the same AWS Region.

The pre-signed URL must be a valid request for the CopyDBSClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

  • DestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.

" + "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source DB cluster snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another AWS Region. Don't specify PreSignedUrl when you are copying an encrypted DB cluster snapshot in the same AWS Region.

The pre-signed URL must be a valid request for the CopyDBSClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

  • DestinationRegion - The name of the AWS Region that the DB cluster snapshot is to be created in.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.

" }, "CopyTags":{ "shape":"BooleanOptional", @@ -3059,7 +3150,7 @@ }, "DatabaseName":{ "shape":"String", - "documentation":"

The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon RDS will not create a database in the DB cluster you are creating.

" + "documentation":"

The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating.

" }, "DBClusterIdentifier":{ "shape":"String", @@ -3143,7 +3234,7 @@ }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

Limitations and requirements apply to some DB engine modes. For more information, see the Amazon Aurora User Guide.
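
For illustration, a hedged sketch of choosing an engine mode at cluster creation. The ScalingConfiguration member names are assumptions (that shape isn't reproduced in this hunk), and the identifiers and credentials are hypothetical.

```python
import boto3

rds = boto3.client("rds")

# An Aurora Serverless cluster; other EngineMode values are provisioned,
# parallelquery, global, and multimaster.
rds.create_db_cluster(
    DBClusterIdentifier="my-serverless-cluster",
    Engine="aurora",
    EngineMode="serverless",
    MasterUsername="admin",
    MasterUserPassword="change-me-please",
    ScalingConfiguration={  # assumed member names
        "MinCapacity": 2,
        "MaxCapacity": 8,
        "AutoPause": True,
        "SecondsUntilAutoPause": 300,
    },
)
```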

" }, "ScalingConfiguration":{ "shape":"ScalingConfiguration", @@ -3329,7 +3420,7 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"

Indicates that the DB instance should be associated with the specified option group.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance

" + "documentation":"

Indicates that the DB instance should be associated with the specified option group.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance once it is associated with a DB instance.

" }, "CharacterSetName":{ "shape":"String", @@ -3773,7 +3864,7 @@ }, "SourceIds":{ "shape":"SourceIdsList", - "documentation":"

The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.

Constraints:

  • If SourceIds are supplied, SourceType must also be provided.

  • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.

  • If the source type is a DB security group, a DBSecurityGroupName must be supplied.

  • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.

  • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.

" + "documentation":"

The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.

Constraints:

  • If SourceIds are supplied, SourceType must also be provided.

  • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.

  • If the source type is a DB security group, a DBSecurityGroupName must be supplied.

  • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.

  • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.

" }, "Enabled":{ "shape":"BooleanOptional", @@ -4207,7 +4298,7 @@ "members":{ "Marker":{ "shape":"String", - "documentation":"

A pagination token that can be used in a subsequent DescribeDBClusterBacktracks request.

" + "documentation":"

A pagination token that can be used in a later DescribeDBClusterBacktracks request.

" }, "DBClusterBacktracks":{ "shape":"DBClusterBacktrackList", @@ -4397,7 +4488,7 @@ "members":{ "Marker":{ "shape":"String", - "documentation":"

A pagination token that can be used in a subsequent DescribeDBClusters request.

" + "documentation":"

A pagination token that can be used in a later DescribeDBClusters request.

" }, "DBClusters":{ "shape":"DBClusterList", @@ -5628,7 +5719,7 @@ }, "TrackedClusterId":{ "shape":"String", - "documentation":"

The DB cluster identifier when the target represents an Aurora DB cluster. This field is blank when the target represents an

" + "documentation":"

The DB cluster identifier when the target represents an Aurora DB cluster. This field is blank when the target represents an RDS DB instance.

" }, "RdsResourceId":{ "shape":"String", @@ -6294,7 +6385,7 @@ }, "SkipFinalSnapshot":{ "shape":"Boolean", - "documentation":"

A value that indicates whether to skip the creation of a final DB snapshot before the DB instance is deleted. If skip is specified, no DB snapshot is created. If skip isn't specified, a DB snapshot is created before the DB instance is deleted. By default, skip isn't specified, and the DB snapshot is created.

Note that when a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when skip is specified.

Specify skip when deleting a Read Replica.

The FinalDBSnapshotIdentifier parameter must be specified if skip isn't specified.

" + "documentation":"

A value that indicates whether to skip the creation of a final DB snapshot before the DB instance is deleted. If skip is specified, no DB snapshot is created. If skip isn't specified, a DB snapshot is created before the DB instance is deleted. By default, skip isn't specified, and the DB snapshot is created.

When a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when skip is specified.

Specify skip when deleting a Read Replica.

The FinalDBSnapshotIdentifier parameter must be specified if skip isn't specified.

" }, "FinalDBSnapshotIdentifier":{ "shape":"String", @@ -6856,7 +6947,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

A pagination token that can be used in a subsequent DescribeDBLogFiles request.

" + "documentation":"

A pagination token that can be used in a later DescribeDBLogFiles request.

" } }, "documentation":"

The response from a call to DescribeDBLogFiles.

" @@ -7260,6 +7351,31 @@ }, "documentation":"

" }, + "DescribeExportTasksMessage":{ + "type":"structure", + "members":{ + "ExportTaskIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the snapshot export task to be described.

" + }, + "SourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the snapshot exported to Amazon S3.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters specify one or more snapshot exports to describe. The filters are specified as name-value pairs that define what to include in the output.

Supported filters include the following:

  • export-task-identifier - An identifier for the snapshot export task.

  • s3-bucket - The Amazon S3 bucket the snapshot is exported to.

  • source-arn - The Amazon Resource Name (ARN) of the snapshot exported to Amazon S3

  • status - The status of the export task.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeExportTasks request. If you specify this parameter, the response includes only records beyond the marker, up to the value specified by the MaxRecords parameter.

" + }, + "MaxRecords":{ + "shape":"String", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified value, a pagination token called a marker is included in the response. You can use the marker in a later DescribeExportTasks request to retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

" + } + } + }, "DescribeGlobalClustersMessage":{ "type":"structure", "members":{ @@ -7625,7 +7741,7 @@ }, "Marker":{ "shape":"String", - "documentation":"

A pagination token that can be used in a subsequent DownloadDBLogFilePortion request.

" + "documentation":"

A pagination token that can be used in a later DownloadDBLogFilePortion request.

" }, "AdditionalDataPending":{ "shape":"Boolean", @@ -7904,6 +8020,116 @@ }, "documentation":"

Contains the result of a successful invocation of the DescribeEvents action.

" }, + "ExportTask":{ + "type":"structure", + "members":{ + "ExportTaskIdentifier":{ + "shape":"String", + "documentation":"

A unique identifier for the snapshot export task. This ID isn't an identifier for the Amazon S3 bucket where the snapshot is exported to.

" + }, + "SourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the snapshot exported to Amazon S3.

" + }, + "ExportOnly":{ + "shape":"StringList", + "documentation":"

The data exported from the snapshot. Valid values are the following:

  • database - Export all the data of the snapshot.

  • database.table [table-name] - Export a table of the snapshot.

  • database.schema [schema-name] - Export a database schema of the snapshot. This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL.

  • database.schema.table [table-name] - Export a table of the database schema. This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL.

" + }, + "SnapshotTime":{ + "shape":"TStamp", + "documentation":"

The time that the snapshot was created.

" + }, + "TaskStartTime":{ + "shape":"TStamp", + "documentation":"

The time that the snapshot export task started.

" + }, + "TaskEndTime":{ + "shape":"TStamp", + "documentation":"

The time that the snapshot export task completed.

" + }, + "S3Bucket":{ + "shape":"String", + "documentation":"

The Amazon S3 bucket that the snapshot is exported to.

" + }, + "S3Prefix":{ + "shape":"String", + "documentation":"

The Amazon S3 bucket prefix that is the file name and path of the exported snapshot.

" + }, + "IamRoleArn":{ + "shape":"String", + "documentation":"

The name of the IAM role that is used to write to Amazon S3 when exporting a snapshot.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The ID of the AWS KMS key that is used to encrypt the snapshot when it's exported to Amazon S3. The KMS key ID is the Amazon Resource Name (ARN), the KMS key identifier, or the KMS key alias for the KMS encryption key. The IAM role used for the snapshot export must have encryption and decryption permissions to use this KMS key.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The progress status of the export task.

" + }, + "PercentProgress":{ + "shape":"Integer", + "documentation":"

The progress of the snapshot export task as a percentage.

" + }, + "TotalExtractedDataInGB":{ + "shape":"Integer", + "documentation":"

The total amount of data exported, in gigabytes.

" + }, + "FailureCause":{ + "shape":"String", + "documentation":"

The reason the export failed, if it failed.

" + }, + "WarningMessage":{ + "shape":"String", + "documentation":"

A warning about the snapshot export task.

" + } + }, + "documentation":"

Contains the details of a snapshot export to Amazon S3.

This data type is used as a response element in the DescribeExportTasks action.

" + }, + "ExportTaskAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You can't start an export task that's already running.

", + "error":{ + "code":"ExportTaskAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ExportTaskNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The export task doesn't exist.

", + "error":{ + "code":"ExportTaskNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ExportTasksList":{ + "type":"list", + "member":{ + "shape":"ExportTask", + "locationName":"ExportTask" + } + }, + "ExportTasksMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

A pagination token that can be used in a later DescribeExportTasks request. A marker is used for pagination to identify the location to begin output for the next response of DescribeExportTasks.

" + }, + "ExportTasks":{ + "shape":"ExportTasksList", + "documentation":"

Information about an export of a snapshot to Amazon S3.

" + } + } + }, "FailoverDBClusterMessage":{ "type":"structure", "required":["DBClusterIdentifier"], @@ -8118,6 +8344,30 @@ "locationName":"IPRange" } }, + "IamRoleMissingPermissionsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The IAM role requires additional permissions to export to an Amazon S3 bucket.

", + "error":{ + "code":"IamRoleMissingPermissions", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "IamRoleNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The IAM role required for exporting to an Amazon S3 bucket is missing.

", + "error":{ + "code":"IamRoleNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, "ImportInstallationMediaMessage":{ "type":"structure", "required":[ @@ -8460,6 +8710,42 @@ }, "exception":true }, + "InvalidExportOnlyFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The export is invalid for exporting to an Amazon S3 bucket.

", + "error":{ + "code":"InvalidExportOnly", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidExportSourceStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The state of the export snapshot is invalid for exporting to an Amazon S3 bucket.

", + "error":{ + "code":"InvalidExportSourceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidExportTaskStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You can't cancel an export task that has completed.

", + "error":{ + "code":"InvalidExportTaskStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidGlobalClusterStateFault":{ "type":"structure", "members":{ @@ -8595,6 +8881,25 @@ "locationName":"MinimumEngineVersionPerAllowedValue" } }, + "ModifyCertificatesMessage":{ + "type":"structure", + "members":{ + "CertificateIdentifier":{ + "shape":"String", + "documentation":"

The new default certificate identifier to override the current one with.

To determine the valid values, use the describe-certificates AWS CLI command or the DescribeCertificates API operation.

" + }, + "RemoveCustomerOverride":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether to remove the override for the default certificate. If the override is removed, the default certificate is the system default.

" + } + } + }, + "ModifyCertificatesResult":{ + "type":"structure", + "members":{ + "Certificate":{"shape":"Certificate"} + } + }, "ModifyCurrentDBClusterCapacityMessage":{ "type":"structure", "required":["DBClusterIdentifier"], @@ -8953,6 +9258,10 @@ "MaxAllocatedStorage":{ "shape":"IntegerOptional", "documentation":"

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

" + }, + "CertificateRotationRestart":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether the DB instance is restarted when you rotate your SSL/TLS certificate.

By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted.

Set this parameter only if you are not using SSL/TLS to connect to the DB instance.

If you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate:

" } }, "documentation":"

" @@ -8976,7 +9285,7 @@ }, "Parameters":{ "shape":"ParametersList", - "documentation":"

An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request.

Valid Values (for the application method): immediate | pending-reboot

You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when you reboot the DB instance without failover.

" + "documentation":"

An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; later arguments are optional. A maximum of 20 parameters can be modified in a single request.

Valid Values (for the application method): immediate | pending-reboot

You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when you reboot the DB instance without failover.

" } }, "documentation":"

" @@ -11668,6 +11977,46 @@ "DBInstance":{"shape":"DBInstance"} } }, + "StartExportTaskMessage":{ + "type":"structure", + "required":[ + "ExportTaskIdentifier", + "SourceArn", + "S3BucketName", + "IamRoleArn", + "KmsKeyId" + ], + "members":{ + "ExportTaskIdentifier":{ + "shape":"String", + "documentation":"

A unique identifier for the snapshot export task. This ID isn't an identifier for the Amazon S3 bucket where the snapshot is to be exported to.

" + }, + "SourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the snapshot to export to Amazon S3.

" + }, + "S3BucketName":{ + "shape":"String", + "documentation":"

The name of the Amazon S3 bucket to export the snapshot to.

" + }, + "IamRoleArn":{ + "shape":"String", + "documentation":"

The name of the IAM role to use for writing to the Amazon S3 bucket when exporting a snapshot.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The ID of the AWS KMS key to use to encrypt the snapshot exported to Amazon S3. The KMS key ID is the Amazon Resource Name (ARN), the KMS key identifier, or the KMS key alias for the KMS encryption key. The IAM role used for the snapshot export must have encryption and decryption permissions to use this KMS key.

" + }, + "S3Prefix":{ + "shape":"String", + "documentation":"

The Amazon S3 bucket prefix to use as the file name and path of the exported snapshot.

" + }, + "ExportOnly":{ + "shape":"StringList", + "documentation":"

The data to be exported from the snapshot. If this parameter is not provided, all the snapshot data is exported. Valid values are the following:

  • database - Export all the data of the snapshot.

  • database.table [table-name] - Export a table of the snapshot.

  • database.schema [schema-name] - Export a database schema of the snapshot. This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL.

  • database.schema.table [table-name] - Export a table of the database schema. This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL.

" + } + } + }, "StopActivityStreamRequest":{ "type":"structure", "required":["ResourceArn"], @@ -12118,5 +12467,5 @@ "documentation":"

Information about the virtual private network (VPN) between the VMware vSphere cluster and the AWS website.

For more information about RDS on VMware, see the RDS on VMware User Guide.

" } }, - "documentation":"Amazon Relational Database Service

Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizeable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, Microsoft SQL Server, Oracle, or Amazon Aurora database server. These capabilities mean that the code, applications, and tools you already use today with your existing databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your DB instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

This interface reference for Amazon RDS contains documentation for a programming or command line interface you can use to manage Amazon RDS. Note that Amazon RDS is asynchronous, which means that some interfaces might require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. The reference structure is as follows, and we list following some related topics from the user guide.

Amazon RDS API Reference

Amazon RDS User Guide

" + "documentation":"Amazon Relational Database Service

Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizeable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, Microsoft SQL Server, Oracle, or Amazon Aurora database server. These capabilities mean that the code, applications, and tools you already use today with your existing databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your DB instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

This interface reference for Amazon RDS contains documentation for a programming or command line interface you can use to manage Amazon RDS. Amazon RDS is asynchronous, which means that some interfaces might require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. The reference structure is as follows, and following it we list some related topics from the user guide.

Amazon RDS API Reference

Amazon RDS User Guide

" } diff --git a/botocore/data/redshift/2012-12-01/service-2.json b/botocore/data/redshift/2012-12-01/service-2.json index 25089bfb..6f6ac039 100644 --- a/botocore/data/redshift/2012-12-01/service-2.json +++ b/botocore/data/redshift/2012-12-01/service-2.json @@ -104,7 +104,7 @@ {"shape":"InvalidRetentionPeriodFault"}, {"shape":"BatchModifyClusterSnapshotsLimitExceededFault"} ], - "documentation":"

Modifies the settings for a list of snapshots.

" + "documentation":"

Modifies the settings for a set of cluster snapshots.

" }, "CancelResize":{ "name":"CancelResize", @@ -123,7 +123,7 @@ {"shape":"InvalidClusterStateFault"}, {"shape":"UnsupportedOperationFault"} ], - "documentation":"

Cancels a resize operation.

" + "documentation":"

Cancels a resize operation for a cluster.

" }, "CopyClusterSnapshot":{ "name":"CopyClusterSnapshot", @@ -180,7 +180,7 @@ {"shape":"SnapshotScheduleNotFoundFault"}, {"shape":"InvalidRetentionPeriodFault"} ], - "documentation":"

Creates a new cluster.

To create a cluster in Virtual Private Cloud (VPC), you must provide a cluster subnet group name. The cluster subnet group identifies the subnets of your VPC that Amazon Redshift uses when creating the cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Creates a new cluster with the specified parameters.

To create a cluster in Virtual Private Cloud (VPC), you must provide a cluster subnet group name. The cluster subnet group identifies the subnets of your VPC that Amazon Redshift uses when creating the cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" }, "CreateClusterParameterGroup":{ "name":"CreateClusterParameterGroup", @@ -389,7 +389,7 @@ {"shape":"TagLimitExceededFault"}, {"shape":"ScheduleDefinitionTypeUnsupportedFault"} ], - "documentation":"

Creates a new snapshot schedule.

" + "documentation":"

Creates a snapshot schedule with a rate of every 12 hours.

" }, "CreateTags":{ "name":"CreateTags", @@ -403,7 +403,7 @@ {"shape":"ResourceNotFoundFault"}, {"shape":"InvalidTagFault"} ], - "documentation":"

Adds one or more tags to a specified resource.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, you will receive an error and the attempt will fail.

If you specify a key that already exists for the resource, the value for that key will be updated with the new value.

" + "documentation":"

Adds tags to a cluster.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, you will receive an error and the attempt will fail.

If you specify a key that already exists for the resource, the value for that key will be updated with the new value.

" }, "DeleteCluster":{ "name":"DeleteCluster", @@ -423,7 +423,7 @@ {"shape":"ClusterSnapshotQuotaExceededFault"}, {"shape":"InvalidRetentionPeriodFault"} ], - "documentation":"

Deletes a previously provisioned cluster. A successful response from the web service indicates that the request was received correctly. Use DescribeClusters to monitor the status of the deletion. The delete operation cannot be canceled or reverted once submitted. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

If you want to shut down the cluster and retain it for future use, set SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier. You can later restore this snapshot to resume using the cluster. If a final cluster snapshot is requested, the status of the cluster will be \"final-snapshot\" while the snapshot is being taken, then it's \"deleting\" once Amazon Redshift begins deleting the cluster.

For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Deletes a previously provisioned cluster without its final snapshot being created. A successful response from the web service indicates that the request was received correctly. Use DescribeClusters to monitor the status of the deletion. The delete operation cannot be canceled or reverted once submitted. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

If you want to shut down the cluster and retain it for future use, set SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier. You can later restore this snapshot to resume using the cluster. If a final cluster snapshot is requested, the status of the cluster will be \"final-snapshot\" while the snapshot is being taken, then it's \"deleting\" once Amazon Redshift begins deleting the cluster.

For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" }, "DeleteClusterParameterGroup":{ "name":"DeleteClusterParameterGroup", @@ -571,7 +571,7 @@ {"shape":"ResourceNotFoundFault"}, {"shape":"InvalidTagFault"} ], - "documentation":"

Deletes a tag or tags from a resource. You must provide the ARN of the resource from which you want to delete the tag or tags.

" + "documentation":"

Deletes tags from a resource. You must provide the ARN of the resource from which you want to delete the tag or tags.

" }, "DescribeAccountAttributes":{ "name":"DescribeAccountAttributes", @@ -982,7 +982,7 @@ "shape":"CustomerStorageMessage", "resultWrapper":"DescribeStorageResult" }, - "documentation":"

Returns the total amount of snapshot usage and provisioned storage in megabytes.

" + "documentation":"

Returns account-level backup storage size and provisioned storage.

" }, "DescribeTableRestoreStatus":{ "name":"DescribeTableRestoreStatus", @@ -1169,7 +1169,7 @@ {"shape":"InvalidClusterTrackFault"}, {"shape":"InvalidRetentionPeriodFault"} ], - "documentation":"

Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred maintenance window, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

" + "documentation":"

Modifies the settings for a cluster.

You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

You can add another security or parameter group, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" }, "ModifyClusterDbRevision":{ "name":"ModifyClusterDbRevision", @@ -1220,7 +1220,7 @@ "errors":[ {"shape":"ClusterNotFoundFault"} ], - "documentation":"

Modifies the maintenance settings of a cluster. For example, you can defer a maintenance window. You can also update or cancel a deferment.

" + "documentation":"

Modifies the maintenance settings of a cluster.

" }, "ModifyClusterParameterGroup":{ "name":"ModifyClusterParameterGroup", @@ -1255,7 +1255,7 @@ {"shape":"ClusterSnapshotNotFoundFault"}, {"shape":"InvalidRetentionPeriodFault"} ], - "documentation":"

Modifies the settings for a snapshot.

" + "documentation":"

Modifies the settings for a snapshot.

This example modifies the manual retention period setting for a cluster snapshot.

" }, "ModifyClusterSnapshotSchedule":{ "name":"ModifyClusterSnapshotSchedule", @@ -1334,7 +1334,7 @@ {"shape":"InvalidScheduledActionFault"}, {"shape":"UnauthorizedOperation"} ], - "documentation":"

Modify a scheduled action.

" + "documentation":"

Modifies a scheduled action.

" }, "ModifySnapshotCopyRetentionPeriod":{ "name":"ModifySnapshotCopyRetentionPeriod", @@ -1449,7 +1449,7 @@ {"shape":"UnauthorizedOperation"}, {"shape":"LimitExceededFault"} ], - "documentation":"

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:

    • dc2.large

    • dc2.8xlarge

    • ds2.xlarge

    • ds2.8xlarge

  • The type of nodes that you add must match the node type for the cluster.

" + "documentation":"

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:

    • dc2.large

    • dc2.8xlarge

    • ds2.xlarge

    • ds2.8xlarge

    • ra3.16xlarge

  • The type of nodes that you add must match the node type for the cluster.

" }, "RestoreFromClusterSnapshot":{ "name":"RestoreFromClusterSnapshot", @@ -2828,7 +2828,7 @@ }, "NodeType":{ "shape":"String", - "documentation":"

The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge

" + "documentation":"

The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.16xlarge

" }, "MasterUsername":{ "shape":"String", @@ -2852,7 +2852,7 @@ }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency.

Default: A random, system-chosen Availability Zone in the region that is specified by the endpoint.

Example: us-east-1d

Constraint: The specified Availability Zone must be in the same region as the current endpoint.

" + "documentation":"

The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency.

Default: A random, system-chosen Availability Zone in the region that is specified by the endpoint.

Example: us-east-2d

Constraint: The specified Availability Zone must be in the same region as the current endpoint.

" }, "PreferredMaintenanceWindow":{ "shape":"String", @@ -3086,7 +3086,7 @@ }, "SourceType":{ "shape":"String", - "documentation":"

The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

Valid values: cluster, cluster-parameter-group, cluster-security-group, and cluster-snapshot.

" + "documentation":"

The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

Valid values: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot, and scheduled-action.

" }, "SourceIds":{ "shape":"SourceIdsList", @@ -3292,7 +3292,7 @@ "members":{ "ResourceName":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) to which you want to add the tag or tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

" + "documentation":"

The Amazon Resource Name (ARN) to which you want to add the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1.

" }, "Tags":{ "shape":"TagList", @@ -3558,7 +3558,7 @@ "members":{ "ResourceName":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) from which you want to remove the tag or tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

" + "documentation":"

The Amazon Resource Name (ARN) from which you want to remove the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -3867,7 +3867,7 @@ "members":{ "SourceType":{ "shape":"String", - "documentation":"

The source type, such as cluster or parameter group, to which the described event categories apply.

Valid values: cluster, cluster-snapshot, cluster-parameter-group, and cluster-security-group.

" + "documentation":"

The source type, such as cluster or parameter group, to which the described event categories apply.

Valid values: cluster, cluster-snapshot, cluster-parameter-group, cluster-security-group, and scheduled-action.

" } }, "documentation":"

" @@ -4231,7 +4231,7 @@ "members":{ "ResourceName":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

" + "documentation":"

The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1.

" }, "ResourceType":{ "shape":"String", @@ -4543,7 +4543,7 @@ }, "SourceType":{ "shape":"String", - "documentation":"

The source type of the events returned the Amazon Redshift event notification, such as cluster, or cluster-snapshot.

" + "documentation":"

The source type of the events returned by the Amazon Redshift event notification, such as cluster, cluster-snapshot, cluster-parameter-group, cluster-security-group, or scheduled-action.

" }, "SourceIdsList":{ "shape":"SourceIdsList", @@ -5409,11 +5409,11 @@ }, "NodeType":{ "shape":"String", - "documentation":"

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

When you submit your request to resize a cluster, Amazon Redshift sets access permissions for the cluster to read-only. After Amazon Redshift provisions a new cluster according to your resize requirements, there will be a temporary outage while the old cluster is deleted and your connection is switched to the new cluster. When the new connection is complete, the original access permissions for the cluster are restored. You can use DescribeResize to track the progress of the resize request.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge

" + "documentation":"

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.16xlarge

" }, "NumberOfNodes":{ "shape":"IntegerOptional", - "documentation":"

The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter.

When you submit your request to resize a cluster, Amazon Redshift sets access permissions for the cluster to read-only. After Amazon Redshift provisions a new cluster according to your resize requirements, there will be a temporary outage while the old cluster is deleted and your connection is switched to the new cluster. When the new connection is complete, the original access permissions for the cluster are restored. You can use DescribeResize to track the progress of the resize request.

Valid Values: Integer greater than 0.

" + "documentation":"

The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: Integer greater than 0.

" }, "ClusterSecurityGroups":{ "shape":"ClusterSecurityGroupNameList", @@ -5481,7 +5481,7 @@ }, "Encrypted":{ "shape":"BooleanOptional", - "documentation":"

Indicates whether the cluster is encrypted. If the cluster is encrypted and you provide a value for the KmsKeyId parameter, we will encrypt the cluster with the provided KmsKeyId. If you don't provide a KmsKeyId, we will encrypt with the default key. In the China region we will use legacy encryption if you specify that the cluster is encrypted.

" + "documentation":"

Indicates whether the cluster is encrypted. If the value is true (encrypted) and you provide a value for the KmsKeyId parameter, we encrypt the cluster with the provided KmsKeyId. If you don't provide a KmsKeyId, we encrypt with the default key. In the China region we use legacy encryption if you specify that the cluster is encrypted.

If the value is false (not encrypted), then the cluster is decrypted.

" }, "KmsKeyId":{ "shape":"String", @@ -5598,7 +5598,7 @@ }, "SourceType":{ "shape":"String", - "documentation":"

The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

Valid values: cluster, cluster-parameter-group, cluster-security-group, and cluster-snapshot.

" + "documentation":"

The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

Valid values: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot, and scheduled-action.

" }, "SourceIds":{ "shape":"SourceIdsList", @@ -6447,7 +6447,7 @@ }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The Amazon EC2 Availability Zone in which to restore the cluster.

Default: A random, system-chosen Availability Zone.

Example: us-east-1a

" + "documentation":"

The Amazon EC2 Availability Zone in which to restore the cluster.

Default: A random, system-chosen Availability Zone.

Example: us-east-2a

" }, "AllowVersionUpgrade":{ "shape":"BooleanOptional", @@ -7737,7 +7737,7 @@ }, "ResourceName":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) with which the tag is associated, for example: arn:aws:redshift:us-east-1:123456789:cluster:t1.

" + "documentation":"

The Amazon Resource Name (ARN) with which the tag is associated, for example: arn:aws:redshift:us-east-2:123456789:cluster:t1.

" }, "ResourceType":{ "shape":"String", diff --git a/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json b/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json index 3c325b60..d405b011 100644 --- a/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json +++ b/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json @@ -152,11 +152,11 @@ "members":{ "NoncompliantKeys":{ "shape":"TagKeyList", - "documentation":"

The tag key is noncompliant with the effective tag policy.

" + "documentation":"

These tag keys on the resource are noncompliant with the effective tag policy.

" }, "KeysWithNoncompliantValues":{ "shape":"TagKeyList", - "documentation":"

The tag value is noncompliant with the effective tag policy.

" + "documentation":"

These are keys defined in the effective policy that are on the resource with either incorrect case treatment or noncompliant values.

" }, "ComplianceStatus":{ "shape":"ComplianceStatus", @@ -239,7 +239,7 @@ "documentation":"

The message of the common error.

" } }, - "documentation":"

Details of the common errors that all actions return.

" + "documentation":"

Information about the errors that are returned for each failed resource. This information can include InternalServiceException and InvalidParameterException errors. It can also include any valid error code returned by the AWS service that hosts the resource that the ARN key represents.

The following are common error codes that you might receive from other AWS services:

  • InternalServiceException – This can mean that the Resource Groups Tagging API didn't receive a response from another AWS service. It can also mean that the resource type in the request is not supported by the Resource Groups Tagging API. In these cases, it's safe to retry the request and then call GetResources to verify the changes.

  • AccessDeniedException – This can mean that you need permission to call tagging operations in the AWS service that contains the resource. For example, to use the Resource Groups Tagging API to tag a CloudWatch alarm resource, you need permission to call TagResources and TagResource in the CloudWatch API.

For more information on errors that are generated from other AWS services, see the documentation for that service.

" }, "GetComplianceSummaryInput":{ "type":"structure", @@ -635,7 +635,7 @@ "members":{ "FailedResourcesMap":{ "shape":"FailedResourcesMap", - "documentation":"

Details of resources that could not be tagged. An error code, status code, and error message are returned for each failed item.

" + "documentation":"

A map containing a key-value pair for each failed item that couldn't be tagged. The key is the ARN of the failed resource. The value is a FailureInfo object that contains an error code, a status code, and an error message. If there are no errors, the FailedResourcesMap is empty.

" } } }, @@ -711,5 +711,5 @@ } } }, - "documentation":"Resource Groups Tagging API

This guide describes the API operations for the resource groups tagging.

A tag is a label that you assign to an AWS resource. A tag consists of a key and a value, both of which you define. For example, if you have two Amazon EC2 instances, you might assign both a tag key of \"Stack.\" But the value of \"Stack\" might be \"Testing\" for one and \"Production\" for the other.

Tagging can help you organize your resources and enables you to simplify resource management, access management and cost allocation.

You can use the resource groups tagging API operations to complete the following tasks:

  • Tag and untag supported resources located in the specified Region for the AWS account.

  • Use tag-based filters to search for resources located in the specified Region for the AWS account.

  • List all existing tag keys in the specified Region for the AWS account.

  • List all existing values for the specified key in the specified Region for the AWS account.

To use resource groups tagging API operations, you must add the following permissions to your IAM policy:

  • tag:GetResources

  • tag:TagResources

  • tag:UntagResources

  • tag:GetTagKeys

  • tag:GetTagValues

You'll also need permissions to access the resources of individual services so that you can tag and untag those resources.

For more information on IAM policies, see Managing IAM Policies in the IAM User Guide.

You can use the Resource Groups Tagging API to tag resources for the following AWS services.

  • Alexa for Business (a4b)

  • API Gateway

  • Amazon AppStream

  • AWS AppSync

  • AWS App Mesh

  • Amazon Athena

  • Amazon Aurora

  • AWS Backup

  • AWS Certificate Manager

  • AWS Certificate Manager Private CA

  • Amazon Cloud Directory

  • AWS CloudFormation

  • Amazon CloudFront

  • AWS CloudHSM

  • AWS CloudTrail

  • Amazon CloudWatch (alarms only)

  • Amazon CloudWatch Events

  • Amazon CloudWatch Logs

  • AWS CodeBuild

  • AWS CodeCommit

  • AWS CodePipeline

  • AWS CodeStar

  • Amazon Cognito Identity

  • Amazon Cognito User Pools

  • Amazon Comprehend

  • AWS Config

  • AWS Data Pipeline

  • AWS Database Migration Service

  • AWS DataSync

  • AWS Direct Connect

  • AWS Directory Service

  • Amazon DynamoDB

  • Amazon EBS

  • Amazon EC2

  • Amazon ECR

  • Amazon ECS

  • AWS Elastic Beanstalk

  • Amazon Elastic File System

  • Elastic Load Balancing

  • Amazon ElastiCache

  • Amazon Elasticsearch Service

  • AWS Elemental MediaLive

  • AWS Elemental MediaPackage

  • AWS Elemental MediaTailor

  • Amazon EMR

  • Amazon FSx

  • Amazon S3 Glacier

  • AWS Glue

  • Amazon GuardDuty

  • Amazon Inspector

  • AWS IoT Analytics

  • AWS IoT Core

  • AWS IoT Device Defender

  • AWS IoT Device Management

  • AWS IoT Events

  • AWS IoT Greengrass

  • AWS Key Management Service

  • Amazon Kinesis

  • Amazon Kinesis Data Analytics

  • Amazon Kinesis Data Firehose

  • AWS Lambda

  • AWS License Manager

  • Amazon Machine Learning

  • Amazon MQ

  • Amazon MSK

  • Amazon Neptune

  • AWS OpsWorks

  • AWS Organizations

  • Amazon Quantum Ledger Database (QLDB)

  • Amazon RDS

  • Amazon Redshift

  • AWS Resource Access Manager

  • AWS Resource Groups

  • AWS RoboMaker

  • Amazon Route 53

  • Amazon Route 53 Resolver

  • Amazon S3 (buckets only)

  • Amazon SageMaker

  • AWS Secrets Manager

  • AWS Security Hub

  • AWS Service Catalog

  • Amazon Simple Notification Service (SNS)

  • Amazon Simple Queue Service (SQS)

  • AWS Step Functions

  • AWS Storage Gateway

  • AWS Systems Manager

  • AWS Transfer for SFTP

  • Amazon VPC

  • Amazon WorkSpaces

" + "documentation":"Resource Groups Tagging API

This guide describes the API operations for the Resource Groups Tagging API.

A tag is a label that you assign to an AWS resource. A tag consists of a key and a value, both of which you define. For example, if you have two Amazon EC2 instances, you might assign both a tag key of \"Stack.\" But the value of \"Stack\" might be \"Testing\" for one and \"Production\" for the other.

Tagging can help you organize your resources and simplify resource management, access management, and cost allocation.

You can use the resource groups tagging API operations to complete the following tasks:

  • Tag and untag supported resources located in the specified Region for the AWS account.

  • Use tag-based filters to search for resources located in the specified Region for the AWS account.

  • List all existing tag keys in the specified Region for the AWS account.

  • List all existing values for the specified key in the specified Region for the AWS account.

To use resource groups tagging API operations, you must add the following permissions to your IAM policy:

  • tag:GetResources

  • tag:TagResources

  • tag:UntagResources

  • tag:GetTagKeys

  • tag:GetTagValues

You'll also need permissions to access the resources of individual services so that you can tag and untag those resources.

For more information on IAM policies, see Managing IAM Policies in the IAM User Guide.

You can use the Resource Groups Tagging API to tag resources for the following AWS services.

  • Alexa for Business (a4b)

  • API Gateway

  • Amazon AppStream

  • AWS AppSync

  • AWS App Mesh

  • Amazon Athena

  • Amazon Aurora

  • AWS Backup

  • AWS Certificate Manager

  • AWS Certificate Manager Private CA

  • Amazon Cloud Directory

  • AWS CloudFormation

  • Amazon CloudFront

  • AWS CloudHSM

  • AWS CloudTrail

  • Amazon CloudWatch (alarms only)

  • Amazon CloudWatch Events

  • Amazon CloudWatch Logs

  • AWS CodeBuild

  • AWS CodeCommit

  • AWS CodePipeline

  • AWS CodeStar

  • Amazon Cognito Identity

  • Amazon Cognito User Pools

  • Amazon Comprehend

  • AWS Config

  • AWS Data Exchange

  • AWS Data Pipeline

  • AWS Database Migration Service

  • AWS DataSync

  • AWS Device Farm

  • AWS Direct Connect

  • AWS Directory Service

  • Amazon DynamoDB

  • Amazon EBS

  • Amazon EC2

  • Amazon ECR

  • Amazon ECS

  • Amazon EKS

  • AWS Elastic Beanstalk

  • Amazon Elastic File System

  • Elastic Load Balancing

  • Amazon ElastiCache

  • Amazon Elasticsearch Service

  • AWS Elemental MediaLive

  • AWS Elemental MediaPackage

  • AWS Elemental MediaTailor

  • Amazon EMR

  • Amazon FSx

  • Amazon S3 Glacier

  • AWS Glue

  • Amazon GuardDuty

  • Amazon Inspector

  • AWS IoT Analytics

  • AWS IoT Core

  • AWS IoT Device Defender

  • AWS IoT Device Management

  • AWS IoT Events

  • AWS IoT Greengrass

  • AWS IoT 1-Click

  • AWS Key Management Service

  • Amazon Kinesis

  • Amazon Kinesis Data Analytics

  • Amazon Kinesis Data Firehose

  • AWS Lambda

  • AWS License Manager

  • Amazon Machine Learning

  • Amazon MQ

  • Amazon MSK

  • Amazon Neptune

  • AWS OpsWorks

  • AWS Organizations

  • Amazon Quantum Ledger Database (QLDB)

  • Amazon RDS

  • Amazon Redshift

  • AWS Resource Access Manager

  • AWS Resource Groups

  • AWS RoboMaker

  • Amazon Route 53

  • Amazon Route 53 Resolver

  • Amazon S3 (buckets only)

  • Amazon SageMaker

  • AWS Secrets Manager

  • AWS Security Hub

  • AWS Service Catalog

  • Amazon Simple Notification Service (SNS)

  • Amazon Simple Queue Service (SQS)

  • Amazon Simple Workflow Service

  • AWS Step Functions

  • AWS Storage Gateway

  • AWS Systems Manager

  • AWS Transfer for SFTP

  • Amazon VPC

  • Amazon WorkSpaces

" } diff --git a/botocore/data/robomaker/2018-06-29/paginators-1.json b/botocore/data/robomaker/2018-06-29/paginators-1.json index 314046d4..62706571 100644 --- a/botocore/data/robomaker/2018-06-29/paginators-1.json +++ b/botocore/data/robomaker/2018-06-29/paginators-1.json @@ -35,6 +35,12 @@ "limit_key": "maxResults", "output_token": "nextToken", "result_key": "simulationJobSummaries" + }, + "ListSimulationJobBatches": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "simulationJobBatchSummaries" } } } diff --git a/botocore/data/robomaker/2018-06-29/service-2.json b/botocore/data/robomaker/2018-06-29/service-2.json index 1a0222b2..4f5094ef 100644 --- a/botocore/data/robomaker/2018-06-29/service-2.json +++ b/botocore/data/robomaker/2018-06-29/service-2.json @@ -61,6 +61,22 @@ ], "documentation":"

Cancels the specified simulation job.

" }, + "CancelSimulationJobBatch":{ + "name":"CancelSimulationJobBatch", + "http":{ + "method":"POST", + "requestUri":"/cancelSimulationJobBatch" + }, + "input":{"shape":"CancelSimulationJobBatchRequest"}, + "output":{"shape":"CancelSimulationJobBatchResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Cancels a simulation job batch. When you cancel a simulation job batch, you are also cancelling all of the active simulation jobs created as part of the batch.

" + }, "CreateDeploymentJob":{ "name":"CreateDeploymentJob", "http":{ @@ -374,6 +390,21 @@ ], "documentation":"

Describes a simulation job.

" }, + "DescribeSimulationJobBatch":{ + "name":"DescribeSimulationJobBatch", + "http":{ + "method":"POST", + "requestUri":"/describeSimulationJobBatch" + }, + "input":{"shape":"DescribeSimulationJobBatchRequest"}, + "output":{"shape":"DescribeSimulationJobBatchResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Describes a simulation job batch.

" + }, "ListDeploymentJobs":{ "name":"ListDeploymentJobs", "http":{ @@ -388,7 +419,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs.

" + "documentation":"

Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs.

" }, "ListFleets":{ "name":"ListFleets", @@ -452,6 +483,20 @@ ], "documentation":"

Returns a list of simulation applications. You can optionally provide filters to retrieve specific simulation applications.

" }, + "ListSimulationJobBatches":{ + "name":"ListSimulationJobBatches", + "http":{ + "method":"POST", + "requestUri":"/listSimulationJobBatches" + }, + "input":{"shape":"ListSimulationJobBatchesRequest"}, + "output":{"shape":"ListSimulationJobBatchesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a list of simulation job batches. You can optionally provide filters to retrieve specific simulation job batches.

" + }, "ListSimulationJobs":{ "name":"ListSimulationJobs", "http":{ @@ -517,6 +562,23 @@ ], "documentation":"

Restarts a running simulation job.

" }, + "StartSimulationJobBatch":{ + "name":"StartSimulationJobBatch", + "http":{ + "method":"POST", + "requestUri":"/startSimulationJobBatch" + }, + "input":{"shape":"StartSimulationJobBatchRequest"}, + "output":{"shape":"StartSimulationJobBatchResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts a new simulation job batch. The batch is defined using one or more SimulationJobRequest objects.

" + }, "SyncDeploymentJob":{ "name":"SyncDeploymentJob", "http":{ @@ -647,6 +709,21 @@ } } }, + "BatchPolicy":{ + "type":"structure", + "members":{ + "timeoutInSeconds":{ + "shape":"BatchTimeoutInSeconds", + "documentation":"

The amount of time, in seconds, to wait for the batch to complete.

If a batch times out, and there are pending requests that were failing due to an internal failure (like InternalServiceError), they will be moved to the failed list and the batch status will be Failed. If the pending requests were failing for any other reason, the failed pending requests will be moved to the failed list and the batch status will be TimedOut.

" + }, + "maxConcurrency":{ + "shape":"MaxConcurrency", + "documentation":"

The number of active simulation jobs created as part of the batch that can be in an active state at the same time.

Active states include: Pending, Preparing, Running, Restarting, RunningFailed, and Terminating. All other states are terminal states.

" + } + }, + "documentation":"

Information about the batch policy.

" + }, + "BatchTimeoutInSeconds":{"type":"long"}, "Boolean":{"type":"boolean"}, "BoxedBoolean":{"type":"boolean"}, "CancelDeploymentJobRequest":{ @@ -664,6 +741,21 @@ "members":{ } }, + "CancelSimulationJobBatchRequest":{ + "type":"structure", + "required":["batch"], + "members":{ + "batch":{ + "shape":"Arn", + "documentation":"

The id of the batch to cancel.

" + } + } + }, + "CancelSimulationJobBatchResponse":{ + "type":"structure", + "members":{ + } + }, "CancelSimulationJobRequest":{ "type":"structure", "required":["job"], @@ -825,7 +917,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

The robot software suite used by the robot application.

" + "documentation":"

The robot software suite (ROS distribution) used by the robot application.

" }, "tags":{ "shape":"TagMap", @@ -854,7 +946,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

The robot software suite used by the robot application.

" + "documentation":"

The robot software suite (ROS distribution) used by the robot application.

" }, "lastUpdatedAt":{ "shape":"LastUpdatedAt", @@ -905,7 +997,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

The robot software suite used by the robot application.

" + "documentation":"

The robot software suite (ROS distribution) used by the robot application.

" }, "lastUpdatedAt":{ "shape":"LastUpdatedAt", @@ -995,7 +1087,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

The robot software suite of the simulation application.

" + "documentation":"

The robot software suite (ROS distribution) used by the simulation application.

" }, "renderingEngine":{ "shape":"RenderingEngine", @@ -1032,7 +1124,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

Information about the robot software suite.

" + "documentation":"

Information about the robot software suite (ROS distribution).

" }, "renderingEngine":{ "shape":"RenderingEngine", @@ -1091,7 +1183,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

Information about the robot software suite.

" + "documentation":"

Information about the robot software suite (ROS distribution).

" }, "renderingEngine":{ "shape":"RenderingEngine", @@ -1149,7 +1241,7 @@ }, "dataSources":{ "shape":"DataSourceConfigs", - "documentation":"

The data sources for the simulation job.

There is a limit of 100 files and a combined size of 25GB for all DataSourceConfig objects.

" + "documentation":"

Specify data sources to mount read-only files from S3 into your simulation. These files are available under /opt/robomaker/datasources/data_source_name.

There is a limit of 100 files and a combined size of 25GB for all DataSourceConfig objects.

" }, "tags":{ "shape":"TagMap", @@ -1161,6 +1253,11 @@ } } }, + "CreateSimulationJobRequests":{ + "type":"list", + "member":{"shape":"SimulationJobRequest"}, + "min":1 + }, "CreateSimulationJobResponse":{ "type":"structure", "members":{ @@ -1691,7 +1788,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

The robot software suite used by the robot application.

" + "documentation":"

The robot software suite (ROS distribution) used by the robot application.

" }, "revisionId":{ "shape":"RevisionId", @@ -1801,7 +1898,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

Information about the robot software suite.

" + "documentation":"

Information about the robot software suite (ROS distribution).

" }, "renderingEngine":{ "shape":"RenderingEngine", @@ -1821,6 +1918,69 @@ } } }, + "DescribeSimulationJobBatchRequest":{ + "type":"structure", + "required":["batch"], + "members":{ + "batch":{ + "shape":"Arn", + "documentation":"

The id of the batch to describe.

" + } + } + }, + "DescribeSimulationJobBatchResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the batch.

" + }, + "status":{ + "shape":"SimulationJobBatchStatus", + "documentation":"

The status of the batch.

Pending

The simulation job batch request is pending.

InProgress

The simulation job batch is in progress.

Failed

The simulation job batch failed. One or more simulation job requests could not be completed due to an internal failure (like InternalServiceError). See failureCode and failureReason for more information.

Completed

The simulation batch job completed. A batch is complete when (1) there are no pending simulation job requests in the batch and none of the failed simulation job requests are due to InternalServiceError, and (2) all created simulation jobs have reached a terminal state (for example, Completed or Failed).

Canceled

The simulation batch job was cancelled.

Canceling

The simulation batch job is being cancelled.

Completing

The simulation batch job is completing.

TimingOut

The simulation job batch is timing out.

If a batch is timing out, and there are pending requests that were failing due to an internal failure (like InternalServiceError), the batch status will be Failed. If there are no such failing requests, the batch status will be TimedOut.

TimedOut

The simulation batch job timed out.

" + }, + "lastUpdatedAt":{ + "shape":"LastUpdatedAt", + "documentation":"

The time, in milliseconds since the epoch, when the simulation job batch was last updated.

" + }, + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The time, in milliseconds since the epoch, when the simulation job batch was created.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

" + }, + "batchPolicy":{ + "shape":"BatchPolicy", + "documentation":"

The batch policy.

" + }, + "failureCode":{ + "shape":"SimulationJobBatchErrorCode", + "documentation":"

The failure code of the simulation job batch.

" + }, + "failureReason":{ + "shape":"GenericString", + "documentation":"

The reason the simulation job batch failed.

" + }, + "failedRequests":{ + "shape":"FailedCreateSimulationJobRequests", + "documentation":"

A list of failed create simulation job requests. These requests could not be created as simulation jobs, so they do not have a simulation job ID.

" + }, + "pendingRequests":{ + "shape":"CreateSimulationJobRequests", + "documentation":"

A list of pending simulation job requests. These requests have not yet been created into simulation jobs.

" + }, + "createdRequests":{ + "shape":"SimulationJobSummaries", + "documentation":"

A list of created simulation job summaries.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the simulation job batch.

" + } + } + }, "DescribeSimulationJobRequest":{ "type":"structure", "required":["job"], @@ -1935,6 +2095,33 @@ "min":1, "pattern":".*" }, + "FailedAt":{"type":"timestamp"}, + "FailedCreateSimulationJobRequest":{ + "type":"structure", + "members":{ + "request":{ + "shape":"SimulationJobRequest", + "documentation":"

The simulation job request.

" + }, + "failureReason":{ + "shape":"GenericString", + "documentation":"

The failure reason of the simulation job request.

" + }, + "failureCode":{ + "shape":"SimulationJobErrorCode", + "documentation":"

The failure code.

" + }, + "failedAt":{ + "shape":"FailedAt", + "documentation":"

The time, in milliseconds since the epoch, when the simulation job request failed.

" + } + }, + "documentation":"

Information about a failed create simulation job request.

" + }, + "FailedCreateSimulationJobRequests":{ + "type":"list", + "member":{"shape":"FailedCreateSimulationJobRequest"} + }, "FailureBehavior":{ "type":"string", "enum":[ @@ -2027,6 +2214,7 @@ "error":{"httpStatusCode":400}, "exception":true }, + "Integer":{"type":"integer"}, "InternalServerException":{ "type":"structure", "members":{ @@ -2092,11 +2280,11 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The nextToken value returned from a previous paginated ListDeploymentJobs request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

" + "documentation":"

The nextToken value returned from a previous paginated ListDeploymentJobs request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of deployment job results returned by ListDeploymentJobs in paginated output. When this parameter is used, ListDeploymentJobs only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListDeploymentJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListDeploymentJobs returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

When this parameter is used, ListDeploymentJobs only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListDeploymentJobs request with the returned nextToken value. This value can be between 1 and 200. If this parameter is not used, then ListDeploymentJobs returns up to 200 results and a nextToken value if applicable.
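A small sketch of the maxResults/nextToken handshake described above, looping manually with botocore; the region is a placeholder and the deploymentJobs response key follows this model.

    import botocore.session

    session = botocore.session.get_session()
    robomaker = session.create_client("robomaker", region_name="us-west-2")

    jobs = []
    kwargs = {"maxResults": 200}
    while True:
        page = robomaker.list_deployment_jobs(**kwargs)
        jobs.extend(page.get("deploymentJobs", []))
        token = page.get("nextToken")
        if not token:
            break                         # no more pages
        kwargs["nextToken"] = token       # continue from where the previous page ended
    print(len(jobs), "deployment jobs")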

" } } }, @@ -2122,7 +2310,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of deployment job results returned by ListFleets in paginated output. When this parameter is used, ListFleets only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListFleets request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListFleets returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

When this parameter is used, ListFleets only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListFleets request with the returned nextToken value. This value can be between 1 and 200. If this parameter is not used, then ListFleets returns up to 200 results and a nextToken value if applicable.

" }, "filters":{ "shape":"Filters", @@ -2152,11 +2340,11 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The nextToken value returned from a previous paginated ListRobotApplications request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

" + "documentation":"

The nextToken value returned from a previous paginated ListRobotApplications request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of deployment job results returned by ListRobotApplications in paginated output. When this parameter is used, ListRobotApplications only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListRobotApplications request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListRobotApplications returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

When this parameter is used, ListRobotApplications only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListRobotApplications request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListRobotApplications returns up to 100 results and a nextToken value if applicable.

" }, "filters":{ "shape":"Filters", @@ -2182,11 +2370,11 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The nextToken value returned from a previous paginated ListRobots request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

" + "documentation":"

The nextToken value returned from a previous paginated ListRobots request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of deployment job results returned by ListRobots in paginated output. When this parameter is used, ListRobots only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListRobots request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListRobots returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

When this parameter is used, ListRobots only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListRobots request with the returned nextToken value. This value can be between 1 and 200. If this parameter is not used, then ListRobots returns up to 200 results and a nextToken value if applicable.

" }, "filters":{ "shape":"Filters", @@ -2216,11 +2404,11 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The nextToken value returned from a previous paginated ListSimulationApplications request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

" + "documentation":"

The nextToken value returned from a previous paginated ListSimulationApplications request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of deployment job results returned by ListSimulationApplications in paginated output. When this parameter is used, ListSimulationApplications only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListSimulationApplications request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListSimulationApplications returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

When this parameter is used, ListSimulationApplications only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListSimulationApplications request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListSimulationApplications returns up to 100 results and a nextToken value if applicable.

" }, "filters":{ "shape":"Filters", @@ -2241,6 +2429,36 @@ } } }, + "ListSimulationJobBatchesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The nextToken value returned from a previous paginated ListSimulationJobBatches request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

When this parameter is used, ListSimulationJobBatches only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListSimulationJobBatches request with the returned nextToken value.

" + }, + "filters":{ + "shape":"Filters", + "documentation":"

Optional filters to limit results.

" + } + } + }, + "ListSimulationJobBatchesResponse":{ + "type":"structure", + "members":{ + "simulationJobBatchSummaries":{ + "shape":"SimulationJobBatchSummaries", + "documentation":"

A list of simulation job batch summaries.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The nextToken value to include in a future ListSimulationJobBatches request. When the results of a ListSimulationJobBatches request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, "ListSimulationJobsRequest":{ "type":"structure", "members":{ @@ -2250,7 +2468,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of deployment job results returned by ListSimulationJobs in paginated output. When this parameter is used, ListSimulationJobs only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListSimulationJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListSimulationJobs returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

When this parameter is used, ListSimulationJobs only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListSimulationJobs request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then ListSimulationJobs returns up to 1000 results and a nextToken value if applicable.

" }, "filters":{ "shape":"Filters", @@ -2304,6 +2522,7 @@ }, "documentation":"

The logging configuration.

" }, + "MaxConcurrency":{"type":"integer"}, "MaxResults":{"type":"integer"}, "Name":{ "type":"string", @@ -2331,6 +2550,7 @@ }, "NonEmptyString":{ "type":"string", + "max":255, "min":1 }, "NonSystemPort":{ @@ -2631,7 +2851,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

Information about a robot software suite.

" + "documentation":"

Information about a robot software suite (ROS distribution).

" } }, "documentation":"

Summary information for a robot application.

" @@ -2691,14 +2911,14 @@ "members":{ "name":{ "shape":"RobotSoftwareSuiteType", - "documentation":"

The name of the robot software suite.

" + "documentation":"

The name of the robot software suite (ROS distribution).

" }, "version":{ "shape":"RobotSoftwareSuiteVersionType", - "documentation":"

The version of the robot software suite.

" + "documentation":"

The version of the robot software suite (ROS distribution).

" } }, - "documentation":"

Information about a robot software suite.

" + "documentation":"

Information about a robot software suite (ROS distribution).

" }, "RobotSoftwareSuiteType":{ "type":"string", @@ -2866,7 +3086,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

Information about a robot software suite.

" + "documentation":"

Information about a robot software suite (ROS distribution).

" }, "simulationSoftwareSuite":{ "shape":"SimulationSoftwareSuite", @@ -2961,6 +3181,62 @@ }, "documentation":"

Information about a simulation job.

" }, + "SimulationJobBatchErrorCode":{ + "type":"string", + "enum":["InternalServiceError"] + }, + "SimulationJobBatchStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Failed", + "Completed", + "Canceled", + "Canceling", + "Completing", + "TimingOut", + "TimedOut" + ] + }, + "SimulationJobBatchSummaries":{ + "type":"list", + "member":{"shape":"SimulationJobBatchSummary"} + }, + "SimulationJobBatchSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the batch.

" + }, + "lastUpdatedAt":{ + "shape":"LastUpdatedAt", + "documentation":"

The time, in milliseconds since the epoch, when the simulation job batch was last updated.

" + }, + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The time, in milliseconds since the epoch, when the simulation job batch was created.

" + }, + "status":{ + "shape":"SimulationJobBatchStatus", + "documentation":"

The status of the simulation job batch.

Pending

The simulation job batch request is pending.

InProgress

The simulation job batch is in progress.

Failed

The simulation job batch failed. One or more simulation job requests could not be completed due to an internal failure (like InternalServiceError). See failureCode and failureReason for more information.

Completed

The simulation job batch completed. A batch is complete when (1) there are no pending simulation job requests in the batch and none of the failed simulation job requests are due to InternalServiceError and (2) all created simulation jobs have reached a terminal state (for example, Completed or Failed).

Canceled

The simulation job batch was canceled.

Canceling

The simulation job batch is being canceled.

Completing

The simulation job batch is completing.

TimingOut

The simulation job batch is timing out.

If a batch is timing out and there are pending requests that were failing due to an internal failure (like InternalServiceError), the batch status will be Failed. If there are no such failing requests, the batch status will be TimedOut.

TimedOut

The simulation job batch timed out.

" + }, + "failedRequestCount":{ + "shape":"Integer", + "documentation":"

The number of failed simulation job requests.

" + }, + "pendingRequestCount":{ + "shape":"Integer", + "documentation":"

The number of pending simulation job requests.

" + }, + "createdRequestCount":{ + "shape":"Integer", + "documentation":"

The number of created simulation job requests.

" + } + }, + "documentation":"

Information about a simulation job batch.

" + }, "SimulationJobErrorCode":{ "type":"string", "enum":[ @@ -2978,10 +3254,14 @@ "InvalidBundleRobotApplication", "InvalidBundleSimulationApplication", "InvalidS3Resource", + "LimitExceeded", "MismatchedEtag", "RobotApplicationVersionMismatchedEtag", "SimulationApplicationVersionMismatchedEtag", "ResourceNotFound", + "RequestThrottled", + "BatchTimedOut", + "BatchCanceled", "InvalidInput", "WrongRegionS3Bucket", "WrongRegionS3Output", @@ -2989,6 +3269,48 @@ "WrongRegionSimulationApplication" ] }, + "SimulationJobRequest":{ + "type":"structure", + "required":["maxJobDurationInSeconds"], + "members":{ + "outputLocation":{"shape":"OutputLocation"}, + "loggingConfig":{"shape":"LoggingConfig"}, + "maxJobDurationInSeconds":{ + "shape":"JobDuration", + "documentation":"

The maximum simulation job duration in seconds. The value must be 8 days (691,200 seconds) or less.

" + }, + "iamRole":{ + "shape":"IamRole", + "documentation":"

The IAM role name that allows the simulation instance to call the AWS APIs that are specified in its associated policies on your behalf. This is how credentials are passed in to your simulation job.

" + }, + "failureBehavior":{ + "shape":"FailureBehavior", + "documentation":"

The failure behavior of the simulation job.

Continue

Restart the simulation job in the same host instance.

Fail

Stop the simulation job and terminate the instance.

" + }, + "useDefaultApplications":{ + "shape":"BoxedBoolean", + "documentation":"

Boolean indicating whether to use default simulation tool applications.

" + }, + "robotApplications":{ + "shape":"RobotApplicationConfigs", + "documentation":"

The robot applications to use in the simulation job.

" + }, + "simulationApplications":{ + "shape":"SimulationApplicationConfigs", + "documentation":"

The simulation applications to use in the simulation job.

" + }, + "dataSources":{ + "shape":"DataSourceConfigs", + "documentation":"

Specify data sources to mount read-only files from S3 into your simulation. These files are available under /opt/robomaker/datasources/data_source_name.

There is a limit of 100 files and a combined size of 25GB for all DataSourceConfig objects.

" + }, + "vpcConfig":{"shape":"VPCConfig"}, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the simulation job request.

" + } + }, + "documentation":"

Information about a simulation job request.

" + }, "SimulationJobStatus":{ "type":"string", "enum":[ @@ -3122,6 +3444,77 @@ "type":"list", "member":{"shape":"Source"} }, + "StartSimulationJobBatchRequest":{ + "type":"structure", + "required":["createSimulationJobRequests"], + "members":{ + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

" + }, + "batchPolicy":{ + "shape":"BatchPolicy", + "documentation":"

The batch policy.

" + }, + "createSimulationJobRequests":{ + "shape":"CreateSimulationJobRequests", + "documentation":"

A list of simulation job requests to create in the batch.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the simulation job batch.
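A hedged botocore sketch of starting a batch from a pair of SimulationJobRequest entries; the batchPolicy fields (timeoutInSeconds, maxConcurrency), ARNs, and launch configuration values are illustrative assumptions.

    import botocore.session

    session = botocore.session.get_session()
    robomaker = session.create_client("robomaker", region_name="us-west-2")

    base_request = {
        "maxJobDurationInSeconds": 3600,  # the only required member of SimulationJobRequest
        "iamRole": "arn:aws:iam::123456789012:role/MySimulationRole",  # placeholder
        "simulationApplications": [{
            "application": "arn:aws:robomaker:us-west-2:123456789012:simulation-application/MySimApp/1",
            "launchConfig": {"packageName": "my_sim_pkg", "launchFile": "my_world.launch"},
        }],
    }

    response = robomaker.start_simulation_job_batch(
        clientRequestToken="run-2020-02-18-a",  # idempotency token; reuse it to retry safely
        batchPolicy={"timeoutInSeconds": 7200, "maxConcurrency": 2},  # assumed BatchPolicy members
        createSimulationJobRequests=[base_request, dict(base_request)],
        tags={"project": "demo"},
    )
    print(response["arn"], response["status"])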

" + } + } + }, + "StartSimulationJobBatchResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the batch.

" + }, + "status":{ + "shape":"SimulationJobBatchStatus", + "documentation":"

The status of the simulation job batch.

Pending

The simulation job batch request is pending.

InProgress

The simulation job batch is in progress.

Failed

The simulation job batch failed. One or more simulation job requests could not be completed due to an internal failure (like InternalServiceError). See failureCode and failureReason for more information.

Completed

The simulation job batch completed. A batch is complete when (1) there are no pending simulation job requests in the batch and none of the failed simulation job requests are due to InternalServiceError and (2) all created simulation jobs have reached a terminal state (for example, Completed or Failed).

Canceled

The simulation job batch was canceled.

Canceling

The simulation job batch is being canceled.

Completing

The simulation job batch is completing.

TimingOut

The simulation job batch is timing out.

If a batch is timing out and there are pending requests that were failing due to an internal failure (like InternalServiceError), the batch status will be Failed. If there are no such failing requests, the batch status will be TimedOut.

TimedOut

The simulation job batch timed out.

" + }, + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The time, in milliseconds since the epoch, when the simulation job batch was created.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

" + }, + "batchPolicy":{ + "shape":"BatchPolicy", + "documentation":"

The batch policy.

" + }, + "failureCode":{ + "shape":"SimulationJobBatchErrorCode", + "documentation":"

The failure code if the simulation job batch failed.

" + }, + "failureReason":{ + "shape":"GenericString", + "documentation":"

The reason the simulation job batch failed.

" + }, + "failedRequests":{ + "shape":"FailedCreateSimulationJobRequests", + "documentation":"

A list of failed simulation job requests. These requests could not be created as simulation jobs, so they do not have a simulation job ID.

" + }, + "pendingRequests":{ + "shape":"CreateSimulationJobRequests", + "documentation":"

A list of pending simulation job requests. These requests have not yet been created into simulation jobs.

" + }, + "createdRequests":{ + "shape":"SimulationJobSummaries", + "documentation":"

A list of created simulation job request summaries.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the simulation job batch.

" + } + } + }, "Subnets":{ "type":"list", "member":{"shape":"NonEmptyString"}, @@ -3283,7 +3676,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

The robot software suite used by the robot application.

" + "documentation":"

The robot software suite (ROS distribution) used by the robot application.

" }, "currentRevisionId":{ "shape":"RevisionId", @@ -3312,7 +3705,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

The robot software suite used by the robot application.

" + "documentation":"

The robot software suite (ROS distribution) used by the robot application.

" }, "lastUpdatedAt":{ "shape":"LastUpdatedAt", @@ -3347,7 +3740,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

Information about the robot software suite.

" + "documentation":"

Information about the robot software suite (ROS distribution).

" }, "renderingEngine":{ "shape":"RenderingEngine", @@ -3384,7 +3777,7 @@ }, "robotSoftwareSuite":{ "shape":"RobotSoftwareSuite", - "documentation":"

Information about the robot software suite.

" + "documentation":"

Information about the robot software suite (ROS distribution).

" }, "renderingEngine":{ "shape":"RenderingEngine", diff --git a/botocore/data/s3/2006-03-01/service-2.json b/botocore/data/s3/2006-03-01/service-2.json index a7bc9005..027a0f0d 100644 --- a/botocore/data/s3/2006-03-01/service-2.json +++ b/botocore/data/s3/2006-03-01/service-2.json @@ -51,7 +51,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, for copying an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

Amazon S3 transfer acceleration does not support cross-region copies. If you request a cross-region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information about transfer acceleration, see Transfer Acceleration.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the request parameters x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, or x-amz-copy-source-if-modified-since.

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

You can use this operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes.

The source object that you are copying can be encrypted or unencrypted. If the source object is encrypted, it can be encrypted by server-side encryption using AWS managed encryption keys or by using a customer-provided encryption key. When copying an object, you can request that Amazon S3 encrypt the target object by using either the AWS managed encryption keys or by using your own encryption key. You can do this regardless of the form of server-side encryption that was used to encrypt the source, or even if the source object was not encrypted. For more information about server-side encryption, see Using Server-Side Encryption.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

Consider the following when using request headers:

  • Consideration 1 – If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

    • x-amz-copy-source-if-match condition evaluates to true

    • x-amz-copy-source-if-unmodified-since condition evaluates to false

  • Consideration 2 – If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

    • x-amz-copy-source-if-none-match condition evaluates to false

    • x-amz-copy-source-if-modified-since condition evaluates to true

The copy request charge is based on the storage class and Region you specify for the destination object. For pricing information, see Amazon S3 Pricing.

Following are other considerations when using CopyObject:

Versioning

By default, x-amz-copy-source identifies the current version of an object to copy. (If the current version is a delete marker, Amazon S3 behaves as if the object was deleted.) To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see .

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side- Encryption-Specific Request Headers

To encrypt the target object, you must provide the appropriate encryption-related request headers. The one you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • To encrypt the target object using server-side encryption with an AWS managed encryption key, provide the following request headers, as appropriate.

    • x-amz-server-side​-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms but don't provide x-amz-server-side- encryption-aws-kms-key-id, Amazon S3 uses the AWS managed customer master key (CMK) in AWS KMS to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in KMS.

  • To encrypt the target object using server-side encryption with an encryption key that you provide, use the following headers.

    • x-amz-server-side​-encryption​-customer-algorithm

    • x-amz-server-side​-encryption​-customer-key

    • x-amz-server-side​-encryption​-customer-key-MD5

  • If the source object is encrypted using server-side encryption with customer-provided encryption keys, you must use the following headers.

    • x-amz-copy-source​-server-side​-encryption​-customer-algorithm

    • x-amz-copy-source​-server-side​-encryption​-customer-key

    • x-amz-copy-source-​server-side​-encryption​-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

The following operations are related to CopyObject:

For more information, see Copying Objects.

", + "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, for copying an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

Amazon S3 transfer acceleration does not support cross-region copies. If you request a cross-region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information about transfer acceleration, see Transfer Acceleration.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

To only copy an object under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the request parameters x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, or x-amz-copy-source-if-modified-since.

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

You can use this operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes.

The source object that you are copying can be encrypted or unencrypted. If the source object is encrypted, it can be encrypted by server-side encryption using AWS managed encryption keys or by using a customer-provided encryption key. When copying an object, you can request that Amazon S3 encrypt the target object by using either the AWS managed encryption keys or by using your own encryption key. You can do this regardless of the form of server-side encryption that was used to encrypt the source, or even if the source object was not encrypted. For more information about server-side encryption, see Using Server-Side Encryption.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

Consider the following when using request headers:

  • Consideration 1 – If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

    • x-amz-copy-source-if-match condition evaluates to true

    • x-amz-copy-source-if-unmodified-since condition evaluates to false

  • Consideration 2 – If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

    • x-amz-copy-source-if-none-match condition evaluates to false

    • x-amz-copy-source-if-modified-since condition evaluates to true

The copy request charge is based on the storage class and Region you specify for the destination object. For pricing information, see Amazon S3 Pricing.

Following are other considerations when using CopyObject:

Versioning

By default, x-amz-copy-source identifies the current version of an object to copy. (If the current version is a delete marker, Amazon S3 behaves as if the object was deleted.) To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation.

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side-Encryption-Specific Request Headers

To encrypt the target object, you must provide the appropriate encryption-related request headers. The one you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • To encrypt the target object using server-side encryption with an AWS managed encryption key, provide the following request headers, as appropriate.

    • x-amz-server-side​-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id of the symmetric customer managed CMK. Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in KMS.

  • To encrypt the target object using server-side encryption with an encryption key that you provide, use the following headers.

    • x-amz-server-side​-encryption​-customer-algorithm

    • x-amz-server-side​-encryption​-customer-key

    • x-amz-server-side​-encryption​-customer-key-MD5

  • If the source object is encrypted using server-side encryption with customer-provided encryption keys, you must use the following headers.

    • x-amz-copy-source​-server-side​-encryption​-customer-algorithm

    • x-amz-copy-source​-server-side​-encryption​-customer-key

    • x-amz-copy-source-​server-side​-encryption​-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

The following operations are related to CopyObject:

For more information, see Copying Objects.
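To ground the header discussion above, a minimal botocore sketch of a CopyObject call that re-encrypts the target with SSE-KMS and changes its storage class; bucket names and the KMS key ID are placeholders.

    import botocore.session

    session = botocore.session.get_session()
    s3 = session.create_client("s3", region_name="us-east-1")

    response = s3.copy_object(
        Bucket="destination-bucket",                 # placeholder
        Key="backups/report.csv",
        CopySource="source-bucket/report.csv",       # placeholder source bucket/key
        # Maps to x-amz-server-side-encryption / x-amz-server-side-encryption-aws-kms-key-id.
        ServerSideEncryption="aws:kms",
        SSEKMSKeyId="1234abcd-12ab-34cd-56ef-1234567890ab",  # placeholder symmetric CMK ID
        # Maps to x-amz-acl; a canned ACL and explicit grant headers cannot be combined.
        ACL="private",
        StorageClass="STANDARD_IA",                  # change the storage class during the copy
    )
    # On success the copy result (ETag, LastModified) is embedded in the 200 OK body.
    print(response["CopyObjectResult"]["ETag"])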

", "alias":"PutObjectCopy" }, "CreateBucket":{ @@ -79,7 +79,7 @@ "input":{"shape":"CreateMultipartUploadRequest"}, "output":{"shape":"CreateMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", - "documentation":"

This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.

For more information about multipart uploads, see Multipart Upload Overview.

If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).

After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stop charging you for storing them only after you either complete or abort a multipart upload.

You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart) and UploadPartCopy) requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.

To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.

If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.

For more information, see Protecting Data Using Server-Side Encryption.

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side- Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side​-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side- encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side​-encryption​-customer-algorithm

    • x-amz-server-side​-encryption​-customer-key

    • x-amz-server-side​-encryption​-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

The following operations are related to CreateMultipartUpload:

", + "documentation":"

This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.

For more information about multipart uploads, see Multipart Upload Overview.

If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).

After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.

To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.

If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.

For more information, see Protecting Data Using Server-Side Encryption.

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side-Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side​-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

The following operations are related to CreateMultipartUpload:

", "alias":"InitiateMultipartUpload" }, "DeleteBucket":{ @@ -219,7 +219,7 @@ }, "input":{"shape":"DeleteObjectTaggingRequest"}, "output":{"shape":"DeleteObjectTaggingOutput"}, - "documentation":"

Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.

To use this operation, you must have permission to perform the s3:DeleteObjectTagging action.

To delete tags of a specific object version, add the versionId query parameter in the request. You will need permission for the s3:DeleteObjectVersionTagging action.

The following operations are related to DeleteBucketMetricsConfiguration:

" + "documentation":"

Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.

To use this operation, you must have permission to perform the s3:DeleteObjectTagging action.

To delete tags of a specific object version, add the versionId query parameter in the request. You will need permission for the s3:DeleteObjectVersionTagging action.
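A short boto3 sketch of both cases (bucket, key, and version ID are placeholders):

    import boto3

    s3 = boto3.client('s3')

    # Removes the tag set from the current version (requires s3:DeleteObjectTagging).
    s3.delete_object_tagging(Bucket='example-bucket', Key='example-key')

    # Removes the tag set from a specific version (requires s3:DeleteObjectVersionTagging).
    s3.delete_object_tagging(Bucket='example-bucket', Key='example-key',
                             VersionId='example-version-id')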

The following operations are related to DeleteObjectTagging:

" }, "DeleteObjects":{ "name":"DeleteObjects", @@ -720,7 +720,7 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "documentation":"

This implementation of the PUT operation uses the encryption subresource to set the default encryption state of an existing bucket.

This implementation of the PUT operation sets default encryption for a buckets using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS customer master keys (CMKs) (SSE-KMS) bucket.

This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

This implementation of the PUT operation uses the encryption subresource to set the default encryption state of an existing bucket.

This implementation of the PUT operation sets default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (CMKs) (SSE-KMS).

This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
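A minimal boto3 sketch of setting a default encryption rule, assuming an existing bucket and using a placeholder KMS key alias; switch SSEAlgorithm to AES256 for SSE-S3:

    import boto3

    s3 = boto3.client('s3')
    s3.put_bucket_encryption(
        Bucket='example-bucket',
        ServerSideEncryptionConfiguration={
            'Rules': [{
                'ApplyServerSideEncryptionByDefault': {
                    'SSEAlgorithm': 'aws:kms',
                    'KMSMasterKeyID': 'alias/ExampleAlias',  # placeholder CMK alias
                }
            }]
        },
    )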

Related Resources

" }, "PutBucketInventoryConfiguration":{ "name":"PutBucketInventoryConfiguration", @@ -858,7 +858,7 @@ "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation":"

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

To configure your application to send the request headers before sending the request body, use the 100-continue HTTP status code. For PUT operations, this helps you avoid sending the message body if the message is rejected based on the headers (for example, because authentication fails or a redirect occurs). For more information on the 100-continue HTTP status code, see Section 8.2.3 of http://www.ietf.org/rfc/rfc2616.txt.

You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys. For more information, see Using Server-Side Encryption.

Access Permissions

You can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the Access Control List (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • EU (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

Server-Side Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the default AWS KMS CMK to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    If you use this feature, the ETag value that Amazon S3 returns in the response is not the MD5 of the object.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Storage Class Options

By default, Amazon S3 uses the Standard storage class to store newly created objects. The Standard storage class provides high durability and high availability. You can specify other storage classes depending on the performance needs. For more information, see Storage Classes in the Amazon Simple Storage Service Developer Guide.

Versioning

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response using the x-amz-version-id response header. If versioning is suspended, Amazon S3 always uses null as the version ID for the object stored. For more information about returning the versioning state of a bucket, see GetBucketVersioning. If you enable versioning for a bucket, when Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

Related Resources

" + "documentation":"

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
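A small sketch of that check with boto3 (bucket and key are placeholders); note that the returned ETag equals the hex MD5 only for single-part uploads that don't use SSE-KMS or SSE-C, so the final comparison is a sanity check for that case:

    import base64
    import hashlib

    import boto3

    body = b'example object data'
    digest = hashlib.md5(body)

    s3 = boto3.client('s3')
    response = s3.put_object(
        Bucket='example-bucket',
        Key='example-key',
        Body=body,
        # Content-MD5 is the base64-encoded 128-bit MD5 digest of the body.
        ContentMD5=base64.b64encode(digest.digest()).decode('ascii'),
    )

    # For a plain single-part PUT, the ETag is the hex MD5 digest in quotes.
    assert response['ETag'].strip('"') == digest.hexdigest()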

To configure your application to send the request headers before sending the request body, use the 100-continue HTTP status code. For PUT operations, this helps you avoid sending the message body if the message is rejected based on the headers (for example, because authentication fails or a redirect occurs). For more information on the 100-continue HTTP status code, see Section 8.2.3 of http://www.ietf.org/rfc/rfc2616.txt.

You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys. For more information, see Using Server-Side Encryption.
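A hedged sketch of both choices through boto3 (bucket, keys, and the KMS key ID are placeholders); for the customer-provided key case, botocore fills in the x-amz-server-side-encryption-customer-key-MD5 header from the key you pass:

    import os

    import boto3

    s3 = boto3.client('s3')

    # AWS managed keys (SSE-KMS): maps to x-amz-server-side-encryption and
    # x-amz-server-side-encryption-aws-kms-key-id.
    s3.put_object(Bucket='example-bucket', Key='kms-object', Body=b'data',
                  ServerSideEncryption='aws:kms',
                  SSEKMSKeyId='1234abcd-12ab-34cd-56ef-1234567890ab')  # placeholder

    # Customer-provided key (SSE-C): you keep the 256-bit key and must supply it
    # again on every GET for this object.
    customer_key = os.urandom(32)
    s3.put_object(Bucket='example-bucket', Key='sse-c-object', Body=b'data',
                  SSECustomerAlgorithm='AES256',
                  SSECustomerKey=customer_key)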

Access Permissions

You can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id of the symmetric customer managed CMK. Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the Access Control List (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • EU (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

Server-Side Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id of the symmetric customer managed CMK. Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    If you use this feature, the ETag value that Amazon S3 returns in the response is not the MD5 of the object.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Storage Class Options

By default, Amazon S3 uses the Standard storage class to store newly created objects. The Standard storage class provides high durability and high availability. You can specify other storage classes depending on the performance needs. For more information, see Storage Classes in the Amazon Simple Storage Service Developer Guide.
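For example, with boto3 (names are placeholders):

    import boto3

    s3 = boto3.client('s3')
    s3.put_object(Bucket='example-bucket', Key='reports/2020-02.csv', Body=b'...',
                  StorageClass='STANDARD_IA')  # omit to use the default Standard class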

Versioning

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response using the x-amz-version-id response header. If versioning is suspended, Amazon S3 always uses null as the version ID for the object stored. For more information about returning the versioning state of a bucket, see GetBucketVersioning. If you enable versioning for a bucket, when Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.
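A short sketch of reading that version ID back from boto3 (the bucket name is a placeholder and is assumed to have versioning enabled):

    import boto3

    s3 = boto3.client('s3')
    response = s3.put_object(Bucket='versioned-bucket', Key='example-key', Body=b'v2')

    # Maps to the x-amz-version-id response header; 'null' when versioning is
    # suspended, and absent if the bucket has never had versioning enabled.
    print(response.get('VersionId'))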

Related Resources

" }, "PutObjectAcl":{ "name":"PutObjectAcl", @@ -1492,7 +1492,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used for the object.

", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -1655,7 +1655,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used for the object.

", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -1845,7 +1845,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.

", + "documentation":"

Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2078,7 +2078,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used for the object.

", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2224,7 +2224,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.

", + "documentation":"

Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2772,7 +2772,7 @@ }, "KMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

If the encryption type is aws:kms, this optional value specifies the AWS KMS key ID to use for encryption of job results.

" + "documentation":"

If the encryption type is aws:kms, this optional value specifies the ID of the symmetric customer managed AWS KMS CMK to use for encryption of job results. Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

" }, "KMSContext":{ "shape":"KMSContext", @@ -2786,7 +2786,7 @@ "members":{ "ReplicaKmsKeyID":{ "shape":"ReplicaKmsKeyID", - "documentation":"

Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket. Amazon S3 uses this key to encrypt replica objects.

" + "documentation":"

Specifies the ID (Key ARN or Alias ARN) of the customer managed customer master key (CMK) stored in AWS Key Management Service (KMS) for the destination bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only supports symmetric customer managed CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

" } }, "documentation":"

Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects.

" @@ -3634,7 +3634,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used for the object.

", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -4166,7 +4166,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used for the object.

", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -6807,7 +6807,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

If x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used for the object.

", + "documentation":"

If x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -6970,7 +6970,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

If x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used for the object.

If the value of x-amz-server-side-encryption is aws:kms, this header specifies the ID of the AWS KMS CMK that will be used for the object. If you specify x-amz-server-side-encryption:aws:kms, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS to protect the data.

", + "documentation":"

If x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

If the value of x-amz-server-side-encryption is aws:kms, this header specifies the ID of the symmetric customer managed AWS KMS CMK that will be used for the object. If you specify x-amz-server-side-encryption:aws:kms, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -7426,7 +7426,7 @@ }, "RequestPayer":{ "type":"string", - "documentation":"

Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requestor Pays Buckets in the Amazon S3 Developer Guide.

", + "documentation":"

Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from requester pays buckets, see Downloading Objects in Requestor Pays Buckets in the Amazon S3 Developer Guide.

", "enum":["requester"] }, "RequestPaymentConfiguration":{ @@ -7672,7 +7672,7 @@ "members":{ "KeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

Specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for encrypting inventory reports.

" + "documentation":"

Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) to use for encrypting inventory reports.

" } }, "documentation":"

Specifies the use of SSE-KMS to encrypt delivered inventory reports.

", @@ -8198,7 +8198,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used for the object.

", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -8352,7 +8352,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) was used for the object.

", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 403d7ae4..b2d8a724 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -280,6 +280,9 @@ }, "input":{"shape":"CreatePresignedDomainUrlRequest"}, "output":{"shape":"CreatePresignedDomainUrlResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to Amazon SageMaker Studio (Studio), and granted access to all of the Apps and files associated with that Amazon Elastic File System (EFS) volume. This operation can only be called when AuthMode equals IAM.
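A minimal sketch with boto3, assuming a Studio domain and user profile already exist (both identifiers are placeholders):

    import boto3

    sagemaker = boto3.client('sagemaker')
    response = sagemaker.create_presigned_domain_url(
        DomainId='d-exampledomainid',        # placeholder Domain ID
        UserProfileName='example-user',      # placeholder UserProfile name
        SessionExpirationDurationInSeconds=1800,
    )
    print(response['AuthorizedUrl'])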

" }, "CreatePresignedNotebookInstanceUrl":{ @@ -362,7 +365,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates a trial component, which is a stage of a machine learning trial. A trial is composed of one or more trial components. A trial component can be used in multiple trials.

Trial components include pre-processing jobs, training jobs, and batch transform jobs.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial component and then use the Search API to search for the tags.

You can create a trial component through a direct call to the CreateTrialComponent API. However, you can't specify the Source property of the component in the request, therefore, the component isn't associated with an Amazon SageMaker job. You must use Amazon SageMaker Studio, the Amazon SageMaker Python SDK, or the AWS SDK for Python (Boto) to create the component with a valid Source property.

" + "documentation":"

Creates a trial component, which is a stage of a machine learning trial. A trial is composed of one or more trial components. A trial component can be used in multiple trials.

Trial components include pre-processing jobs, training jobs, and batch transform jobs.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial component and then use the Search API to search for the tags.

CreateTrialComponent can only be invoked from within an Amazon SageMaker managed environment. This includes Amazon SageMaker training jobs, processing jobs, transform jobs, and Amazon SageMaker notebooks. A call to CreateTrialComponent from outside one of these environments results in an error.

" }, "CreateUserProfile":{ "name":"CreateUserProfile", @@ -888,6 +891,16 @@ ], "documentation":"

Describes the user profile.

" }, + "DescribeWorkforce":{ + "name":"DescribeWorkforce", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkforceRequest"}, + "output":{"shape":"DescribeWorkforceResponse"}, + "documentation":"

Lists private workforce information, including workforce name, Amazon Resource Name (ARN), and, if applicable, allowed IP address ranges (CIDRs). Allowable IP address ranges are the IP addresses that workers can use to access tasks.

This operation applies only to private workforces.

" + }, "DescribeWorkteam":{ "name":"DescribeWorkteam", "http":{ @@ -1208,7 +1221,10 @@ }, "input":{"shape":"ListTrialComponentsRequest"}, "output":{"shape":"ListTrialComponentsResponse"}, - "documentation":"

Lists the trial components in your account. You can filter the list to show only components that were created in a specific time range. You can sort the list by trial component name or creation time.

" + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Lists the trial components in your account. You can sort the list by trial component name or creation time. You can filter the list to show only components that were created in a specific time range. You can also filter on one of the following:

  • ExperimentName

  • SourceArn

  • TrialName

" }, "ListTrials":{ "name":"ListTrials", @@ -1218,6 +1234,9 @@ }, "input":{"shape":"ListTrialsRequest"}, "output":{"shape":"ListTrialsResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], "documentation":"

Lists the trials in your account. Specify an experiment name to limit the list to the trials that are part of that experiment. The list can be filtered to show only trials that were created in a specific time range. The list can be sorted by trial name or creation time.
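A short boto3 sketch of both listing calls, scoped to one experiment (the experiment name is a placeholder):

    import boto3

    sagemaker = boto3.client('sagemaker')

    # Trials that belong to one experiment, newest first.
    trials = sagemaker.list_trials(ExperimentName='example-experiment',
                                   SortBy='CreationTime', SortOrder='Descending')

    # Trial components for the same experiment. ExperimentName, SourceArn, and
    # TrialName are mutually exclusive filters.
    components = sagemaker.list_trial_components(ExperimentName='example-experiment')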

" }, "ListUserProfiles":{ @@ -1258,7 +1277,7 @@ }, "input":{"shape":"SearchRequest"}, "output":{"shape":"SearchResponse"}, - "documentation":"

Finds Amazon SageMaker resources that match a search query. Matching resource objects are returned as a list of SearchResult objects in the response. You can sort the search results by any resource property in a ascending or descending order.

You can query against the following value types: numerical, text, Booleans, and timestamps.

" + "documentation":"

Finds Amazon SageMaker resources that match a search query. Matching resource objects are returned as a list of SearchResult objects in the response. You can sort the search results by any resource property in ascending or descending order.

You can query against the following value types: numeric, text, Boolean, and timestamp.

" }, "StartMonitoringSchedule":{ "name":"StartMonitoringSchedule", @@ -1537,6 +1556,16 @@ ], "documentation":"

Updates a user profile.

" }, + "UpdateWorkforce":{ + "name":"UpdateWorkforce", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateWorkforceRequest"}, + "output":{"shape":"UpdateWorkforceResponse"}, + "documentation":"

Restricts access to tasks assigned to workers in the specified workforce to those within specific ranges of IP addresses. You specify allowed IP addresses by creating a list of up to four CIDRs.

By default, a workforce isn't restricted to specific IP addresses. If you specify a range of IP addresses, workers who attempt to access tasks using any IP address outside the specified range are denied access and get a Not Found error message on the worker portal. After restricting access with this operation, you can see the allowed IP values for a private workforce with the DescribeWorkforce operation.

This operation applies only to private workforces.
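A hedged sketch of restricting and then inspecting the default private workforce with boto3; the CIDR ranges below are documentation examples, and the CIDR list is assumed to be wrapped in a SourceIpConfig structure, following the boto3 SageMaker client's request shape:

    import boto3

    sagemaker = boto3.client('sagemaker')

    # Allow at most four CIDR ranges; example ranges shown.
    sagemaker.update_workforce(
        WorkforceName='default',
        SourceIpConfig={'Cidrs': ['203.0.113.0/24', '198.51.100.0/24']},
    )

    # Read the restriction back (private workforces only).
    workforce = sagemaker.describe_workforce(WorkforceName='default')['Workforce']
    print(workforce.get('SourceIpConfig', {}).get('Cidrs'))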

" + }, "UpdateWorkteam":{ "name":"UpdateWorkteam", "http":{ @@ -1632,7 +1661,7 @@ }, "EnableSageMakerMetricsTimeSeries":{ "shape":"Boolean", - "documentation":"

To generate and save time-series metrics during training, set to true. The default is false and time-series metrics aren't generated except in the following cases:

  • You use one of the Amazon SageMaker built-in algorithms

  • You use one of the following prebuilt Amazon SageMaker Docker images:

    • Tensorflow

    • MXNet

    • PyTorch

  • You specify at least one MetricDefinition

" + "documentation":"

To generate and save time-series metrics during training, set to true. The default is false and time-series metrics aren't generated except in the following cases:

" } }, "documentation":"

Specifies the training algorithm to use in a CreateTrainingJob request.

For more information about algorithms provided by Amazon SageMaker, see Algorithms. For information about using your own algorithms, see Using Your Own Algorithms with Amazon SageMaker.

" @@ -2622,6 +2651,16 @@ }, "documentation":"

Contains information about the output location for managed spot training checkpoint data.

" }, + "Cidr":{ + "type":"string", + "max":64, + "min":4, + "pattern":"(^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$)" + }, + "Cidrs":{ + "type":"list", + "member":{"shape":"Cidr"} + }, "CodeRepositoryArn":{ "type":"string", "max":2048, @@ -2741,7 +2780,7 @@ "members":{ "CollectionName":{ "shape":"CollectionName", - "documentation":"

The name of the tensor collection.

" + "documentation":"

The name of the tensor collection. The name must be unique relative to other rule configuration names.

" }, "CollectionParameters":{ "shape":"CollectionParameters", @@ -3293,7 +3332,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances with local storage, the call to CreateEndpointConfig fails.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

" + "documentation":"

The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

The KmsKeyId can be any of the following formats:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint and UpdateEndpoint requests. For more information, refer to the AWS Key Management Service section Using Key Policies in AWS KMS.

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances with local storage, the call to CreateEndpointConfig fails.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.
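A minimal boto3 sketch that passes the key as an alias, one of the formats listed above; the model name and config name are placeholders, and the instance type is chosen without local NVMe storage so the KmsKeyId restriction above does not apply:

    import boto3

    sagemaker = boto3.client('sagemaker')
    sagemaker.create_endpoint_config(
        EndpointConfigName='example-endpoint-config',
        ProductionVariants=[{
            'VariantName': 'AllTraffic',
            'ModelName': 'example-model',     # placeholder, must already exist
            'InstanceType': 'ml.m5.large',    # EBS-backed, no local instance storage
            'InitialInstanceCount': 1,
        }],
        KmsKeyId='alias/ExampleAlias',        # placeholder alias
    )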

" } } }, @@ -3581,7 +3620,7 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

The Semantic Segmentation built-in algorithm does not support network isolation.

" + "documentation":"

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

" } } }, @@ -3936,7 +3975,7 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If you enable network isolation for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

" + "documentation":"

Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If you enable network isolation for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

" }, "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", @@ -4360,7 +4399,7 @@ }, "LocalPath":{ "shape":"DirectoryPath", - "documentation":"

Path to local storage location for rules. Defaults to /opt/ml/processing/output/rule/.

" + "documentation":"

Path to local storage location for output of rules. Defaults to /opt/ml/processing/output/rule/.

" }, "S3OutputPath":{ "shape":"S3Uri", @@ -4376,7 +4415,7 @@ }, "VolumeSizeInGB":{ "shape":"OptionalVolumeSizeInGB", - "documentation":"

The size, in GB, of the ML storage volume attached to the notebook instance.

" + "documentation":"

The size, in GB, of the ML storage volume attached to the processing instance.

" }, "RuleParameters":{ "shape":"RuleParameters", @@ -5620,7 +5659,7 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

If True, no inbound or outbound network calls can be made to or from the model container.

The Semantic Segmentation built-in algorithm does not support network isolation.

" + "documentation":"

If True, no inbound or outbound network calls can be made to or from the model container.

" } } }, @@ -6124,7 +6163,7 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

If you want to allow inbound or outbound network calls, except for calls between peers within a training cluster for distributed training, choose True. If you enable network isolation for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

" + "documentation":"

If you want to allow inbound or outbound network calls, except for calls between peers within a training cluster for distributed training, choose True. If you enable network isolation for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

" }, "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", @@ -6442,6 +6481,26 @@ } } }, + "DescribeWorkforceRequest":{ + "type":"structure", + "required":["WorkforceName"], + "members":{ + "WorkforceName":{ + "shape":"WorkforceName", + "documentation":"

The name of the private workforce whose access you want to restrict. WorkforceName is automatically set to \"default\" when a workforce is created and cannot be modified.

" + } + } + }, + "DescribeWorkforceResponse":{ + "type":"structure", + "required":["Workforce"], + "members":{ + "Workforce":{ + "shape":"Workforce", + "documentation":"

A single private workforce, which is automatically created when you create your first private work team. You can create one private workforce in each AWS Region. By default, any workforce-related API operation used in a specific Region will apply to the workforce created in that Region. To learn how to create a private workforce, see Create a Private Workforce.

" + } + } + }, "DescribeWorkteamRequest":{ "type":"structure", "required":["WorkteamName"], @@ -7203,6 +7262,7 @@ "type":"string", "enum":[ "TENSORFLOW", + "KERAS", "MXNET", "ONNX", "PYTORCH", @@ -7216,7 +7276,7 @@ "members":{ "Resource":{ "shape":"ResourceType", - "documentation":"

The name of the Amazon SageMaker resource to Search for. The only valid Resource value is TrainingJob.

" + "documentation":"

The name of the Amazon SageMaker resource to Search for.

" }, "SuggestionQuery":{ "shape":"SuggestionQuery", @@ -7287,7 +7347,7 @@ "jsonvalue":true } }, - "documentation":"

Defines under what conditions SageMaker creates a human loop.

" + "documentation":"

Defines under what conditions SageMaker creates a human loop. Used within .

" }, "HumanLoopActivationConfig":{ "type":"structure", @@ -7411,11 +7471,11 @@ }, "TaskAvailabilityLifetimeInSeconds":{ "shape":"TaskAvailabilityLifetimeInSeconds", - "documentation":"

The length of time that a task remains available for labeling by human workers. If you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours (43200). For private and vendor workforces, the maximum is as listed.

" + "documentation":"

The length of time that a task remains available for labeling by human workers. If you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours (43200). The default value is 864000 seconds (10 days). For private and vendor workforces, the maximum is as listed.

" }, "MaxConcurrentTaskCount":{ "shape":"MaxConcurrentTaskCount", - "documentation":"

Defines the maximum number of data objects that can be labeled by human workers at the same time. Also referred to as batch size. Each object may have more than one worker at one time.

" + "documentation":"

Defines the maximum number of data objects that can be labeled by human workers at the same time. Also referred to as batch size. Each object may have more than one worker at one time. The default value is 1000 objects.

" }, "AnnotationConsolidationConfig":{ "shape":"AnnotationConsolidationConfig", @@ -7592,7 +7652,7 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

The Semantic Segmentation built-in algorithm does not support network isolation.

" + "documentation":"

Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

" }, "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", @@ -7936,7 +7996,7 @@ }, "DataInputConfig":{ "shape":"DataInputConfig", - "documentation":"

Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.

  • TensorFlow: You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"input\":[1,1024,1024,3]}

      • If using the CLI, {\\\"input\\\":[1,1024,1024,3]}

    • Examples for two inputs:

      • If using the console, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}

      • If using the CLI, {\\\"data1\\\": [1,28,28,1], \\\"data2\\\":[1,28,28,1]}

  • MXNET/ONNX: You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"data\":[1,3,1024,1024]}

      • If using the CLI, {\\\"data\\\":[1,3,1024,1024]}

    • Examples for two inputs:

      • If using the console, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]}

      • If using the CLI, {\\\"var1\\\": [1,1,28,28], \\\"var2\\\":[1,1,28,28]}

  • PyTorch: You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.

    • Examples for one input in dictionary format:

      • If using the console, {\"input0\":[1,3,224,224]}

      • If using the CLI, {\\\"input0\\\":[1,3,224,224]}

    • Example for one input in list format: [[1,3,224,224]]

    • Examples for two inputs in dictionary format:

      • If using the console, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}

      • If using the CLI, {\\\"input0\\\":[1,3,224,224], \\\"input1\\\":[1,3,224,224]}

    • Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]]

  • XGBOOST: input data name and shape are not needed.

" + "documentation":"

Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.

  • TensorFlow: You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"input\":[1,1024,1024,3]}

      • If using the CLI, {\\\"input\\\":[1,1024,1024,3]}

    • Examples for two inputs:

      • If using the console, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}

      • If using the CLI, {\\\"data1\\\": [1,28,28,1], \\\"data2\\\":[1,28,28,1]}

  • KERAS: You must specify the name and shape (NCHW format) of expected data inputs using a dictionary format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC (channel-last) format, DataInputConfig should be specified in NCHW (channel-first) format. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"input_1\":[1,3,224,224]}

      • If using the CLI, {\\\"input_1\\\":[1,3,224,224]}

    • Examples for two inputs:

      • If using the console, {\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]}

      • If using the CLI, {\\\"input_1\\\": [1,3,224,224], \\\"input_2\\\":[1,3,224,224]}

  • MXNET/ONNX: You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"data\":[1,3,1024,1024]}

      • If using the CLI, {\\\"data\\\":[1,3,1024,1024]}

    • Examples for two inputs:

      • If using the console, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]}

      • If using the CLI, {\\\"var1\\\": [1,1,28,28], \\\"var2\\\":[1,1,28,28]}

  • PyTorch: You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.

    • Examples for one input in dictionary format:

      • If using the console, {\"input0\":[1,3,224,224]}

      • If using the CLI, {\\\"input0\\\":[1,3,224,224]}

    • Example for one input in list format: [[1,3,224,224]]

    • Examples for two inputs in dictionary format:

      • If using the console, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}

      • If using the CLI, {\\\"input0\\\":[1,3,224,224], \\\"input1\\\":[1,3,224,224]}

    • Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]]

  • XGBOOST: input data name and shape are not needed.

" }, "Framework":{ "shape":"Framework", @@ -8900,7 +8960,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of experiments to return in the response.

" + "documentation":"

The maximum number of experiments to return in the response. The default value is 10.

" } } }, @@ -9821,9 +9881,17 @@ "ListTrialComponentsRequest":{ "type":"structure", "members":{ + "ExperimentName":{ + "shape":"ExperimentEntityName", + "documentation":"

A filter that returns only components that are part of the specified experiment. If you specify ExperimentName, you can't filter by SourceArn or TrialName.

" + }, + "TrialName":{ + "shape":"ExperimentEntityName", + "documentation":"

A filter that returns only components that are part of the specified trial. If you specify TrialName, you can't filter by ExperimentName or SourceArn.

" + }, "SourceArn":{ "shape":"String256", - "documentation":"

A filter that returns only components that have the specified source Amazon Resource Name (ARN).

" + "documentation":"

A filter that returns only components that have the specified source Amazon Resource Name (ARN). If you specify SourceArn, you can't filter by ExperimentName or TrialName.

" }, "CreatedAfter":{ "shape":"Timestamp", @@ -9843,7 +9911,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of components to return in the response.

" + "documentation":"

The maximum number of components to return in the response. The default value is 10.

" }, "NextToken":{ "shape":"NextToken", @@ -9889,7 +9957,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of trials to return in the response.

" + "documentation":"

The maximum number of trials to return in the response. The default value is 10.

" }, "NextToken":{ "shape":"NextToken", @@ -10764,7 +10832,7 @@ "members":{ "NestedPropertyName":{ "shape":"ResourcePropertyName", - "documentation":"

The name of the property to use in the nested filters. The value must match a listed property name, such as InputDataConfig .

" + "documentation":"

The name of the property to use in the nested filters. The value must match a listed property name, such as InputDataConfig.

" }, "Filters":{ "shape":"FilterList", @@ -11757,10 +11825,10 @@ "members":{ "PropertyNameHint":{ "shape":"PropertyNameHint", - "documentation":"

Text that is part of a property's name. The property names of hyperparameter, metric, and tag key names that begin with the specified text in the PropertyNameHint.

" + "documentation":"

Text that begins a property's name.

" } }, - "documentation":"

A type of SuggestionQuery. A suggestion query for retrieving property names that match the specified hint.

" + "documentation":"

Part of the SuggestionQuery type. Specifies a hint for retrieving property names that begin with the specified text.

" }, "PropertyNameSuggestion":{ "type":"structure", @@ -12183,7 +12251,7 @@ "members":{ "Resource":{ "shape":"ResourceType", - "documentation":"

The name of the Amazon SageMaker resource to search for. Currently, the only valid Resource value is TrainingJob.

" + "documentation":"

The name of the Amazon SageMaker resource to search for.

" }, "SearchExpression":{ "shape":"SearchExpression", @@ -12330,7 +12398,7 @@ "documentation":"

Determines the shuffling order in ShuffleConfig value.

" } }, - "documentation":"

A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value.

For Pipe input mode, shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch.

" + "documentation":"

A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value.

For Pipe input mode, when ShuffleConfig is specified, shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job, when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch.

" }, "SingleSignOnUserIdentifier":{ "type":"string", @@ -12404,6 +12472,17 @@ }, "documentation":"

A list of algorithms that were used to create a model package.

" }, + "SourceIpConfig":{ + "type":"structure", + "required":["Cidrs"], + "members":{ + "Cidrs":{ + "shape":"Cidrs", + "documentation":"

A list of one to four Classless Inter-Domain Routing (CIDR) values.

Maximum: 4 CIDR values

The following Length Constraints apply to individual CIDR values in the CIDR value list.

" + } + }, + "documentation":"

A list of IP address ranges (CIDRs). Used to create an allow list of IP addresses for a private workforce. For more information, see .

" + }, "SourceType":{ "type":"string", "max":128 @@ -12615,10 +12694,10 @@ "members":{ "PropertyNameQuery":{ "shape":"PropertyNameQuery", - "documentation":"

A type of SuggestionQuery. Defines a property name hint. Only property names that match the specified hint are included in the response.

" + "documentation":"

Defines a property name hint. Only property names that begin with the specified hint are included in the response.

" } }, - "documentation":"

Limits the property names that are included in the response.

" + "documentation":"

Specified in the GetSearchSuggestions request. Limits the property names that are included in the response.

" }, "Tag":{ "type":"structure", @@ -12694,7 +12773,7 @@ "TaskAvailabilityLifetimeInSeconds":{ "type":"integer", "max":864000, - "min":1 + "min":60 }, "TaskCount":{ "type":"integer", @@ -12801,6 +12880,12 @@ "ml.m4.4xlarge", "ml.m4.10xlarge", "ml.m4.16xlarge", + "ml.g4dn.xlarge", + "ml.g4dn.2xlarge", + "ml.g4dn.4xlarge", + "ml.g4dn.8xlarge", + "ml.g4dn.12xlarge", + "ml.g4dn.16xlarge", "ml.m5.large", "ml.m5.xlarge", "ml.m5.2xlarge", @@ -13366,7 +13451,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

  • // KMS Key ID

    \"1234abcd-12ab-34cd-56ef-1234567890ab\"

  • // Amazon Resource Name (ARN) of a KMS Key

    \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

  • // KMS Key Alias

    \"alias/ExampleAlias\"

  • // Amazon Resource Name (ARN) of a KMS Key Alias

    \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"

If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateModel request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

" + "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias

If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateModel request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

" } }, "documentation":"

Describes the results of a transform job.

" @@ -13388,7 +13473,7 @@ }, "VolumeKmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt model data on the storage volume attached to the ML compute instance(s) that run the batch transform job. The VolumeKmsKeyId can be any of the following formats:

  • // KMS Key ID

    \"1234abcd-12ab-34cd-56ef-1234567890ab\"

  • // Amazon Resource Name (ARN) of a KMS Key

    \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

" + "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt model data on the storage volume attached to the ML compute instance(s) that run the batch transform job. The VolumeKmsKeyId can be any of the following formats:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias

" } }, "documentation":"

Describes the resources, including ML instance types and ML instance count, to use for transform job.

" @@ -14232,6 +14317,30 @@ } } }, + "UpdateWorkforceRequest":{ + "type":"structure", + "required":["WorkforceName"], + "members":{ + "WorkforceName":{ + "shape":"WorkforceName", + "documentation":"

The name of the private workforce whose access you want to restrict. WorkforceName is automatically set to \"default\" when a workforce is created and cannot be modified.

" + }, + "SourceIpConfig":{ + "shape":"SourceIpConfig", + "documentation":"

A list of one to four worker IP address ranges (CIDRs) that can be used to access tasks assigned to this workforce.

Maximum: 4 CIDR values

" + } + } + }, + "UpdateWorkforceResponse":{ + "type":"structure", + "required":["Workforce"], + "members":{ + "Workforce":{ + "shape":"Workforce", + "documentation":"

A single private workforce, which is automatically created when you create your first private work team. You can create one private workforce in each AWS Region. By default, any workforce-related API operation used in a specific Region applies to the workforce created in that Region. To learn how to create a private workforce, see Create a Private Workforce.

" + } + } + }, "UpdateWorkteamRequest":{ "type":"structure", "required":["WorkteamName"], @@ -14415,6 +14524,43 @@ "max":5, "min":1 }, + "Workforce":{ + "type":"structure", + "required":[ + "WorkforceName", + "WorkforceArn" + ], + "members":{ + "WorkforceName":{ + "shape":"WorkforceName", + "documentation":"

The name of the private workforce whose access you want to restrict. WorkforceName is automatically set to \"default\" when a workforce is created and cannot be modified.

" + }, + "WorkforceArn":{ + "shape":"WorkforceArn", + "documentation":"

The Amazon Resource Name (ARN) of the private workforce.

" + }, + "LastUpdatedDate":{ + "shape":"Timestamp", + "documentation":"

The most recent date that was used to successfully add one or more IP address ranges (CIDRs) to a private workforce's allow list.

" + }, + "SourceIpConfig":{ + "shape":"SourceIpConfig", + "documentation":"

A list of one to four IP address ranges (CIDRs) to be added to the workforce allow list.

" + } + }, + "documentation":"

A single private workforce, which is automatically created when you create your first private work team. You can create one private workforce in each AWS Region. By default, any workforce-related API operation used in a specific Region applies to the workforce created in that Region. To learn how to create a private workforce, see Create a Private Workforce.

" + }, + "WorkforceArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:workforce/.*" + }, + "WorkforceName":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[a-zA-Z0-9]([a-zA-Z0-9\\-])*$" + }, "Workteam":{ "type":"structure", "required":[ diff --git a/botocore/data/securityhub/2018-10-26/service-2.json b/botocore/data/securityhub/2018-10-26/service-2.json index ea899ddf..41411bdb 100644 --- a/botocore/data/securityhub/2018-10-26/service-2.json +++ b/botocore/data/securityhub/2018-10-26/service-2.json @@ -27,7 +27,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidAccessException"} ], - "documentation":"

Accepts the invitation to be a member account and be monitored by the Security Hub master account that the invitation was sent from. When the member account accepts the invitation, permission is granted to the master account to view findings generated in the member account.

" + "documentation":"

Accepts the invitation to be a member account and be monitored by the Security Hub master account that the invitation was sent from.

When the member account accepts the invitation, permission is granted to the master account to view findings generated in the member account.

" }, "BatchDisableStandards":{ "name":"BatchDisableStandards", @@ -43,7 +43,7 @@ {"shape":"InvalidAccessException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Disables the standards specified by the provided StandardsSubscriptionArns. For more information, see Standards Supported in AWS Security Hub.

" + "documentation":"

Disables the standards specified by the provided StandardsSubscriptionArns.

For more information, see Standards Supported in AWS Security Hub.

" }, "BatchEnableStandards":{ "name":"BatchEnableStandards", @@ -59,7 +59,7 @@ {"shape":"InvalidAccessException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Enables the standards specified by the provided standardsArn. In this release, only CIS AWS Foundations standards are supported. For more information, see Standards Supported in AWS Security Hub.

" + "documentation":"

Enables the standards specified by the provided standardsArn.

In this release, only CIS AWS Foundations standards are supported.

For more information, see Standards Supported in AWS Security Hub.

" }, "BatchImportFindings":{ "name":"BatchImportFindings", @@ -75,7 +75,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidAccessException"} ], - "documentation":"

Imports security findings generated from an integrated third-party product into Security Hub. This action is requested by the integrated product to import its findings into Security Hub. The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.

" + "documentation":"

Imports security findings generated from an integrated third-party product into Security Hub. This action is requested by the integrated product to import its findings into Security Hub.

The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.

" }, "CreateActionTarget":{ "name":"CreateActionTarget", @@ -92,7 +92,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

Creates a custom action target in Security Hub. You can use custom actions on findings and insights in Security Hub to trigger target actions in Amazon CloudWatch Events.

" + "documentation":"

Creates a custom action target in Security Hub.

You can use custom actions on findings and insights in Security Hub to trigger target actions in Amazon CloudWatch Events.

" }, "CreateInsight":{ "name":"CreateInsight", @@ -109,7 +109,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

Creates a custom insight in Security Hub. An insight is a consolidation of findings that relate to a security issue that requires attention or remediation. Use the GroupByAttribute to group the related findings in the insight.

" + "documentation":"

Creates a custom insight in Security Hub. An insight is a consolidation of findings that relate to a security issue that requires attention or remediation.

To group the related findings in the insight, use the GroupByAttribute.

" }, "CreateMembers":{ "name":"CreateMembers", @@ -126,7 +126,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the master account. To successfully create a member, you must use this action from an account that already has Security Hub enabled. You can use the EnableSecurityHub to enable Security Hub.

After you use CreateMembers to create member account associations in Security Hub, you need to use the InviteMembers action, which invites the accounts to enable Security Hub and become member accounts in Security Hub. If the invitation is accepted by the account owner, the account becomes a member account in Security Hub, and a permission policy is added that permits the master account to view the findings generated in the member account. When Security Hub is enabled in the invited account, findings start being sent to both the member and master accounts.

You can remove the association between the master and member accounts by using the DisassociateFromMasterAccount or DisassociateMembers operation.

" + "documentation":"

Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the master account. To successfully create a member, you must use this action from an account that already has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation.

After you use CreateMembers to create member account associations in Security Hub, you must use the InviteMembers operation to invite the accounts to enable Security Hub and become member accounts in Security Hub.

If the account owner accepts the invitation, the account becomes a member account in Security Hub, and a permission policy is added that permits the master account to view the findings generated in the member account. When Security Hub is enabled in the invited account, findings start to be sent to both the member and master accounts.

To remove the association between the master and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation.

" }, "DeclineInvitations":{ "name":"DeclineInvitations", @@ -158,7 +158,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes a custom action target from Security Hub. Deleting a custom action target doesn't affect any findings or insights that were already sent to Amazon CloudWatch Events using the custom action.

" + "documentation":"

Deletes a custom action target from Security Hub.

Deleting a custom action target does not affect any findings or insights that were already sent to Amazon CloudWatch Events using the custom action.

" }, "DeleteInsight":{ "name":"DeleteInsight", @@ -258,7 +258,23 @@ {"shape":"InvalidAccessException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Returns information about the products available that you can subscribe to and integrate with Security Hub to consolidate findings.

" + "documentation":"

Returns information about the available products that you can subscribe to and integrate with Security Hub in order to consolidate findings.

" + }, + "DescribeStandardsControls":{ + "name":"DescribeStandardsControls", + "http":{ + "method":"GET", + "requestUri":"/standards/controls/{StandardsSubscriptionArn+}" + }, + "input":{"shape":"DescribeStandardsControlsRequest"}, + "output":{"shape":"DescribeStandardsControlsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"InvalidAccessException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns a list of compliance standards controls.

For each control, the results include information about whether it is currently enabled, the severity, and a link to remediation information.

" }, "DisableImportFindingsForProduct":{ "name":"DisableImportFindingsForProduct", @@ -275,7 +291,7 @@ {"shape":"InvalidAccessException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Disables the integration of the specified product with Security Hub. Findings from that product are no longer sent to Security Hub after the integration is disabled.

" + "documentation":"

Disables the integration of the specified product with Security Hub. After the integration is disabled, findings from that product are no longer sent to Security Hub.

" }, "DisableSecurityHub":{ "name":"DisableSecurityHub", @@ -291,7 +307,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disables Security Hub in your account only in the current Region. To disable Security Hub in all Regions, you must submit one request per Region where you have enabled Security Hub. When you disable Security Hub for a master account, it doesn't disable Security Hub for any associated member accounts.

When you disable Security Hub, your existing findings and insights and any Security Hub configuration settings are deleted after 90 days and can't be recovered. Any standards that were enabled are disabled, and your master and member account associations are removed. If you want to save your existing findings, you must export them before you disable Security Hub.

" + "documentation":"

Disables Security Hub in your account only in the current Region. To disable Security Hub in all Regions, you must submit one request per Region where you have enabled Security Hub.

When you disable Security Hub for a master account, it doesn't disable Security Hub for any associated member accounts.

When you disable Security Hub, your existing findings and insights and any Security Hub configuration settings are deleted after 90 days and cannot be recovered. Any standards that were enabled are disabled, and your master and member account associations are removed.

If you want to save your existing findings, you must export them before you disable Security Hub.

" }, "DisassociateFromMasterAccount":{ "name":"DisassociateFromMasterAccount", @@ -342,7 +358,7 @@ {"shape":"ResourceConflictException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Enables the integration of a partner product with Security Hub. Integrated products send findings to Security Hub. When you enable a product integration, a permission policy that grants permission for the product to send findings to Security Hub is applied.

" + "documentation":"

Enables the integration of a partner product with Security Hub. Integrated products send findings to Security Hub.

When you enable a product integration, a permission policy that grants permission for the product to send findings to Security Hub is applied.

" }, "EnableSecurityHub":{ "name":"EnableSecurityHub", @@ -359,7 +375,7 @@ {"shape":"ResourceConflictException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Enables Security Hub for your account in the current Region or the Region you specify in the request. When you enable Security Hub, you grant to Security Hub the permissions necessary to gather findings from AWS Config, Amazon GuardDuty, Amazon Inspector, and Amazon Macie. To learn more, see Setting Up AWS Security Hub.

" + "documentation":"

Enables Security Hub for your account in the current Region or the Region you specify in the request.

Enabling Security Hub also enables the CIS AWS Foundations standard.

When you enable Security Hub, you grant to Security Hub the permissions necessary to gather findings from AWS Config, Amazon GuardDuty, Amazon Inspector, and Amazon Macie.

To learn more, see Setting Up AWS Security Hub.

" }, "GetEnabledStandards":{ "name":"GetEnabledStandards", @@ -408,7 +424,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the results of the Security Hub insight that the insight ARN specifies.

" + "documentation":"

Lists the results of the Security Hub insight specified by the insight ARN.

" }, "GetInsights":{ "name":"GetInsights", @@ -425,7 +441,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists and describes insights that insight ARNs specify.

" + "documentation":"

Lists and describes insights for the specified insight ARNs.

" }, "GetInvitationsCount":{ "name":"GetInvitationsCount", @@ -458,7 +474,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Provides the details for the Security Hub master account to the current member account.

" + "documentation":"

Provides the details for the Security Hub master account for the current member account.

" }, "GetMembers":{ "name":"GetMembers", @@ -475,7 +491,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns the details on the Security Hub member accounts that the account IDs specify.

" + "documentation":"

Returns the details for the Security Hub member accounts for the specified account IDs.

" }, "InviteMembers":{ "name":"InviteMembers", @@ -492,7 +508,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Invites other AWS accounts to become member accounts for the Security Hub master account that the invitation is sent from. Before you can use this action to invite a member, you must first create the member account in Security Hub by using the CreateMembers action. When the account owner accepts the invitation to become a member account and enables Security Hub, the master account can view the findings generated from member account.

" + "documentation":"

Invites other AWS accounts to become member accounts for the Security Hub master account that the invitation is sent from.

Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub.

When the account owner accepts the invitation to become a member account and enables Security Hub, the master account can view the findings generated from the member account.

" }, "ListEnabledProductsForImport":{ "name":"ListEnabledProductsForImport", @@ -507,7 +523,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidAccessException"} ], - "documentation":"

Lists all findings-generating solutions (products) whose findings you have subscribed to receive in Security Hub.

" + "documentation":"

Lists all findings-generating solutions (products) that you are subscribed to receive findings from in Security Hub.

" }, "ListInvitations":{ "name":"ListInvitations", @@ -635,7 +651,23 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates the Security Hub insight that the insight ARN specifies.

" + "documentation":"

Updates the Security Hub insight identified by the specified insight ARN.

" + }, + "UpdateStandardsControl":{ + "name":"UpdateStandardsControl", + "http":{ + "method":"PATCH", + "requestUri":"/standards/control/{StandardsControlArn+}" + }, + "input":{"shape":"UpdateStandardsControlRequest"}, + "output":{"shape":"UpdateStandardsControlResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"InvalidAccessException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Used to control whether an individual compliance standard control is enabled or disabled.

" } }, "shapes":{ @@ -725,6 +757,218 @@ "type":"list", "member":{"shape":"NonEmptyString"} }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "ZoneName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the Availability Zone.

" + }, + "SubnetId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the subnet. You can specify one subnet per Availability Zone.

" + } + }, + "documentation":"

Information about an Availability Zone.

" + }, + "AvailabilityZones":{ + "type":"list", + "member":{"shape":"AvailabilityZone"} + }, + "AwsCloudFrontDistributionDetails":{ + "type":"structure", + "members":{ + "DomainName":{ + "shape":"NonEmptyString", + "documentation":"

The domain name corresponding to the distribution.

" + }, + "ETag":{ + "shape":"NonEmptyString", + "documentation":"

The entity tag is a hash of the object.

" + }, + "LastModifiedTime":{ + "shape":"NonEmptyString", + "documentation":"

The date and time that the distribution was last modified.

" + }, + "Logging":{ + "shape":"AwsCloudFrontDistributionLogging", + "documentation":"

A complex type that controls whether access logs are written for the distribution.

" + }, + "Origins":{ + "shape":"AwsCloudFrontDistributionOrigins", + "documentation":"

A complex type that contains information about origins for this distribution.

" + }, + "Status":{ + "shape":"NonEmptyString", + "documentation":"

Indicates the current status of the distribution.

" + }, + "WebAclId":{ + "shape":"NonEmptyString", + "documentation":"

A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution.

" + } + }, + "documentation":"

A distribution configuration.

" + }, + "AwsCloudFrontDistributionLogging":{ + "type":"structure", + "members":{ + "Bucket":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon S3 bucket to store the access logs in.

" + }, + "Enabled":{ + "shape":"Boolean", + "documentation":"

With this field, you can enable or disable access logging for the selected distribution.

" + }, + "IncludeCookies":{ + "shape":"Boolean", + "documentation":"

Specifies whether you want CloudFront to include cookies in access logs.

" + }, + "Prefix":{ + "shape":"NonEmptyString", + "documentation":"

An optional string that you want CloudFront to use as a prefix to the access log filenames for this distribution.

" + } + }, + "documentation":"

A complex type that controls whether access logs are written for the distribution.

" + }, + "AwsCloudFrontDistributionOriginItem":{ + "type":"structure", + "members":{ + "DomainName":{ + "shape":"NonEmptyString", + "documentation":"

Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin.

" + }, + "Id":{ + "shape":"NonEmptyString", + "documentation":"

A unique identifier for the origin or origin group.

" + }, + "OriginPath":{ + "shape":"NonEmptyString", + "documentation":"

An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin.

" + } + }, + "documentation":"

A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), Amazon MediaStore, or other server from which CloudFront gets your files.

" + }, + "AwsCloudFrontDistributionOriginItemList":{ + "type":"list", + "member":{"shape":"AwsCloudFrontDistributionOriginItem"} + }, + "AwsCloudFrontDistributionOrigins":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"AwsCloudFrontDistributionOriginItemList", + "documentation":"

A complex type that contains origins or origin groups for this distribution.

" + } + }, + "documentation":"

A complex type that contains information about origins and origin groups for this distribution.

" + }, + "AwsCodeBuildProjectDetails":{ + "type":"structure", + "members":{ + "EncryptionKey":{ + "shape":"NonEmptyString", + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) used to encrypt the build output artifacts.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK alias (using the format alias/alias-name).

" + }, + "Environment":{ + "shape":"AwsCodeBuildProjectEnvironment", + "documentation":"

Information about the build environment for this build project.

" + }, + "Name":{ + "shape":"NonEmptyString", + "documentation":"

The name of the build project.

" + }, + "Source":{ + "shape":"AwsCodeBuildProjectSource", + "documentation":"

Information about the build input source code for this build project.

" + }, + "ServiceRole":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the IAM role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.

" + }, + "VpcConfig":{ + "shape":"AwsCodeBuildProjectVpcConfig", + "documentation":"

Information about the VPC configuration that AWS CodeBuild accesses.

" + } + }, + "documentation":"

Information about an AWS CodeBuild project.

" + }, + "AwsCodeBuildProjectEnvironment":{ + "type":"structure", + "members":{ + "Certificate":{ + "shape":"NonEmptyString", + "documentation":"

The certificate to use with this build project.

" + }, + "ImagePullCredentialsType":{ + "shape":"NonEmptyString", + "documentation":"

The type of credentials AWS CodeBuild uses to pull images in your build.

Valid values:

  • CODEBUILD specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust the AWS CodeBuild service principal.

  • SERVICE_ROLE specifies that AWS CodeBuild uses your build project's service role.

When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD credentials.

" + }, + "RegistryCredential":{ + "shape":"AwsCodeBuildProjectEnvironmentRegistryCredential", + "documentation":"

The credentials for access to a private registry.

" + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"

The type of build environment to use for related builds.

The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Europe (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and Europe (Frankfurt).

The environment type LINUX_CONTAINER with compute type build.general1.2xlarge is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), Europe (Ireland), Europe (London), Europe (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), Europe (Ireland), Europe (London), Europe (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

Valid values: WINDOWS_CONTAINER | LINUX_CONTAINER | LINUX_GPU_CONTAINER | ARM_CONTAINER

" + } + }, + "documentation":"

Information about the build environment for this build project.

" + }, + "AwsCodeBuildProjectEnvironmentRegistryCredential":{ + "type":"structure", + "members":{ + "Credential":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets Manager.

The credential can use the name of the credentials only if they exist in your current AWS Region.

" + }, + "CredentialProvider":{ + "shape":"NonEmptyString", + "documentation":"

The service that created the credentials to access a private Docker registry.

The valid value, SECRETS_MANAGER, is for AWS Secrets Manager.

" + } + }, + "documentation":"

The credentials for access to a private registry.

" + }, + "AwsCodeBuildProjectSource":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"NonEmptyString", + "documentation":"

The type of repository that contains the source code to be built. Valid values are:

  • BITBUCKET - The source code is in a Bitbucket repository.

  • CODECOMMIT - The source code is in an AWS CodeCommit repository.

  • CODEPIPELINE - The source code settings are specified in the source action of a pipeline in AWS CodePipeline.

  • GITHUB - The source code is in a GitHub repository.

  • GITHUB_ENTERPRISE - The source code is in a GitHub Enterprise repository.

  • NO_SOURCE - The project does not have input source code.

  • S3 - The source code is in an S3 input bucket.

" + }, + "Location":{ + "shape":"NonEmptyString", + "documentation":"

Information about the location of the source code to be built.

Valid values include:

  • For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.

  • For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name).

  • For source code in an S3 input bucket, one of the following.

    • The path to the ZIP file that contains the source code (for example, bucket-name/path/to/object-name.zip).

    • The path to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/).

  • For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file.

  • For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file.

" + }, + "GitCloneDepth":{ + "shape":"Integer", + "documentation":"

Information about the Git clone depth for the build project.

" + }, + "InsecureSsl":{ + "shape":"Boolean", + "documentation":"

Whether to ignore SSL warnings while connecting to the project source code.

" + } + }, + "documentation":"

Information about the build input source code for this build project.

" + }, + "AwsCodeBuildProjectVpcConfig":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the VPC.

" + }, + "Subnets":{ + "shape":"NonEmptyStringList", + "documentation":"

A list of one or more subnet IDs in your Amazon VPC.

" + }, + "SecurityGroupIds":{ + "shape":"NonEmptyStringList", + "documentation":"

A list of one or more security group IDs in your Amazon VPC.

" + } + }, + "documentation":"

Information about the VPC configuration that AWS CodeBuild accesses.

" + }, "AwsEc2InstanceDetails":{ "type":"structure", "members":{ @@ -767,12 +1011,384 @@ }, "documentation":"

The details of an Amazon EC2 instance.

" }, + "AwsEc2NetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachTime":{ + "shape":"NonEmptyString", + "documentation":"

The timestamp indicating when the attachment initiated.

" + }, + "AttachmentId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the network interface attachment.

" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "documentation":"

Indicates whether the network interface is deleted when the instance is terminated.

" + }, + "DeviceIndex":{ + "shape":"Integer", + "documentation":"

The device index of the network interface attachment on the instance.

" + }, + "InstanceId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the instance.

" + }, + "InstanceOwnerId":{ + "shape":"NonEmptyString", + "documentation":"

The AWS account ID of the owner of the instance.

" + }, + "Status":{ + "shape":"NonEmptyString", + "documentation":"

The attachment state.

Valid values: attaching | attached | detaching | detached

" + } + }, + "documentation":"

Information about the network interface attachment.

" + }, + "AwsEc2NetworkInterfaceDetails":{ + "type":"structure", + "members":{ + "Attachment":{ + "shape":"AwsEc2NetworkInterfaceAttachment", + "documentation":"

The network interface attachment.

" + }, + "NetworkInterfaceId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the network interface.

" + }, + "SecurityGroups":{ + "shape":"AwsEc2NetworkInterfaceSecurityGroupList", + "documentation":"

Security groups for the network interface.

" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "documentation":"

Indicates whether traffic to or from the instance is validated.

" + } + }, + "documentation":"

Details about the network interface.

" + }, + "AwsEc2NetworkInterfaceSecurityGroup":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the security group.

" + }, + "GroupId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the security group.

" + } + }, + "documentation":"

A security group associated with the network interface.

" + }, + "AwsEc2NetworkInterfaceSecurityGroupList":{ + "type":"list", + "member":{"shape":"AwsEc2NetworkInterfaceSecurityGroup"} + }, + "AwsEc2SecurityGroupDetails":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the security group.

" + }, + "GroupId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the security group.

" + }, + "OwnerId":{ + "shape":"NonEmptyString", + "documentation":"

The AWS account ID of the owner of the security group.

" + }, + "VpcId":{ + "shape":"NonEmptyString", + "documentation":"

[VPC only] The ID of the VPC for the security group.

" + }, + "IpPermissions":{ + "shape":"AwsEc2SecurityGroupIpPermissionList", + "documentation":"

The inbound rules associated with the security group.

" + }, + "IpPermissionsEgress":{ + "shape":"AwsEc2SecurityGroupIpPermissionList", + "documentation":"

[VPC only] The outbound rules associated with the security group.

" + } + }, + "documentation":"

Details about an EC2 security group.

" + }, + "AwsEc2SecurityGroupIpPermission":{ + "type":"structure", + "members":{ + "IpProtocol":{ + "shape":"NonEmptyString", + "documentation":"

The IP protocol name (tcp, udp, icmp, icmpv6) or number.

[VPC only] Use -1 to specify all protocols.

When authorizing security group rules, specifying -1 or a protocol number other than tcp, udp, icmp, or icmpv6 allows traffic on all ports, regardless of any port range you specify.

For tcp, udp, and icmp, you must specify a port range.

For icmpv6, the port range is optional. If you omit the port range, traffic for all types and codes is allowed.

" + }, + "FromPort":{ + "shape":"Integer", + "documentation":"

The start of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 type number.

A value of -1 indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 types, you must specify all codes.

" + }, + "ToPort":{ + "shape":"Integer", + "documentation":"

The end of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code.

A value of -1 indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6 types, you must specify all codes.

" + }, + "UserIdGroupPairs":{ + "shape":"AwsEc2SecurityGroupUserIdGroupPairList", + "documentation":"

The security group and AWS account ID pairs.

" + }, + "IpRanges":{ + "shape":"AwsEc2SecurityGroupIpRangeList", + "documentation":"

The IPv4 ranges.

" + }, + "Ipv6Ranges":{ + "shape":"AwsEc2SecurityGroupIpv6RangeList", + "documentation":"

The IPv6 ranges.

" + }, + "PrefixListIds":{ + "shape":"AwsEc2SecurityGroupPrefixListIdList", + "documentation":"

[VPC only] The prefix list IDs for an AWS service. With outbound rules, this is the AWS service to access through a VPC endpoint from instances associated with the security group.

" + } + }, + "documentation":"

An IP permission for an EC2 security group.

" + }, + "AwsEc2SecurityGroupIpPermissionList":{ + "type":"list", + "member":{"shape":"AwsEc2SecurityGroupIpPermission"} + }, + "AwsEc2SecurityGroupIpRange":{ + "type":"structure", + "members":{ + "CidrIp":{ + "shape":"NonEmptyString", + "documentation":"

The IPv4 CIDR range. You can specify either a CIDR range or a source security group, but not both. To specify a single IPv4 address, use the /32 prefix length.

" + } + }, + "documentation":"

A range of IPv4 addresses.

" + }, + "AwsEc2SecurityGroupIpRangeList":{ + "type":"list", + "member":{"shape":"AwsEc2SecurityGroupIpRange"} + }, + "AwsEc2SecurityGroupIpv6Range":{ + "type":"structure", + "members":{ + "CidrIpv6":{ + "shape":"NonEmptyString", + "documentation":"

The IPv6 CIDR range. You can specify either a CIDR range or a source security group, but not both. To specify a single IPv6 address, use the /128 prefix length.

" + } + }, + "documentation":"

A range of IPv6 addresses.

" + }, + "AwsEc2SecurityGroupIpv6RangeList":{ + "type":"list", + "member":{"shape":"AwsEc2SecurityGroupIpv6Range"} + }, + "AwsEc2SecurityGroupPrefixListId":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the prefix.

" + } + }, + "documentation":"

A prefix list ID.

" + }, + "AwsEc2SecurityGroupPrefixListIdList":{ + "type":"list", + "member":{"shape":"AwsEc2SecurityGroupPrefixListId"} + }, + "AwsEc2SecurityGroupUserIdGroupPair":{ + "type":"structure", + "members":{ + "GroupId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the security group.

" + }, + "GroupName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the security group.

" + }, + "PeeringStatus":{ + "shape":"NonEmptyString", + "documentation":"

The status of a VPC peering connection, if applicable.

" + }, + "UserId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of an AWS account.

For a referenced security group in another VPC, the account ID of the referenced security group is returned in the response. If the referenced security group is deleted, this value is not returned.

[EC2-Classic] Required when adding or removing rules that reference a security group in another AWS account.

" + }, + "VpcId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the VPC for the referenced security group, if applicable.

" + }, + "VpcPeeringConnectionId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the VPC peering connection, if applicable.

" + } + }, + "documentation":"

A relationship between a security group and a user.

" + }, + "AwsEc2SecurityGroupUserIdGroupPairList":{ + "type":"list", + "member":{"shape":"AwsEc2SecurityGroupUserIdGroupPair"} + }, + "AwsElasticsearchDomainDetails":{ + "type":"structure", + "members":{ + "AccessPolicies":{ + "shape":"NonEmptyString", + "documentation":"

IAM policy document specifying the access policies for the new Amazon ES domain.

" + }, + "DomainEndpointOptions":{ + "shape":"AwsElasticsearchDomainDomainEndpointOptions", + "documentation":"

Additional options for the domain endpoint.

" + }, + "DomainId":{ + "shape":"NonEmptyString", + "documentation":"

Unique identifier for an Amazon ES domain.

" + }, + "DomainName":{ + "shape":"NonEmptyString", + "documentation":"

Name of an Amazon ES domain.

Domain names are unique across all domains owned by the same account within an AWS Region.

Domain names must start with a lowercase letter and must be between 3 and 28 characters.

Valid characters are a-z (lowercase only), 0-9, and – (hyphen).

" + }, + "Endpoint":{ + "shape":"NonEmptyString", + "documentation":"

Domain-specific endpoint used to submit index, search, and data upload requests to an Amazon ES domain.

The endpoint is a service URL.

" + }, + "Endpoints":{ + "shape":"FieldMap", + "documentation":"

The key-value pair that exists if the Amazon ES domain uses VPC endpoints.

" + }, + "ElasticsearchVersion":{ + "shape":"NonEmptyString", + "documentation":"

Elasticsearch version.

" + }, + "EncryptionAtRestOptions":{ + "shape":"AwsElasticsearchDomainEncryptionAtRestOptions", + "documentation":"

Details about the configuration for encryption at rest.

" + }, + "NodeToNodeEncryptionOptions":{ + "shape":"AwsElasticsearchDomainNodeToNodeEncryptionOptions", + "documentation":"

Details about the configuration for node-to-node encryption.

" + }, + "VPCOptions":{ + "shape":"AwsElasticsearchDomainVPCOptions", + "documentation":"

Information that Amazon ES derives based on VPCOptions for the domain.

" + } + }, + "documentation":"

Information about an Elasticsearch domain.

" + }, + "AwsElasticsearchDomainDomainEndpointOptions":{ + "type":"structure", + "members":{ + "EnforceHTTPS":{ + "shape":"Boolean", + "documentation":"

Whether to require that all traffic to the domain arrive over HTTPS.

" + }, + "TLSSecurityPolicy":{ + "shape":"NonEmptyString", + "documentation":"

The TLS security policy to apply to the HTTPS endpoint of the Elasticsearch domain.

Valid values:

  • Policy-Min-TLS-1-0-2019-07, which supports TLSv1.0 and higher

  • Policy-Min-TLS-1-2-2019-07, which only supports TLSv1.2

" + } + }, + "documentation":"

Additional options for the domain endpoint, such as whether to require HTTPS for all traffic.

" + }, + "AwsElasticsearchDomainEncryptionAtRestOptions":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Whether encryption at rest is enabled.

" + }, + "KmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

The KMS key ID. Takes the form 1a2a3a4-1a2a-3a4a-5a6a-1a2a3a4a5a6a.

" + } + }, + "documentation":"

Details about the configuration for encryption at rest.

" + }, + "AwsElasticsearchDomainNodeToNodeEncryptionOptions":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Whether node-to-node encryption is enabled.

" + } + }, + "documentation":"

Details about the configuration for node-to-node encryption.

" + }, + "AwsElasticsearchDomainVPCOptions":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"NonEmptyStringList", + "documentation":"

The list of Availability Zones associated with the VPC subnets.

" + }, + "SecurityGroupIds":{ + "shape":"NonEmptyStringList", + "documentation":"

The list of security group IDs associated with the VPC endpoints for the domain.

" + }, + "SubnetIds":{ + "shape":"NonEmptyStringList", + "documentation":"

A list of subnet IDs associated with the VPC endpoints for the domain.

" + }, + "VPCId":{ + "shape":"NonEmptyString", + "documentation":"

ID for the VPC.

" + } + }, + "documentation":"

Information that Amazon ES derives based on VPCOptions for the domain.

" + }, + "AwsElbv2LoadBalancerDetails":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

The Availability Zones for the load balancer.

" + }, + "CanonicalHostedZoneId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the Amazon Route 53 hosted zone associated with the load balancer.

" + }, + "CreatedTime":{ + "shape":"NonEmptyString", + "documentation":"

The date and time the load balancer was created.

" + }, + "DNSName":{ + "shape":"NonEmptyString", + "documentation":"

The public DNS name of the load balancer.

" + }, + "IpAddressType":{ + "shape":"NonEmptyString", + "documentation":"

The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" + }, + "Scheme":{ + "shape":"NonEmptyString", + "documentation":"

Indicates whether the load balancer is Internet-facing or internal. The nodes of an Internet-facing load balancer have public IP addresses.

" + }, + "SecurityGroups":{ + "shape":"SecurityGroups", + "documentation":"

The IDs of the security groups for the load balancer.

" + }, + "State":{ + "shape":"LoadBalancerState", + "documentation":"

The state of the load balancer.

" + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"

The type of load balancer.

" + }, + "VpcId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the VPC for the load balancer.

" + } + }, + "documentation":"

Information about a load balancer.

" + }, "AwsIamAccessKeyDetails":{ "type":"structure", "members":{ "UserName":{ "shape":"NonEmptyString", - "documentation":"

The user associated with the IAM access key related to a finding.

" + "documentation":"

The user associated with the IAM access key related to a finding.

The UserName parameter has been replaced with the PrincipalName parameter because access keys can also be assigned to principals that are not IAM users.

", + "deprecated":true, + "deprecatedMessage":"This field is deprecated, use PrincipalName instead." }, "Status":{ "shape":"AwsIamAccessKeyStatus", @@ -781,6 +1397,18 @@ "CreatedAt":{ "shape":"NonEmptyString", "documentation":"

The creation date/time of the IAM access key related to a finding.

" + }, + "PrincipalId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the principal associated with an access key.

" + }, + "PrincipalType":{ + "shape":"NonEmptyString", + "documentation":"

The type of principal associated with an access key.

" + }, + "PrincipalName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the principal.

" } }, "documentation":"

IAM access key details related to a finding.

" @@ -792,6 +1420,415 @@ "Inactive" ] }, + "AwsIamRoleAssumeRolePolicyDocument":{ + "type":"string", + "max":131072, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u00A1-\\u00FF]+" + }, + "AwsIamRoleDetails":{ + "type":"structure", + "members":{ + "AssumeRolePolicyDocument":{ + "shape":"AwsIamRoleAssumeRolePolicyDocument", + "documentation":"

The trust policy that grants permission to assume the role.

" + }, + "CreateDate":{ + "shape":"NonEmptyString", + "documentation":"

The date and time, in ISO 8601 date-time format, when the role was created.

" + }, + "RoleId":{ + "shape":"NonEmptyString", + "documentation":"

The stable and unique string identifying the role.

" + }, + "RoleName":{ + "shape":"NonEmptyString", + "documentation":"

The friendly name that identifies the role.

" + }, + "MaxSessionDuration":{ + "shape":"Integer", + "documentation":"

The maximum session duration (in seconds) that you want to set for the specified role.

" + }, + "Path":{ + "shape":"NonEmptyString", + "documentation":"

The path to the role.

" + } + }, + "documentation":"

Contains information about an IAM role, including all of the role's policies.

" + }, + "AwsKmsKeyDetails":{ + "type":"structure", + "members":{ + "AWSAccountId":{ + "shape":"NonEmptyString", + "documentation":"

The twelve-digit account ID of the AWS account that owns the CMK.

" + }, + "CreationDate":{ + "shape":"Double", + "documentation":"

The date and time when the CMK was created.

" + }, + "KeyId":{ + "shape":"NonEmptyString", + "documentation":"

The globally unique identifier for the CMK.

" + }, + "KeyManager":{ + "shape":"NonEmptyString", + "documentation":"

The manager of the CMK. CMKs in your AWS account are either customer managed or AWS managed.

" + }, + "KeyState":{ + "shape":"NonEmptyString", + "documentation":"

The state of the CMK.

" + }, + "Origin":{ + "shape":"NonEmptyString", + "documentation":"

The source of the CMK's key material.

When this value is AWS_KMS, AWS KMS created the key material.

When this value is EXTERNAL, the key material was imported from your existing key management infrastructure or the CMK lacks key material.

When this value is AWS_CLOUDHSM, the key material was created in the AWS CloudHSM cluster associated with a custom key store.

" + } + }, + "documentation":"

Contains metadata about a customer master key (CMK).

" + }, + "AwsLambdaFunctionCode":{ + "type":"structure", + "members":{ + "S3Bucket":{ + "shape":"NonEmptyString", + "documentation":"

An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.

" + }, + "S3Key":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon S3 key of the deployment package.

" + }, + "S3ObjectVersion":{ + "shape":"NonEmptyString", + "documentation":"

For versioned objects, the version of the deployment package object to use.

" + }, + "ZipFile":{ + "shape":"NonEmptyString", + "documentation":"

The base64-encoded contents of the deployment package. AWS SDK and AWS CLI clients handle the encoding for you.

" + } + }, + "documentation":"

The code for the Lambda function. You can specify either an object in Amazon S3, or upload a deployment package directly.

" + }, + "AwsLambdaFunctionDeadLetterConfig":{ + "type":"structure", + "members":{ + "TargetArn":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.

" + } + }, + "documentation":"

The dead-letter queue for failed asynchronous invocations.

" + }, + "AwsLambdaFunctionDetails":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"AwsLambdaFunctionCode", + "documentation":"

An AwsLambdaFunctionCode object.

" + }, + "CodeSha256":{ + "shape":"NonEmptyString", + "documentation":"

The SHA256 hash of the function's deployment package.

" + }, + "DeadLetterConfig":{ + "shape":"AwsLambdaFunctionDeadLetterConfig", + "documentation":"

The function's dead letter queue.

" + }, + "Environment":{ + "shape":"AwsLambdaFunctionEnvironment", + "documentation":"

The function's environment variables.

" + }, + "FunctionName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the function.

" + }, + "Handler":{ + "shape":"NonEmptyString", + "documentation":"

The function that Lambda calls to begin executing your function.

" + }, + "KmsKeyArn":{ + "shape":"NonEmptyString", + "documentation":"

The KMS key that's used to encrypt the function's environment variables. This key is only returned if you've configured a customer managed CMK.

" + }, + "LastModified":{ + "shape":"NonEmptyString", + "documentation":"

The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + }, + "Layers":{ + "shape":"AwsLambdaFunctionLayerList", + "documentation":"

The function's layers.

" + }, + "MasterArn":{ + "shape":"NonEmptyString", + "documentation":"

For Lambda@Edge functions, the ARN of the master function.

" + }, + "MemorySize":{ + "shape":"Integer", + "documentation":"

The memory that's allocated to the function.

" + }, + "RevisionId":{ + "shape":"NonEmptyString", + "documentation":"

The latest updated revision of the function or alias.

" + }, + "Role":{ + "shape":"NonEmptyString", + "documentation":"

The function's execution role.

" + }, + "Runtime":{ + "shape":"NonEmptyString", + "documentation":"

The runtime environment for the Lambda function.

" + }, + "Timeout":{ + "shape":"Integer", + "documentation":"

The amount of time that Lambda allows a function to run before stopping it.

" + }, + "TracingConfig":{ + "shape":"AwsLambdaFunctionTracingConfig", + "documentation":"

The function's AWS X-Ray tracing configuration.

" + }, + "VpcConfig":{ + "shape":"AwsLambdaFunctionVpcConfig", + "documentation":"

The function's networking configuration.

" + }, + "Version":{ + "shape":"NonEmptyString", + "documentation":"

The version of the Lambda function.

" + } + }, + "documentation":"

Details about a function's configuration.

" + }, + "AwsLambdaFunctionEnvironment":{ + "type":"structure", + "members":{ + "Variables":{ + "shape":"FieldMap", + "documentation":"

Environment variable key-value pairs.

" + }, + "Error":{ + "shape":"AwsLambdaFunctionEnvironmentError", + "documentation":"

An AwsLambdaFunctionEnvironmentError object.

" + } + }, + "documentation":"

A function's environment variable settings.

" + }, + "AwsLambdaFunctionEnvironmentError":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"NonEmptyString", + "documentation":"

The error code.

" + }, + "Message":{ + "shape":"NonEmptyString", + "documentation":"

The error message.

" + } + }, + "documentation":"

Error messages for environment variables that couldn't be applied.

" + }, + "AwsLambdaFunctionLayer":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) of the function layer.

" + }, + "CodeSize":{ + "shape":"Integer", + "documentation":"

The size of the layer archive in bytes.

" + } + }, + "documentation":"

An AWS Lambda layer.

" + }, + "AwsLambdaFunctionLayerList":{ + "type":"list", + "member":{"shape":"AwsLambdaFunctionLayer"} + }, + "AwsLambdaFunctionTracingConfig":{ + "type":"structure", + "members":{ + "Mode":{ + "shape":"NonEmptyString", + "documentation":"

The tracing mode.

" + } + }, + "documentation":"

The function's AWS X-Ray tracing configuration.

" + }, + "AwsLambdaFunctionVpcConfig":{ + "type":"structure", + "members":{ + "SecurityGroupIds":{ + "shape":"NonEmptyStringList", + "documentation":"

A list of VPC security group IDs.

" + }, + "SubnetIds":{ + "shape":"NonEmptyStringList", + "documentation":"

A list of VPC subnet IDs.

" + }, + "VpcId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the VPC.

" + } + }, + "documentation":"

The VPC security groups and subnets that are attached to a Lambda function. For more information, see VPC Settings.

" + }, + "AwsLambdaLayerVersionDetails":{ + "type":"structure", + "members":{ + "Version":{ + "shape":"AwsLambdaLayerVersionNumber", + "documentation":"

The version number.

" + }, + "CompatibleRuntimes":{ + "shape":"NonEmptyStringList", + "documentation":"

The layer's compatible runtimes. Maximum number of 5 items.

Valid values: nodejs8.10 | nodejs10.x | nodejs12.x | java8 | java11 | python2.7 | python3.6 | python3.7 | python3.8 | dotnetcore1.0 | dotnetcore2.1 | go1.x | ruby2.5 | provided

" + }, + "CreatedDate":{ + "shape":"NonEmptyString", + "documentation":"

The date that the version was created, in ISO 8601 format. For example, 2018-11-27T15:10:45.123+0000.

" + } + }, + "documentation":"

Details about a Lambda layer version.

" + }, + "AwsLambdaLayerVersionNumber":{"type":"long"}, + "AwsRdsDbInstanceAssociatedRole":{ + "type":"structure", + "members":{ + "RoleArn":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that is associated with the DB instance.

" + }, + "FeatureName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the feature associated with the IAM role.

" + }, + "Status":{ + "shape":"NonEmptyString", + "documentation":"

Describes the state of the association between the IAM role and the DB instance. The Status property returns one of the following values:

  • ACTIVE - the IAM role ARN is associated with the DB instance and can be used to access other AWS services on your behalf.

  • PENDING - the IAM role ARN is being associated with the DB instance.

  • INVALID - the IAM role ARN is associated with the DB instance, but the DB instance is unable to assume the IAM role in order to access other AWS services on your behalf.

" + } + }, + "documentation":"

An AWS Identity and Access Management (IAM) role associated with the DB instance.

" + }, + "AwsRdsDbInstanceAssociatedRoles":{ + "type":"list", + "member":{"shape":"AwsRdsDbInstanceAssociatedRole"} + }, + "AwsRdsDbInstanceDetails":{ + "type":"structure", + "members":{ + "AssociatedRoles":{ + "shape":"AwsRdsDbInstanceAssociatedRoles", + "documentation":"

The AWS Identity and Access Management (IAM) roles associated with the DB instance.

" + }, + "CACertificateIdentifier":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the CA certificate for this DB instance.

" + }, + "DBClusterIdentifier":{ + "shape":"NonEmptyString", + "documentation":"

If the DB instance is a member of a DB cluster, contains the name of the DB cluster that the DB instance is a member of.

" + }, + "DBInstanceIdentifier":{ + "shape":"NonEmptyString", + "documentation":"

Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance.

" + }, + "DBInstanceClass":{ + "shape":"NonEmptyString", + "documentation":"

Contains the name of the compute and memory capacity class of the DB instance.

" + }, + "DbInstancePort":{ + "shape":"Integer", + "documentation":"

Specifies the port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port.

" + }, + "DbiResourceId":{ + "shape":"NonEmptyString", + "documentation":"

The AWS Region-unique, immutable identifier for the DB instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB instance is accessed.

" + }, + "DBName":{ + "shape":"NonEmptyString", + "documentation":"

The meaning of this parameter differs according to the database engine you use.

MySQL, MariaDB, SQL Server, PostgreSQL

Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance.

Oracle

Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters do not apply to an Oracle DB instance.

" + }, + "DeletionProtection":{ + "shape":"Boolean", + "documentation":"

Indicates whether the DB instance has deletion protection enabled.

When deletion protection is enabled, the database cannot be deleted.

" + }, + "Endpoint":{ + "shape":"AwsRdsDbInstanceEndpoint", + "documentation":"

Specifies the connection endpoint.

" + }, + "Engine":{ + "shape":"NonEmptyString", + "documentation":"

Provides the name of the database engine to use for this DB instance.

" + }, + "EngineVersion":{ + "shape":"NonEmptyString", + "documentation":"

Indicates the database engine version.

" + }, + "IAMDatabaseAuthenticationEnabled":{ + "shape":"Boolean", + "documentation":"

True if mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

IAM database authentication can be enabled for the following database engines.

  • For MySQL 5.6, minor version 5.6.34 or higher

  • For MySQL 5.7, minor version 5.7.16 or higher

  • Aurora 5.6 or higher

" + }, + "InstanceCreateTime":{ + "shape":"NonEmptyString", + "documentation":"

Provides the date and time the DB instance was created.

" + }, + "KmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB instance.

" + }, + "PubliclyAccessible":{ + "shape":"Boolean", + "documentation":"

Specifies the accessibility options for the DB instance.

A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address.

A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

" + }, + "StorageEncrypted":{ + "shape":"Boolean", + "documentation":"

Specifies whether the DB instance is encrypted.

" + }, + "TdeCredentialArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN from the key store with which the instance is associated for TDE encryption.

" + }, + "VpcSecurityGroups":{ + "shape":"AwsRdsDbInstanceVpcSecurityGroups", + "documentation":"

A list of VPC security groups that the DB instance belongs to.

" + } + }, + "documentation":"

Contains the details of an Amazon RDS DB instance.

" + }, + "AwsRdsDbInstanceEndpoint":{ + "type":"structure", + "members":{ + "Address":{ + "shape":"NonEmptyString", + "documentation":"

Specifies the DNS address of the DB instance.

" + }, + "Port":{ + "shape":"Integer", + "documentation":"

Specifies the port that the database engine is listening on.

" + }, + "HostedZoneId":{ + "shape":"NonEmptyString", + "documentation":"

Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

" + } + }, + "documentation":"

Specifies the connection endpoint.

" + }, + "AwsRdsDbInstanceVpcSecurityGroup":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{ + "shape":"NonEmptyString", + "documentation":"

The name of the VPC security group.

" + }, + "Status":{ + "shape":"NonEmptyString", + "documentation":"

The status of the VPC security group.

" + } + }, + "documentation":"

A VPC security group that the DB instance belongs to.

" + }, + "AwsRdsDbInstanceVpcSecurityGroups":{ + "type":"list", + "member":{"shape":"AwsRdsDbInstanceVpcSecurityGroup"} + }, "AwsS3BucketDetails":{ "type":"structure", "members":{ @@ -869,11 +1906,11 @@ }, "Confidence":{ "shape":"Integer", - "documentation":"

A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.

" + "documentation":"

A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.

Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.

" }, "Criticality":{ "shape":"Integer", - "documentation":"

The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.

" + "documentation":"

The level of importance assigned to the resources associated with the finding.

A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.

" }, "Title":{ "shape":"NonEmptyString", @@ -913,7 +1950,7 @@ }, "ThreatIntelIndicators":{ "shape":"ThreatIntelIndicatorList", - "documentation":"

Threat intel details related to a finding.

" + "documentation":"

Threat intelligence details related to a finding.

" }, "Resources":{ "shape":"ResourceList", @@ -999,11 +2036,11 @@ }, "Confidence":{ "shape":"NumberFilterList", - "documentation":"

A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.

" + "documentation":"

A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.

Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.

" }, "Criticality":{ "shape":"NumberFilterList", - "documentation":"

The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.

" + "documentation":"

The level of importance assigned to the resources associated with the finding.

A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.

" }, "Title":{ "shape":"StringFilterList", @@ -1123,27 +2160,27 @@ }, "ThreatIntelIndicatorType":{ "shape":"StringFilterList", - "documentation":"

The type of a threat intel indicator.

" + "documentation":"

The type of a threat intelligence indicator.

" }, "ThreatIntelIndicatorValue":{ "shape":"StringFilterList", - "documentation":"

The value of a threat intel indicator.

" + "documentation":"

The value of a threat intelligence indicator.

" }, "ThreatIntelIndicatorCategory":{ "shape":"StringFilterList", - "documentation":"

The category of a threat intel indicator.

" + "documentation":"

The category of a threat intelligence indicator.

" }, "ThreatIntelIndicatorLastObservedAt":{ "shape":"DateFilterList", - "documentation":"

The date/time of the last observation of a threat intel indicator.

" + "documentation":"

The date/time of the last observation of a threat intelligence indicator.

" }, "ThreatIntelIndicatorSource":{ "shape":"StringFilterList", - "documentation":"

The source of the threat intel.

" + "documentation":"

The source of the threat intelligence.

" }, "ThreatIntelIndicatorSourceUrl":{ "shape":"StringFilterList", - "documentation":"

The URL for more details from the source of the threat intel.

" + "documentation":"

The URL for more details from the source of the threat intelligence.

" }, "ResourceType":{ "shape":"StringFilterList", @@ -1199,7 +2236,7 @@ }, "ResourceAwsEc2InstanceLaunchedAt":{ "shape":"DateFilterList", - "documentation":"

The date/time the instance was launched.

" + "documentation":"

The date and time the instance was launched.

" }, "ResourceAwsS3BucketOwnerId":{ "shape":"StringFilterList", @@ -1282,12 +2319,130 @@ "documentation":"

A keyword for a finding.

" } }, - "documentation":"

A collection of attributes that are applied to all active Security Hub-aggregated findings and that result in a subset of findings that are included in this insight.

" + "documentation":"

A collection of attributes that are applied to all active Security Hub-aggregated findings and that result in a subset of findings that are included in this insight.
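
As a rough sketch of how these filter attributes are consumed through botocore, the boto3 Security Hub client below pages through findings that match two of the filter fields defined in this shape; the region, filter values, and MaxResults setting are illustrative assumptions rather than part of the model.

    import boto3

    securityhub = boto3.client("securityhub", region_name="us-west-2")  # placeholder region

    # String filters take Value/Comparison pairs; number filters such as
    # Criticality take Gte/Lte/Eq bounds.
    filters = {
        "ComplianceStatus": [{"Value": "FAILED", "Comparison": "EQUALS"}],
        "Criticality": [{"Gte": 80}],
    }

    kwargs = {"Filters": filters, "MaxResults": 100}
    while True:
        page = securityhub.get_findings(**kwargs)
        for finding in page["Findings"]:
            print(finding["Id"], finding.get("Title"))
        if "NextToken" not in page:
            break
        kwargs["NextToken"] = page["NextToken"]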

" }, "AwsSecurityFindingList":{ "type":"list", "member":{"shape":"AwsSecurityFinding"} }, + "AwsSnsTopicDetails":{ + "type":"structure", + "members":{ + "KmsMasterKeyId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK.

" + }, + "Subscription":{ + "shape":"AwsSnsTopicSubscriptionList", + "documentation":"

Subscription is an embedded property that describes the subscription endpoints of an Amazon SNS topic.

" + }, + "TopicName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the topic.

" + }, + "Owner":{ + "shape":"NonEmptyString", + "documentation":"

The subscription's owner.

" + } + }, + "documentation":"

A wrapper type for the topic's Amazon Resource Name (ARN).

" + }, + "AwsSnsTopicSubscription":{ + "type":"structure", + "members":{ + "Endpoint":{ + "shape":"NonEmptyString", + "documentation":"

The subscription's endpoint (format depends on the protocol).

" + }, + "Protocol":{ + "shape":"NonEmptyString", + "documentation":"

The subscription's protocol.

" + } + }, + "documentation":"

A wrapper type for the attributes of an Amazon SNS subscription.

" + }, + "AwsSnsTopicSubscriptionList":{ + "type":"list", + "member":{"shape":"AwsSnsTopicSubscription"} + }, + "AwsSqsQueueDetails":{ + "type":"structure", + "members":{ + "KmsDataKeyReusePeriodSeconds":{ + "shape":"Integer", + "documentation":"

The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.

" + }, + "KmsMasterKeyId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.

" + }, + "QueueName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the new queue.

" + }, + "DeadLetterTargetArn":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

" + } + }, + "documentation":"

Data about a queue.

" + }, + "AwsWafWebAclDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyString", + "documentation":"

A friendly name or description of the WebACL. You can't change the name of a WebACL after you create it.

" + }, + "DefaultAction":{ + "shape":"NonEmptyString", + "documentation":"

The action to perform if none of the Rules contained in the WebACL match.

" + }, + "Rules":{ + "shape":"AwsWafWebAclRuleList", + "documentation":"

An array that contains the action for each rule in a WebACL, the priority of the rule, and the ID of the rule.

" + }, + "WebAclId":{ + "shape":"NonEmptyString", + "documentation":"

A unique identifier for a WebACL.

" + } + }, + "documentation":"

Details about a WAF WebACL.

" + }, + "AwsWafWebAclRule":{ + "type":"structure", + "members":{ + "Action":{ + "shape":"WafAction", + "documentation":"

Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule.

" + }, + "ExcludedRules":{ + "shape":"WafExcludedRuleList", + "documentation":"

Rules to exclude from a rule group.

" + }, + "OverrideAction":{ + "shape":"WafOverrideAction", + "documentation":"

Use the OverrideAction to test your RuleGroup.

Any rule in a RuleGroup can potentially block a request. If you set the OverrideAction to None, the RuleGroup blocks a request if any individual rule in the RuleGroup matches the request and is configured to block that request.

However, if you first want to test the RuleGroup, set the OverrideAction to Count. The RuleGroup then overrides any block action specified by individual rules contained within the group. Instead of blocking matching requests, those requests are counted.

ActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a WebACL. In this case you do not use ActivatedRule|Action. For all other update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction.

" + }, + "Priority":{ + "shape":"Integer", + "documentation":"

Specifies the order in which the Rules in a WebACL are evaluated. Rules with a lower value for Priority are evaluated before Rules with a higher value. The value must be a unique integer. If you add multiple Rules to a WebACL, the values do not need to be consecutive.

" + }, + "RuleId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier for a Rule.

" + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"

The rule type.

Valid values: REGULAR | RATE_BASED | GROUP

The default is REGULAR.

" + } + }, + "documentation":"

Details for a rule in a WAF WebACL.

" + }, + "AwsWafWebAclRuleList":{ + "type":"list", + "member":{"shape":"AwsWafWebAclRule"} + }, "BatchDisableStandardsRequest":{ "type":"structure", "required":["StandardsSubscriptionArns"], @@ -1332,7 +2487,7 @@ "members":{ "Findings":{ "shape":"AwsSecurityFindingList", - "documentation":"

A list of findings to import. To successfully import a finding, it must follow the AWS Security Finding Format.

" + "documentation":"

A list of findings to import. To successfully import a finding, it must follow the AWS Security Finding Format. Maximum of 100 findings per request.
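
Since BatchImportFindings accepts at most 100 findings per request, larger submissions need to be chunked by the caller. A minimal sketch, assuming findings is an already-built list of ASFF-formatted dictionaries:

    import boto3

    securityhub = boto3.client("securityhub")

    def import_findings(findings):
        """Send findings in batches of 100 and collect any that failed to import."""
        failed = []
        for start in range(0, len(findings), 100):
            batch = findings[start:start + 100]
            response = securityhub.batch_import_findings(Findings=batch)
            failed.extend(response.get("FailedFindings", []))
        return failed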

" } } }, @@ -1353,7 +2508,7 @@ }, "FailedFindings":{ "shape":"ImportFindingsErrorList", - "documentation":"

The list of the findings that failed to import.

" + "documentation":"

The list of findings that failed to import.

" } } }, @@ -1368,9 +2523,13 @@ "Status":{ "shape":"ComplianceStatus", "documentation":"

The result of a compliance check.

" + }, + "RelatedRequirements":{ + "shape":"RelatedRequirementsList", + "documentation":"

List of requirements that are related to a standards control.

" } }, - "documentation":"

Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard (for example, CIS AWS Foundations). Contains compliance-related finding details.

" + "documentation":"

Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard (for example, CIS AWS Foundations). Contains compliance-related finding details.

Values include the following:

  • Allowed values are the following:

    • PASSED - Compliance check passed for all evaluated resources.

    • WARNING - Some information is missing or this check is not supported given your configuration.

    • FAILED - Compliance check failed for at least one evaluated resource.

    • NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the AWS Config evaluation was NOT_APPLICABLE. If the AWS Config evaluation result was NOT_APPLICABLE, then after 3 days, Security Hub automatically archives the finding.

" }, "ComplianceStatus":{ "type":"string", @@ -1403,6 +2562,13 @@ }, "documentation":"

Container details related to a finding.

" }, + "ControlStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "CreateActionTargetRequest":{ "type":"structure", "required":[ @@ -1449,7 +2615,7 @@ }, "Filters":{ "shape":"AwsSecurityFindingFilters", - "documentation":"

One or more attributes used to filter the findings included in the insight. Only findings that match the criteria defined in the filters are included in the insight.

" + "documentation":"

One or more attributes used to filter the findings included in the insight. The insight only includes findings that match the criteria defined in the filters.

" }, "GroupByAttribute":{ "shape":"NonEmptyString", @@ -1472,7 +2638,7 @@ "members":{ "AccountDetails":{ "shape":"AccountDetailsList", - "documentation":"

A list of account ID and email address pairs of the accounts to associate with the Security Hub master account.

" + "documentation":"

The list of accounts to associate with the Security Hub master account. For each account, the list includes the account ID and the email address.
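
A typical flow for the member-account operations is to register accounts with CreateMembers and then send invitations with InviteMembers, checking UnprocessedAccounts after each call; the account ID and email below are placeholders.

    import boto3

    securityhub = boto3.client("securityhub")

    accounts = [{"AccountId": "111122223333", "Email": "security@example.com"}]  # placeholders

    created = securityhub.create_members(AccountDetails=accounts)
    invited = securityhub.invite_members(AccountIds=[a["AccountId"] for a in accounts])

    # Each unprocessed entry carries the account ID and the reason it was skipped.
    for result in created["UnprocessedAccounts"] + invited["UnprocessedAccounts"]:
        print(result["AccountId"], result["ProcessingResult"])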

" } } }, @@ -1481,7 +2647,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

A list of account ID and email address pairs of the AWS accounts that weren't processed.

" + "documentation":"

The list of AWS accounts that were not processed. For each account, the list includes the account ID and the email address.

" } } }, @@ -1531,7 +2697,7 @@ "members":{ "AccountIds":{ "shape":"AccountIdList", - "documentation":"

A list of account IDs that specify the accounts that invitations to Security Hub are declined from.

" + "documentation":"

The list of account IDs for the accounts from which to decline the invitations to Security Hub.

" } } }, @@ -1540,7 +2706,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

A list of account ID and email address pairs of the AWS accounts that weren't processed.

" + "documentation":"

The list of AWS accounts that were not processed. For each account, the list includes the account ID and the email address.

" } } }, @@ -1594,7 +2760,7 @@ "members":{ "AccountIds":{ "shape":"AccountIdList", - "documentation":"

A list of the account IDs that sent the invitations to delete.

" + "documentation":"

The list of the account IDs that sent the invitations to delete.

" } } }, @@ -1603,7 +2769,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

A list of account ID and email address pairs of the AWS accounts that invitations weren't deleted for.

" + "documentation":"

The list of AWS accounts for which the invitations were not deleted. For each account, the list includes the account ID and the email address.

" } } }, @@ -1612,7 +2778,7 @@ "members":{ "AccountIds":{ "shape":"AccountIdList", - "documentation":"

A list of account IDs of the member accounts to delete.

" + "documentation":"

The list of account IDs for the member accounts to delete.

" } } }, @@ -1621,7 +2787,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

A list of account ID and email address pairs of the AWS accounts that weren't deleted.

" + "documentation":"

The list of AWS accounts that were not deleted. For each account, the list includes the account ID and the email address.

" } } }, @@ -1672,7 +2838,7 @@ "members":{ "HubArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the Hub resource retrieved.

" + "documentation":"

The ARN of the Hub resource that was retrieved.

" }, "SubscribedAt":{ "shape":"NonEmptyString", @@ -1711,6 +2877,43 @@ } } }, + "DescribeStandardsControlsRequest":{ + "type":"structure", + "required":["StandardsSubscriptionArn"], + "members":{ + "StandardsSubscriptionArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of a resource that represents your subscription to a supported standard.

", + "location":"uri", + "locationName":"StandardsSubscriptionArn" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

For requests to get the next page of results, the pagination token that was returned with the previous set of results. The initial request does not include a pagination token.

", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of compliance standard controls to return.

", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "DescribeStandardsControlsResponse":{ + "type":"structure", + "members":{ + "Controls":{ + "shape":"StandardsControls", + "documentation":"

A list of compliance standards controls.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are more compliance standard controls remaining in the results, then this is the pagination token to use to request the next page of compliance standard controls.
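
A hedged sketch of walking every control for one standards subscription with this NextToken-based pagination; the subscription ARN is a placeholder.

    import boto3

    securityhub = boto3.client("securityhub")

    subscription_arn = (  # placeholder ARN
        "arn:aws:securityhub:us-west-2:111122223333:subscription/"
        "cis-aws-foundations-benchmark/v/1.2.0"
    )

    kwargs = {"StandardsSubscriptionArn": subscription_arn}
    while True:
        page = securityhub.describe_standards_controls(**kwargs)
        for control in page["Controls"]:
            print(control["ControlId"], control["ControlStatus"], control["SeverityRating"])
        if "NextToken" not in page:
            break
        kwargs["NextToken"] = page["NextToken"]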

" + } + } + }, "DisableImportFindingsForProductRequest":{ "type":"structure", "required":["ProductSubscriptionArn"], @@ -1806,11 +3009,11 @@ "members":{ "StandardsSubscriptionArns":{ "shape":"StandardsSubscriptionArns", - "documentation":"

A list of the standards subscription ARNs for the standards to retrieve.

" + "documentation":"

The list of the standards subscription ARNs for the standards to retrieve.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

Paginates results. On your first call to the GetEnabledStandards operation, set the value of this parameter to NULL. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.

" + "documentation":"

Paginates results. On your first call to the GetEnabledStandards operation, set the value of this parameter to NULL.

For subsequent calls to the operation, to continue listing data, set nextToken in the request to the value of nextToken from the previous response.

" }, "MaxResults":{ "shape":"MaxResults", @@ -1823,7 +3026,7 @@ "members":{ "StandardsSubscriptions":{ "shape":"StandardsSubscriptions", - "documentation":"

A list of StandardsSubscriptions objects that include information about the enabled standards.

" + "documentation":"

The list of StandardsSubscriptions objects that include information about the enabled standards.

" }, "NextToken":{ "shape":"NextToken", @@ -1836,15 +3039,15 @@ "members":{ "Filters":{ "shape":"AwsSecurityFindingFilters", - "documentation":"

The findings attributes used to define a condition to filter the findings returned.

" + "documentation":"

The finding attributes used to define a condition to filter the returned findings.

" }, "SortCriteria":{ "shape":"SortCriteria", - "documentation":"

Findings attributes used to sort the list of findings returned.

" + "documentation":"

The finding attributes used to sort the list of returned findings.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

Paginates results. On your first call to the GetFindings operation, set the value of this parameter to NULL. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.

" + "documentation":"

Paginates results. On your first call to the GetFindings operation, set the value of this parameter to NULL.

For subsequent calls to the operation, to continue listing data, set nextToken in the request to the value of nextToken from the previous response.

" }, "MaxResults":{ "shape":"MaxResults", @@ -1872,7 +3075,7 @@ "members":{ "InsightArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the insight whose results you want to see.

", + "documentation":"

The ARN of the insight for which to return results.

", "location":"uri", "locationName":"InsightArn" } @@ -1893,15 +3096,15 @@ "members":{ "InsightArns":{ "shape":"ArnList", - "documentation":"

The ARNs of the insights that you want to describe.

" + "documentation":"

The ARNs of the insights to describe.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

Paginates results. On your first call to the GetInsights operation, set the value of this parameter to NULL. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.

" + "documentation":"

Paginates results. On your first call to the GetInsights operation, set the value of this parameter to NULL. For subsequent calls to the operation, to continue listing data, set nextToken in the request to the value of nextToken from the previous response.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of items that you want in the response.

" + "documentation":"

The maximum number of items to return in the response.

" } } }, @@ -1929,7 +3132,7 @@ "members":{ "InvitationsCount":{ "shape":"Integer", - "documentation":"

The number of all membership invitations sent to this Security Hub member account, not including the currently accepted invitation.

" + "documentation":"

The number of all membership invitations sent to this Security Hub member account, not including the currently accepted invitation.

" } } }, @@ -1953,7 +3156,7 @@ "members":{ "AccountIds":{ "shape":"AccountIdList", - "documentation":"

A list of account IDs for the Security Hub member accounts that you want to return the details for.

" + "documentation":"

The list of account IDs for the Security Hub member accounts to return the details for.

" } } }, @@ -1962,11 +3165,11 @@ "members":{ "Members":{ "shape":"MemberList", - "documentation":"

A list of details about the Security Hub member accounts.

" + "documentation":"

The list of details about the Security Hub member accounts.

" }, "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

A list of account ID and email address pairs of the AWS accounts that couldn't be processed.

" + "documentation":"

The list of AWS accounts that could not be processed. For each account, the list includes the account ID and the email address.

" } } }, @@ -1991,7 +3194,7 @@ "documentation":"

The message of the error made during the BatchImportFindings operation.

" } }, - "documentation":"

Includes details of the list of the findings that can't be imported.

" + "documentation":"

Includes details of the list of the findings that cannot be imported.

" }, "ImportFindingsErrorList":{ "type":"list", @@ -2016,7 +3219,7 @@ }, "Filters":{ "shape":"AwsSecurityFindingFilters", - "documentation":"

One or more attributes used to filter the findings included in the insight. Only findings that match the criteria defined in the filters are included in the insight.

" + "documentation":"

One or more attributes used to filter the findings included in the insight. The insight only includes findings that match the criteria defined in the filters.

" }, "GroupByAttribute":{ "shape":"NonEmptyString", @@ -2122,7 +3325,7 @@ }, "MemberStatus":{ "shape":"NonEmptyString", - "documentation":"

The current status of the association between member and master accounts.

" + "documentation":"

The current status of the association between the member and master accounts.

" } }, "documentation":"

Details about an invitation.

" @@ -2136,7 +3339,7 @@ "members":{ "AccountIds":{ "shape":"AccountIdList", - "documentation":"

A list of IDs of the AWS accounts that you want to invite to Security Hub as members.

" + "documentation":"

The list of account IDs of the AWS accounts to invite to Security Hub as members.

" } } }, @@ -2145,7 +3348,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"ResultList", - "documentation":"

A list of account ID and email address pairs of the AWS accounts that couldn't be processed.

" + "documentation":"

The list of AWS accounts that could not be processed. For each account, the list includes the account ID and the email address.

" } } }, @@ -2192,13 +3395,13 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

Paginates results. On your first call to the ListEnabledProductsForImport operation, set the value of this parameter to NULL. For subsequent calls to the operation, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "documentation":"

Paginates results. On your first call to the ListEnabledProductsForImport operation, set the value of this parameter to NULL. For subsequent calls to the operation, to continue listing data, set nextToken in the request to the value of NextToken from the previous response.

", "location":"querystring", "locationName":"NextToken" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of items that you want in the response.

", + "documentation":"

The maximum number of items to return in the response.

", "location":"querystring", "locationName":"MaxResults" } @@ -2209,7 +3412,7 @@ "members":{ "ProductSubscriptions":{ "shape":"ProductSubscriptionArnList", - "documentation":"

A list of ARNs for the resources that represent your subscriptions to products.

" + "documentation":"

The list of ARNs for the resources that represent your subscriptions to products.

" }, "NextToken":{ "shape":"NextToken", @@ -2222,13 +3425,13 @@ "members":{ "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of items that you want in the response.

", + "documentation":"

The maximum number of items to return in the response.

", "location":"querystring", "locationName":"MaxResults" }, "NextToken":{ "shape":"NextToken", - "documentation":"

Paginates results. On your first call to the ListInvitations operation, set the value of this parameter to NULL. For subsequent calls to the operation, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "documentation":"

Paginates results. On your first call to the ListInvitations operation, set the value of this parameter to NULL. For subsequent calls to the operation, to continue listing data, set nextToken in the request to the value of NextToken from the previous response.

", "location":"querystring", "locationName":"NextToken" } @@ -2252,19 +3455,19 @@ "members":{ "OnlyAssociated":{ "shape":"Boolean", - "documentation":"

Specifies which member accounts the response includes based on their relationship status with the master account. The default value is TRUE. If onlyAssociated is set to TRUE, the response includes member accounts whose relationship status with the master is set to ENABLED or DISABLED. If onlyAssociated is set to FALSE, the response includes all existing member accounts.

", + "documentation":"

Specifies which member accounts to include in the response based on their relationship status with the master account. The default value is TRUE.

If onlyAssociated is set to TRUE, the response includes member accounts whose relationship status with the master is set to ENABLED or DISABLED.

If onlyAssociated is set to FALSE, the response includes all existing member accounts.

", "location":"querystring", "locationName":"OnlyAssociated" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of items that you want in the response.

", + "documentation":"

The maximum number of items to return in the response.

", "location":"querystring", "locationName":"MaxResults" }, "NextToken":{ "shape":"NextToken", - "documentation":"

Paginates results. Set the value of this parameter to NULL on your first call to the ListMembers operation. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.

", + "documentation":"

Paginates results. On your first call to the ListMembers operation, set the value of this parameter to NULL. For subsequent calls to the operation, to continue listing data, set nextToken in the request to the value of nextToken from the previous response.

", "location":"querystring", "locationName":"NextToken" } @@ -2304,6 +3507,20 @@ } } }, + "LoadBalancerState":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"NonEmptyString", + "documentation":"

The state code. The initial state of the load balancer is provisioning.

After the load balancer is fully set up and ready to route traffic, its state is active.

If the load balancer could not be set up, its state is failed.

" + }, + "Reason":{ + "shape":"NonEmptyString", + "documentation":"

A description of the state.

" + } + }, + "documentation":"

Information about the state of the load balancer.

" + }, "Malware":{ "type":"structure", "required":["Name"], @@ -2486,6 +3703,10 @@ "type":"string", "pattern":".*\\S.*" }, + "NonEmptyStringList":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, "Note":{ "type":"structure", "required":[ @@ -2677,6 +3898,10 @@ "type":"list", "member":{"shape":"RelatedFinding"} }, + "RelatedRequirementsList":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, "Remediation":{ "type":"structure", "members":{ @@ -2696,7 +3921,7 @@ "members":{ "Type":{ "shape":"NonEmptyString", - "documentation":"

The type of the resource that details are provided for.

" + "documentation":"

The type of the resource that details are provided for. If possible, set Type to one of the supported resource types. For example, if the resource is an EC2 instance, then set Type to AwsEc2Instance.

If the resource does not match any of the provided types, then set Type to Other.

" }, "Id":{ "shape":"NonEmptyString", @@ -2738,10 +3963,34 @@ "ResourceDetails":{ "type":"structure", "members":{ + "AwsCodeBuildProject":{ + "shape":"AwsCodeBuildProjectDetails", + "documentation":"

Details for an AWS CodeBuild project.

" + }, + "AwsCloudFrontDistribution":{ + "shape":"AwsCloudFrontDistributionDetails", + "documentation":"

Details about a CloudFront distribution.

" + }, "AwsEc2Instance":{ "shape":"AwsEc2InstanceDetails", "documentation":"

Details about an Amazon EC2 instance related to a finding.

" }, + "AwsEc2NetworkInterface":{ + "shape":"AwsEc2NetworkInterfaceDetails", + "documentation":"

Details for an AWS EC2 network interface.

" + }, + "AwsEc2SecurityGroup":{ + "shape":"AwsEc2SecurityGroupDetails", + "documentation":"

Details for an EC2 security group.

" + }, + "AwsElbv2LoadBalancer":{ + "shape":"AwsElbv2LoadBalancerDetails", + "documentation":"

Details about a load balancer.

" + }, + "AwsElasticsearchDomain":{ + "shape":"AwsElasticsearchDomainDetails", + "documentation":"

Details for an Elasticsearch domain.

" + }, "AwsS3Bucket":{ "shape":"AwsS3BucketDetails", "documentation":"

Details about an Amazon S3 Bucket related to a finding.

" @@ -2750,16 +3999,48 @@ "shape":"AwsIamAccessKeyDetails", "documentation":"

Details about an IAM access key related to a finding.

" }, + "AwsIamRole":{ + "shape":"AwsIamRoleDetails", + "documentation":"

Details about an IAM role.

" + }, + "AwsKmsKey":{ + "shape":"AwsKmsKeyDetails", + "documentation":"

Details about a KMS key.

" + }, + "AwsLambdaFunction":{ + "shape":"AwsLambdaFunctionDetails", + "documentation":"

Details about a Lambda function.

" + }, + "AwsLambdaLayerVersion":{ + "shape":"AwsLambdaLayerVersionDetails", + "documentation":"

Details for a Lambda layer version.

" + }, + "AwsRdsDbInstance":{ + "shape":"AwsRdsDbInstanceDetails", + "documentation":"

Details for an RDS database instance.

" + }, + "AwsSnsTopic":{ + "shape":"AwsSnsTopicDetails", + "documentation":"

Details about an SNS topic.

" + }, + "AwsSqsQueue":{ + "shape":"AwsSqsQueueDetails", + "documentation":"

Details about an SQS queue.

" + }, + "AwsWafWebAcl":{ + "shape":"AwsWafWebAclDetails", + "documentation":"

Details for a WAF WebACL.

" + }, "Container":{ "shape":"ContainerDetails", "documentation":"

Details about a container resource related to a finding.

" }, "Other":{ "shape":"FieldMap", - "documentation":"

Details about a resource that doesn't have a specific type defined.

" + "documentation":"

Details about a resource that are not available in a type-specific details object. Use the Other object in the following cases.

  • The type-specific object does not contain all of the fields that you want to populate. In this case, first use the type-specific object to populate those fields. Use the Other object to populate the fields that are missing from the type-specific object.

  • The resource type does not have a corresponding object. This includes resources for which the type is Other.

" } }, - "documentation":"

Additional details about a resource related to a finding.

" + "documentation":"

Additional details about a resource related to a finding.

To provide the details, use the object that corresponds to the resource type. For example, if the resource type is AwsEc2Instance, then you use the AwsEc2Instance object to provide the details.

If the type-specific object does not contain all of the fields you want to populate, then you use the Other object to populate those additional fields.

You also use the Other object to populate the details when the selected type does not have a corresponding object.
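
Translated into the Resources entry of a finding, that guidance looks roughly like the following; every identifier and value here is illustrative only.

    # Supported type: use the matching detail object, and push fields the typed
    # object cannot hold into Other.
    ec2_resource = {
        "Type": "AwsEc2Instance",
        "Id": "arn:aws:ec2:us-west-2:111122223333:instance/i-0abcd1234efgh5678",  # placeholder
        "Partition": "aws",
        "Region": "us-west-2",
        "Details": {
            "AwsEc2Instance": {"Type": "t2.micro", "VpcId": "vpc-0123456789abcdef0"},
            "Other": {"Environment": "staging"},
        },
    }

    # No corresponding detail object: set Type to Other and use the Other map alone.
    custom_resource = {
        "Type": "Other",
        "Id": "internal-asset-42",  # placeholder
        "Details": {"Other": {"Owner": "platform-team"}},
    }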

" }, "ResourceList":{ "type":"list", @@ -2780,19 +4061,23 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

An AWS account ID of the account that wasn't be processed.

" + "documentation":"

An AWS account ID of the account that was not processed.

" }, "ProcessingResult":{ "shape":"NonEmptyString", - "documentation":"

The reason that the account wasn't be processed.

" + "documentation":"

The reason that the account was not processed.

" } }, - "documentation":"

Details about the account that wasn't processed.

" + "documentation":"

Details about the account that was not processed.

" }, "ResultList":{ "type":"list", "member":{"shape":"Result"} }, + "SecurityGroups":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, "Severity":{ "type":"structure", "required":["Normalized"], @@ -2808,6 +4093,15 @@ }, "documentation":"

The severity of the finding.

" }, + "SeverityRating":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH", + "CRITICAL" + ] + }, "SortCriteria":{ "type":"list", "member":{"shape":"SortCriterion"} @@ -2833,6 +4127,52 @@ "desc" ] }, + "StandardsControl":{ + "type":"structure", + "members":{ + "StandardsControlArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the compliance standard control.

" + }, + "ControlStatus":{ + "shape":"ControlStatus", + "documentation":"

The current status of the compliance standard control. Indicates whether the control is enabled or disabled. Security Hub does not check against disabled controls.

" + }, + "DisabledReason":{ + "shape":"NonEmptyString", + "documentation":"

The reason provided for the most recent change in status for the control.

" + }, + "ControlStatusUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the status of the compliance standard control was most recently updated.

" + }, + "ControlId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the compliance standard control.

" + }, + "Title":{ + "shape":"NonEmptyString", + "documentation":"

The title of the compliance standard control.

" + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

The longer description of the compliance standard control. Provides information about what the control is checking for.

" + }, + "RemediationUrl":{ + "shape":"NonEmptyString", + "documentation":"

A link to remediation information for the control in the Security Hub user documentation.

" + }, + "SeverityRating":{ + "shape":"SeverityRating", + "documentation":"

The severity of findings generated from this compliance standard control.

The finding severity is based on an assessment of how easy it would be to compromise AWS resources if the compliance issue is detected.

" + } + }, + "documentation":"

Details for an individual compliance standard control.

" + }, + "StandardsControls":{ + "type":"list", + "member":{"shape":"StandardsControl"} + }, "StandardsInputParameterMap":{ "type":"map", "key":{"shape":"NonEmptyString"}, @@ -2988,30 +4328,30 @@ "members":{ "Type":{ "shape":"ThreatIntelIndicatorType", - "documentation":"

The type of a threat intel indicator.

" + "documentation":"

The type of threat intelligence indicator.

" }, "Value":{ "shape":"NonEmptyString", - "documentation":"

The value of a threat intel indicator.

" + "documentation":"

The value of a threat intelligence indicator.

" }, "Category":{ "shape":"ThreatIntelIndicatorCategory", - "documentation":"

The category of a threat intel indicator.

" + "documentation":"

The category of a threat intelligence indicator.

" }, "LastObservedAt":{ "shape":"NonEmptyString", - "documentation":"

The date and time when the most recent instance of a threat intel indicator was observed.

" + "documentation":"

The date and time when the most recent instance of a threat intelligence indicator was observed.

" }, "Source":{ "shape":"NonEmptyString", - "documentation":"

The source of the threat intel indicator.

" + "documentation":"

The source of the threat intelligence indicator.

" }, "SourceUrl":{ "shape":"NonEmptyString", - "documentation":"

The URL to the page or site where you can get more information about the threat intel indicator.

" + "documentation":"

The URL to the page or site where you can get more information about the threat intelligence indicator.

" } }, - "documentation":"

Details about the threat intel related to a finding.

" + "documentation":"

Details about the threat intelligence related to a finding.

" }, "ThreatIntelIndicatorCategory":{ "type":"string", @@ -3155,6 +4495,31 @@ "members":{ } }, + "UpdateStandardsControlRequest":{ + "type":"structure", + "required":["StandardsControlArn"], + "members":{ + "StandardsControlArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the compliance standard control to enable or disable.

", + "location":"uri", + "locationName":"StandardsControlArn" + }, + "ControlStatus":{ + "shape":"ControlStatus", + "documentation":"

The updated status of the compliance standard control.

" + }, + "DisabledReason":{ + "shape":"NonEmptyString", + "documentation":"

A description of the reason why you are disabling a compliance standard control.
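
A short sketch of disabling a single control through this shape, with the control ARN as a placeholder.

    import boto3

    securityhub = boto3.client("securityhub")

    securityhub.update_standards_control(
        StandardsControlArn=(  # placeholder ARN
            "arn:aws:securityhub:us-west-2:111122223333:control/"
            "cis-aws-foundations-benchmark/v/1.2.0/1.14"
        ),
        ControlStatus="DISABLED",
        DisabledReason="Hardware MFA is tracked outside Security Hub.",
    )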

" + } + } + }, + "UpdateStandardsControlResponse":{ + "type":"structure", + "members":{ + } + }, "VerificationState":{ "type":"string", "enum":[ @@ -3164,6 +4529,40 @@ "BENIGN_POSITIVE" ] }, + "WafAction":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"NonEmptyString", + "documentation":"

Specifies how you want AWS WAF to respond to requests that match the settings in a Rule.

Valid settings include the following:

  • ALLOW - AWS WAF allows requests

  • BLOCK - AWS WAF blocks requests

  • COUNT - AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can't specify COUNT for the default action for a WebACL.

" + } + }, + "documentation":"

Details about the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule.

" + }, + "WafExcludedRule":{ + "type":"structure", + "members":{ + "RuleId":{ + "shape":"NonEmptyString", + "documentation":"

The unique identifier for the rule to exclude from the rule group.

" + } + }, + "documentation":"

Details about a rule to exclude from a rule group.

" + }, + "WafExcludedRuleList":{ + "type":"list", + "member":{"shape":"WafExcludedRule"} + }, + "WafOverrideAction":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"NonEmptyString", + "documentation":"

COUNT overrides the action specified by the individual rule within a RuleGroup.

If set to NONE, the rule's action takes place.

" + } + }, + "documentation":"

Details about an override action for a rule.

" + }, "WorkflowState":{ "type":"string", "enum":[ @@ -3175,5 +4574,5 @@ ] } }, - "documentation":"

Security Hub provides you with a comprehensive view of the security state of your AWS environment and resources. It also provides you with the compliance status of your environment based on CIS AWS Foundations compliance checks. Security Hub collects security data from AWS accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the AWS Security Hub User Guide .

When you use operations in the Security Hub API, the requests are executed only in the AWS Region that is currently active or in the specific AWS Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, execute the same command for each Region to apply the change to. For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the master account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invite was sent from.

" + "documentation":"

Security Hub provides you with a comprehensive view of the security state of your AWS environment and resources. It also provides you with the compliance status of your environment based on CIS AWS Foundations compliance checks. Security Hub collects security data from AWS accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the AWS Security Hub User Guide.

When you use operations in the Security Hub API, the requests are executed only in the AWS Region that is currently active or in the specific AWS Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, execute the same command for each Region to apply the change to.

For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the master account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

The following throttling limits apply to using Security Hub API operations.

  • GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second.

  • UpdateFindings - RateLimit of 1 request per second. BurstLimit of 5 requests per second.

  • All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

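As a rough, non-authoritative sketch of staying under the GetFindings RateLimit described above when calling the service through a botocore client: the region and filter values are placeholders, and the sketch assumes a get_findings paginator is defined for this release.

    import time
    import botocore.session

    # Create a Security Hub client from a botocore session (region is a placeholder).
    session = botocore.session.get_session()
    securityhub = session.create_client("securityhub", region_name="us-west-2")

    # Page through active findings while staying under the documented GetFindings
    # RateLimit of 3 requests per second (roughly one request every 0.4 seconds).
    paginator = securityhub.get_paginator("get_findings")
    filters = {"RecordState": [{"Value": "ACTIVE", "Comparison": "EQUALS"}]}
    for page in paginator.paginate(Filters=filters):
        for finding in page["Findings"]:
            print(finding["Id"], finding["Title"])
        time.sleep(0.4)  # simple client-side pacing to avoid throttling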
" } diff --git a/botocore/data/sesv2/2019-09-27/service-2.json b/botocore/data/sesv2/2019-09-27/service-2.json index 8973e1b6..9a5139e9 100644 --- a/botocore/data/sesv2/2019-09-27/service-2.json +++ b/botocore/data/sesv2/2019-09-27/service-2.json @@ -84,7 +84,7 @@ {"shape":"BadRequestException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Create a new predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. Amazon SES API v2 then sends that message to special email addresses spread across several major email providers. After about 24 hours, the test is complete, and you can use the GetDeliverabilityTestReport operation to view the results of the test.

" + "documentation":"

Create a new predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. Amazon SES then sends that message to special email addresses spread across several major email providers. After about 24 hours, the test is complete, and you can use the GetDeliverabilityTestReport operation to view the results of the test.

" }, "CreateEmailIdentity":{ "name":"CreateEmailIdentity", @@ -95,12 +95,13 @@ "input":{"shape":"CreateEmailIdentityRequest"}, "output":{"shape":"CreateEmailIdentityResponse"}, "errors":[ + {"shape":"AlreadyExistsException"}, {"shape":"LimitExceededException"}, {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Starts the process of verifying an email identity. An identity is an email address or domain that you use when you send email. Before you can use an identity to send email, you first have to verify it. By verifying an identity, you demonstrate that you're the owner of the identity, and that you've given Amazon SES API v2 permission to send email from the identity.

When you verify an email address, Amazon SES sends an email to the address. Your email address is verified as soon as you follow the link in the verification email.

When you verify a domain, this operation provides a set of DKIM tokens, which you can convert into CNAME tokens. You add these CNAME tokens to the DNS configuration for your domain. Your domain is verified when Amazon SES detects these records in the DNS configuration for your domain. For some DNS providers, it can take 72 hours or more to complete the domain verification process.

" + "documentation":"

Starts the process of verifying an email identity. An identity is an email address or domain that you use when you send email. Before you can use an identity to send email, you first have to verify it. By verifying an identity, you demonstrate that you're the owner of the identity, and that you've given Amazon SES API v2 permission to send email from the identity.

When you verify an email address, Amazon SES sends an email to the address. Your email address is verified as soon as you follow the link in the verification email.

When you verify a domain without specifying the DkimSigningAttributes object, this operation provides a set of DKIM tokens. You can convert these tokens into CNAME records, which you then add to the DNS configuration for your domain. Your domain is verified when Amazon SES detects these records in the DNS configuration for your domain. This verification method is known as Easy DKIM.

Alternatively, you can perform the verification process by providing your own public-private key pair. This verification method is known as Bring Your Own DKIM (BYODKIM). To use BYODKIM, your call to the CreateEmailIdentity operation has to include the DkimSigningAttributes object. When you specify this object, you provide a selector (a component of the DNS record name that identifies the public key that you want to use for DKIM authentication) and a private key.

" }, "DeleteConfigurationSet":{ "name":"DeleteConfigurationSet", @@ -178,7 +179,7 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Used to delete a suppressed email destination from your suppression list.

" + "documentation":"

Removes an email address from the suppression list for your account.

" }, "GetAccount":{ "name":"GetAccount", @@ -282,7 +283,7 @@ {"shape":"LimitExceededException"}, {"shape":"BadRequestException"} ], - "documentation":"

Retrieve information about the status of the Deliverability dashboard for your account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

" + "documentation":"

Retrieve information about the status of the Deliverability dashboard for your account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon SES Pricing.

" }, "GetDeliverabilityTestReport":{ "name":"GetDeliverabilityTestReport", @@ -357,7 +358,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"NotFoundException"} ], - "documentation":"

Used to fetch a single suppressed email destination from your suppression list.

" + "documentation":"

Retrieves information about a specific email address that's on the suppression list for your account.

" }, "ListConfigurationSets":{ "name":"ListConfigurationSets", @@ -444,7 +445,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Used to fetch a list suppressed email destinations from your suppression list.

" + "documentation":"

Retrieves a list of email addresses that are on the suppression list for your account.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -501,7 +502,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Change your account's suppression preferences for your account.

" + "documentation":"

Change the settings for the account-level suppression list.

" }, "PutConfigurationSetDeliveryOptions":{ "name":"PutConfigurationSetDeliveryOptions", @@ -561,7 +562,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Specify your account's suppression preferences for a configuration set.

" + "documentation":"

Specify the account suppression list preferences for a configuration set.

" }, "PutConfigurationSetTrackingOptions":{ "name":"PutConfigurationSetTrackingOptions", @@ -623,7 +624,7 @@ {"shape":"LimitExceededException"}, {"shape":"BadRequestException"} ], - "documentation":"

Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

" + "documentation":"

Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon SES Pricing.

" }, "PutEmailIdentityDkimAttributes":{ "name":"PutEmailIdentityDkimAttributes", @@ -640,6 +641,21 @@ ], "documentation":"

Used to enable or disable DKIM authentication for an email identity.

" }, + "PutEmailIdentityDkimSigningAttributes":{ + "name":"PutEmailIdentityDkimSigningAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v1/email/identities/{EmailIdentity}/dkim/signing" + }, + "input":{"shape":"PutEmailIdentityDkimSigningAttributesRequest"}, + "output":{"shape":"PutEmailIdentityDkimSigningAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Used to configure or change the DKIM authentication settings for an email domain identity. You can use this operation to do any of the following:

  • Update the signing attributes for an identity that uses Bring Your Own DKIM (BYODKIM).

  • Change from using no DKIM authentication to using Easy DKIM.

  • Change from using no DKIM authentication to using BYODKIM.

  • Change from using Easy DKIM to using BYODKIM.

  • Change from using BYODKIM to using Easy DKIM.

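A minimal sketch of how this operation might be called through a botocore client to move a verified domain to BYODKIM and then back to Easy DKIM; the domain, selector, and key material are placeholders, not real values.

    import botocore.session

    session = botocore.session.get_session()
    sesv2 = session.create_client("sesv2", region_name="us-east-1")

    # Switch the domain to Bring Your Own DKIM (BYODKIM) by supplying a selector
    # and a base64-encoded 1024-bit RSA private key (placeholder key material).
    sesv2.put_email_identity_dkim_signing_attributes(
        EmailIdentity="example.com",
        SigningAttributesOrigin="EXTERNAL",
        SigningAttributes={
            "DomainSigningSelector": "selector1",
            "DomainSigningPrivateKey": "MIICdQIBADANBg...",  # placeholder
        },
    )

    # Switching back to Easy DKIM only needs the origin; SigningAttributes is omitted.
    response = sesv2.put_email_identity_dkim_signing_attributes(
        EmailIdentity="example.com",
        SigningAttributesOrigin="AWS_SES",
    )
    print(response["DkimStatus"], response.get("DkimTokens", []))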
" + }, "PutEmailIdentityFeedbackAttributes":{ "name":"PutEmailIdentityFeedbackAttributes", "http":{ @@ -682,7 +698,7 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Puts (overwrites) an email destination in your suppression list.

" + "documentation":"

Adds an email address to the suppression list for your account.

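For illustration only, a call to this operation through a botocore client might look like the following; the address is a placeholder, and Reason is limited to BOUNCE or COMPLAINT.

    import botocore.session

    session = botocore.session.get_session()
    sesv2 = session.create_client("sesv2", region_name="us-east-1")

    # Add a single address to the account-level suppression list.
    sesv2.put_suppressed_destination(
        EmailAddress="bounced-recipient@example.com",
        Reason="BOUNCE",
    )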
" }, "SendEmail":{ "name":"SendEmail", @@ -702,7 +718,7 @@ {"shape":"NotFoundException"}, {"shape":"BadRequestException"} ], - "documentation":"

Sends an email message. You can use the Amazon SES API v2 to send two types of messages:

  • Simple – A standard email message. When you create this type of message, you specify the sender, the recipient, and the message body, and the Amazon SES API v2 assembles the message for you.

  • Raw – A raw, MIME-formatted email message. When you send this type of email, you have to specify all of the message headers, as well as the message body. You can use this message type to send messages that contain attachments. The message that you specify has to be a valid MIME message.

" + "documentation":"

Sends an email message. You can use the Amazon SES API v2 to send two types of messages:

  • Simple – A standard email message. When you create this type of message, you specify the sender, the recipient, and the message body, and Amazon SES assembles the message for you.

  • Raw – A raw, MIME-formatted email message. When you send this type of email, you have to specify all of the message headers, as well as the message body. You can use this message type to send messages that contain attachments. The message that you specify has to be a valid MIME message.

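A hedged sketch of the Simple form described above, called through a botocore client; the addresses are placeholders.

    import botocore.session

    session = botocore.session.get_session()
    sesv2 = session.create_client("sesv2", region_name="us-east-1")

    # Simple message: Amazon SES assembles the MIME message from these parts.
    response = sesv2.send_email(
        FromEmailAddress="sender@example.com",
        Destination={"ToAddresses": ["recipient@example.com"]},
        Content={
            "Simple": {
                "Subject": {"Data": "Hello", "Charset": "UTF-8"},
                "Body": {"Text": {"Data": "Sent with the SES API v2.", "Charset": "UTF-8"}},
            }
        },
    )
    print(response["MessageId"])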
" }, "TagResource":{ "name":"TagResource", @@ -905,7 +921,7 @@ }, "Charset":{ "shape":"Charset", - "documentation":"

The character set for the content. Because of the constraints of the SMTP protocol, the Amazon SES API v2 uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8, ISO-8859-1, or Shift_JIS.

" + "documentation":"

The character set for the content. Because of the constraints of the SMTP protocol, Amazon SES uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8, ISO-8859-1, or Shift_JIS.

" } }, "documentation":"

An object that represents the content of the email, and optionally a character set specification.

" @@ -1055,6 +1071,10 @@ "Tags":{ "shape":"TagList", "documentation":"

An array of objects that define the tags (keys and values) that you want to associate with the email identity.

" + }, + "DkimSigningAttributes":{ + "shape":"DkimSigningAttributes", + "documentation":"

If your request includes this object, Amazon SES configures the identity to use Bring Your Own DKIM (BYODKIM) for DKIM authentication purposes, as opposed to the default method, Easy DKIM.

You can only specify this object if the email identity is a domain, as opposed to an address.

" } }, "documentation":"

A request to begin the verification process for an email identity (an email address or domain).

" @@ -1072,10 +1092,10 @@ }, "DkimAttributes":{ "shape":"DkimAttributes", - "documentation":"

An object that contains information about the DKIM attributes for the identity. This object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.

" + "documentation":"

An object that contains information about the DKIM attributes for the identity.

" } }, - "documentation":"

If the email identity is a domain, this object contains tokens that you can use to create a set of CNAME records. To sucessfully verify your domain, you have to add these records to the DNS configuration for your domain.

If the email identity is an email address, this object is empty.

" + "documentation":"

If the email identity is a domain, this object contains information about the DKIM verification status for the domain.

If the email identity is an email address, this object is empty.

" }, "CustomRedirectDomain":{ "type":"string", @@ -1128,7 +1148,7 @@ "documentation":"

The name of the dedicated IP pool that the IP address is associated with.

" } }, - "documentation":"

Contains information about a dedicated IP address that is associated with your Amazon SES API v2 account.

To learn more about requesting dedicated IP addresses, see Requesting and Relinquishing Dedicated IP Addresses in the Amazon SES Developer Guide.

" + "documentation":"

Contains information about a dedicated IP address that is associated with your Amazon SES account.

To learn more about requesting dedicated IP addresses, see Requesting and Relinquishing Dedicated IP Addresses in the Amazon SES Developer Guide.

" }, "DedicatedIpList":{ "type":"list", @@ -1230,12 +1250,12 @@ "members":{ "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

The suppressed email destination to delete.

", + "documentation":"

The suppressed email destination to remove from the account suppression list.

", "location":"uri", "locationName":"EmailAddress" } }, - "documentation":"

A request to delete a suppressed email destination.

" + "documentation":"

A request to remove an email address from the suppression list for your account.

" }, "DeleteSuppressedDestinationResponse":{ "type":"structure", @@ -1352,18 +1372,47 @@ }, "Status":{ "shape":"DkimStatus", - "documentation":"

Describes whether or not Amazon SES has successfully located the DKIM records in the DNS records for the domain. The status can be one of the following:

  • PENDING – Amazon SES hasn't yet detected the DKIM records in the DNS configuration for the domain, but will continue to attempt to locate them.

  • SUCCESS – Amazon SES located the DKIM records in the DNS configuration for the domain and determined that they're correct. You can now send DKIM-signed email from the identity.

  • FAILED – Amazon SES wasn't able to locate the DKIM records in the DNS settings for the domain, and won't continue to search for them.

  • TEMPORARY_FAILURE – A temporary issue occurred, which prevented Amazon SES from determining the DKIM status for the domain.

  • NOT_STARTED – Amazon SES hasn't yet started searching for the DKIM records in the DKIM records for the domain.

" + "documentation":"

Describes whether or not Amazon SES has successfully located the DKIM records in the DNS records for the domain. The status can be one of the following:

  • PENDING – The verification process was initiated, but Amazon SES hasn't yet detected the DKIM records in the DNS configuration for the domain.

  • SUCCESS – The verification process completed successfully.

  • FAILED – The verification process failed. This typically occurs when Amazon SES fails to find the DKIM records in the DNS configuration of the domain.

  • TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining the DKIM authentication status of the domain.

  • NOT_STARTED – The DKIM verification process hasn't been initiated for the domain.

" }, "Tokens":{ "shape":"DnsTokenList", - "documentation":"

A set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. Amazon SES usually detects these records within about 72 hours of adding them to the DNS configuration for your domain.

" + "documentation":"

If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete.

If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector for the public key.

Regardless of the DKIM authentication method you use, Amazon SES searches for the appropriate records in the DNS configuration of the domain for up to 72 hours.

" + }, + "SigningAttributesOrigin":{ + "shape":"DkimSigningAttributesOrigin", + "documentation":"

A string that indicates how DKIM was configured for the identity. There are two possible values:

  • AWS_SES – Indicates that DKIM was configured for the identity by using Easy DKIM.

  • EXTERNAL – Indicates that DKIM was configured for the identity by using Bring Your Own DKIM (BYODKIM).

" } }, - "documentation":"

An object that contains information about the DKIM configuration for an email identity.

" + "documentation":"

An object that contains information about the DKIM authentication status for an email identity.

Amazon SES determines the authentication status by searching for specific records in the DNS configuration for the domain. If you used Easy DKIM to set up DKIM authentication, Amazon SES tries to find three unique CNAME records in the DNS configuration for your domain. If you provided a public key to perform DKIM authentication, Amazon SES tries to find a TXT record that uses the selector that you specified. The value of the TXT record must be a public key that's paired with the private key that you specified in the process of creating the identity.

" + }, + "DkimSigningAttributes":{ + "type":"structure", + "required":[ + "DomainSigningSelector", + "DomainSigningPrivateKey" + ], + "members":{ + "DomainSigningSelector":{ + "shape":"Selector", + "documentation":"

A string that's used to identify a public key in the DNS configuration for a domain.

" + }, + "DomainSigningPrivateKey":{ + "shape":"PrivateKey", + "documentation":"

A private key that's used to generate a DKIM signature.

The private key must use 1024-bit RSA encryption, and must be encoded using base64 encoding.

" + } + }, + "documentation":"

An object that contains information about the tokens used for setting up Bring Your Own DKIM (BYODKIM).

" + }, + "DkimSigningAttributesOrigin":{ + "type":"string", + "enum":[ + "AWS_SES", + "EXTERNAL" + ] }, "DkimStatus":{ "type":"string", - "documentation":"

The DKIM authentication status of the identity. The status can be one of the following:

  • PENDING – The DKIM verification process was initiated, and Amazon SES hasn't yet detected the CNAME records in the DNS configuration for the domain.

  • SUCCESS – The DKIM authentication process completed successfully.

  • FAILED – The DKIM authentication process failed. This can happen when Amazon SES fails to find the required CNAME records in the DNS configuration of the domain.

  • TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining the DKIM authentication status of the domain.

  • NOT_STARTED – The DKIM verification process hasn't been initiated for the domain.

", + "documentation":"

The DKIM authentication status of the identity. The status can be one of the following:

  • PENDING – The verification process was initiated, but Amazon SES hasn't yet detected the DKIM records in the DNS configuration for the domain.

  • SUCCESS – The verification process completed successfully.

  • FAILED – The verification process failed. This typically occurs when Amazon SES fails to find the DKIM records in the DNS configuration of the domain.

  • TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining the DKIM authentication status of the domain.

  • NOT_STARTED – The DKIM verification process hasn't been initiated for the domain.

", "enum":[ "PENDING", "SUCCESS", @@ -1654,7 +1703,7 @@ }, "SuppressionAttributes":{ "shape":"SuppressionAttributes", - "documentation":"

An object that contains information about your account's suppression preferences.

" + "documentation":"

An object that contains information about the email address suppression preferences for your account in the current AWS Region.

" } }, "documentation":"

A list of details about the email-sending capabilities of your Amazon SES account in the current AWS Region.

" @@ -1748,7 +1797,7 @@ }, "SuppressionOptions":{ "shape":"SuppressionOptions", - "documentation":"

An object that contains information about your account's suppression preferences.

" + "documentation":"

An object that contains information about the suppression list preferences for your account.

" } }, "documentation":"

Information about a configuration set.

" @@ -1992,7 +2041,7 @@ }, "DkimAttributes":{ "shape":"DkimAttributes", - "documentation":"

An object that contains information about the DKIM attributes for the identity. This object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.

" + "documentation":"

An object that contains information about the DKIM attributes for the identity.

" }, "MailFromAttributes":{ "shape":"MailFromAttributes", @@ -2011,12 +2060,12 @@ "members":{ "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

Email destination to fetch from the suppression list.

", + "documentation":"

The email address that's on the account suppression list.

", "location":"uri", "locationName":"EmailAddress" } }, - "documentation":"

A request to get a suppressed email destination.

" + "documentation":"

A request to retrieve information about an email address that's on the suppression list for your account.

" }, "GetSuppressedDestinationResponse":{ "type":"structure", @@ -2024,10 +2073,10 @@ "members":{ "SuppressedDestination":{ "shape":"SuppressedDestination", - "documentation":"

An object containing information about the suppressed email destination.

" + "documentation":"

An object containing information about the suppressed email address.

" } }, - "documentation":"

Information about the suppressed email destination.

" + "documentation":"

Information about the suppressed email address.

" }, "Identity":{"type":"string"}, "IdentityInfo":{ @@ -2080,7 +2129,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified request includes an invalid or expired token. Please attempt to get a new token.

", + "documentation":"

The specified request includes an invalid or expired token.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -2343,25 +2392,25 @@ "members":{ "Reasons":{ "shape":"SuppressionListReasons", - "documentation":"

Filters email destinations suppressed by the given reasons.

", + "documentation":"

The factors that caused the email address to be added to the suppression list for your account.

", "location":"querystring", "locationName":"Reason" }, "StartDate":{ "shape":"Timestamp", - "documentation":"

Filters email destinations suppressed before the given time.

", + "documentation":"

Used to filter the list of suppressed email destinations so that it only includes addresses that were added to the list after a specific date. The date that you specify should be in Unix time format.

", "location":"querystring", "locationName":"StartDate" }, "EndDate":{ "shape":"Timestamp", - "documentation":"

Filters email destinations suppressed after the given time.

", + "documentation":"

Used to filter the list of suppressed email destinations so that it only includes addresses that were added to the list before a specific date. The date that you specify should be in Unix time format.

", "location":"querystring", "locationName":"EndDate" }, "NextToken":{ "shape":"NextToken", - "documentation":"

A token returned from a previous call to ListSuppressedDestinations to indicate the position in the list of suppressed email destinations.

", + "documentation":"

A token returned from a previous call to ListSuppressedDestinations to indicate the position in the list of suppressed email addresses.

", "location":"querystring", "locationName":"NextToken" }, @@ -2372,7 +2421,7 @@ "locationName":"PageSize" } }, - "documentation":"

A request to obtain a list of suppressed email destinations.

" + "documentation":"

A request to obtain a list of email destinations that are on the suppression list for your account.

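As a sketch of the filtering and pagination behavior described above; the dates, page size, and region are placeholder choices.

    import datetime
    import botocore.session

    session = botocore.session.get_session()
    sesv2 = session.create_client("sesv2", region_name="us-east-1")

    # List addresses suppressed for bounces since the start of 2020, following
    # NextToken until the whole suppression list has been read.
    kwargs = {
        "Reasons": ["BOUNCE"],
        "StartDate": datetime.datetime(2020, 1, 1),
        "PageSize": 100,
    }
    while True:
        page = sesv2.list_suppressed_destinations(**kwargs)
        for summary in page.get("SuppressedDestinationSummaries", []):
            print(summary["EmailAddress"], summary["Reason"], summary["LastUpdateTime"])
        if "NextToken" not in page:
            break
        kwargs["NextToken"] = page["NextToken"]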
" }, "ListSuppressedDestinationsResponse":{ "type":"structure", @@ -2383,10 +2432,10 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

A token that indicates that there are additional suppressed destinations to list. To view additional suppressed destinations, issue another request to ListSuppressedDestinations, and pass this token in the NextToken parameter.

" + "documentation":"

A token that indicates that there are additional email addresses on the suppression list for your account. To view additional suppressed addresses, issue another request to ListSuppressedDestinations, and pass this token in the NextToken parameter.

" } }, - "documentation":"

A list of suppressed email destinations.

" + "documentation":"

A list of suppressed email addresses.

" }, "ListTagsForResourceRequest":{ "type":"structure", @@ -2593,6 +2642,13 @@ "type":"string", "documentation":"

The name of a dedicated IP pool.

" }, + "PrivateKey":{ + "type":"string", + "max":20480, + "min":1, + "pattern":"^[a-zA-Z0-9+\\/]+={0,2}$", + "sensitive":true + }, "PutAccountDedicatedIpWarmupAttributesRequest":{ "type":"structure", "members":{ @@ -2630,7 +2686,7 @@ "members":{ "SuppressedReasons":{ "shape":"SuppressionListReasons", - "documentation":"

A list of reasons to suppress email addresses. The only valid reasons are:

  • COMPLAINT – Amazon SES will suppress an email address that receives a complaint.

  • BOUNCE – Amazon SES will suppress an email address that hard bounces.

" + "documentation":"

A list that contains the reasons that email addresses will be automatically added to the suppression list for your account. This list can contain any or all of the following:

  • COMPLAINT – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a complaint.

  • BOUNCE – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a hard bounce.

" } }, "documentation":"

A request to change your account's suppression preferences.

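A small sketch of setting these preferences and reading them back through GetAccount; this is illustrative only.

    import botocore.session

    session = botocore.session.get_session()
    sesv2 = session.create_client("sesv2", region_name="us-east-1")

    # Automatically suppress addresses that hard bounce or generate complaints.
    sesv2.put_account_suppression_attributes(
        SuppressedReasons=["BOUNCE", "COMPLAINT"]
    )

    # GetAccount reports the current account-level suppression preferences.
    account = sesv2.get_account()
    print(account["SuppressionAttributes"]["SuppressedReasons"])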
" @@ -2720,16 +2776,16 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to enable or disable email sending for.

", + "documentation":"

The name of the configuration set that you want to change the suppression list preferences for.

", "location":"uri", "locationName":"ConfigurationSetName" }, "SuppressedReasons":{ "shape":"SuppressionListReasons", - "documentation":"

A list of reasons to suppress email addresses. The only valid reasons are:

  • COMPLAINT – Amazon SES will suppress an email address that receives a complaint.

  • BOUNCE – Amazon SES will suppress an email address that hard bounces.

" + "documentation":"

A list that contains the reasons that email addresses are automatically added to the suppression list for your account. This list can contain any or all of the following:

  • COMPLAINT – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a complaint.

  • BOUNCE – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a hard bounce.

" } }, - "documentation":"

A request to change your account's suppression preferences for an specific configuration set.

" + "documentation":"

A request to change the account suppression list preferences for a specific configuration set.

" }, "PutConfigurationSetSuppressionOptionsResponse":{ "type":"structure", @@ -2856,6 +2912,44 @@ }, "documentation":"

An HTTP 200 response if the request succeeds, or an error message if the request fails.

" }, + "PutEmailIdentityDkimSigningAttributesRequest":{ + "type":"structure", + "required":[ + "EmailIdentity", + "SigningAttributesOrigin" + ], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

The email identity that you want to configure DKIM for.

", + "location":"uri", + "locationName":"EmailIdentity" + }, + "SigningAttributesOrigin":{ + "shape":"DkimSigningAttributesOrigin", + "documentation":"

The method that you want to use to configure DKIM for the identity. There are two possible values:

  • AWS_SES – Configure DKIM for the identity by using Easy DKIM.

  • EXTERNAL – Configure DKIM for the identity by using Bring Your Own DKIM (BYODKIM).

" + }, + "SigningAttributes":{ + "shape":"DkimSigningAttributes", + "documentation":"

An object that contains information about the private key and selector that you want to use to configure DKIM for the identity. This object is only required if you want to configure Bring Your Own DKIM (BYODKIM) for the identity.

" + } + }, + "documentation":"

A request to change the DKIM attributes for an email identity.

" + }, + "PutEmailIdentityDkimSigningAttributesResponse":{ + "type":"structure", + "members":{ + "DkimStatus":{ + "shape":"DkimStatus", + "documentation":"

The DKIM authentication status of the identity. Amazon SES determines the authentication status by searching for specific records in the DNS configuration for your domain. If you used Easy DKIM to set up DKIM authentication, Amazon SES tries to find three unique CNAME records in the DNS configuration for your domain.

If you provided a public key to perform DKIM authentication, Amazon SES tries to find a TXT record that uses the selector that you specified. The value of the TXT record must be a public key that's paired with the private key that you specified in the process of creating the identity.

The status can be one of the following:

  • PENDING – The verification process was initiated, but Amazon SES hasn't yet detected the DKIM records in the DNS configuration for the domain.

  • SUCCESS – The verification process completed successfully.

  • FAILED – The verification process failed. This typically occurs when Amazon SES fails to find the DKIM records in the DNS configuration of the domain.

  • TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining the DKIM authentication status of the domain.

  • NOT_STARTED – The DKIM verification process hasn't been initiated for the domain.

" + }, + "DkimTokens":{ + "shape":"DnsTokenList", + "documentation":"

If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete.

If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector that's associated with your public key.

Regardless of the DKIM authentication method you use, Amazon SES searches for the appropriate records in the DNS configuration of the domain for up to 72 hours.

" + } + }, + "documentation":"

If the action is successful, the service sends back an HTTP 200 response.

The following data is returned in JSON format by the service.

" + }, "PutEmailIdentityFeedbackAttributesRequest":{ "type":"structure", "required":["EmailIdentity"], @@ -2915,14 +3009,14 @@ "members":{ "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

Email destination to be suppressed.

" + "documentation":"

The email address that should be added to the suppression list for your account.

" }, "Reason":{ "shape":"SuppressionListReason", - "documentation":"

Reason for which the email destination is suppressed.

" + "documentation":"

The factors that should cause the email address to be added to the suppression list for your account.

" } }, - "documentation":"

A request to suppress an email destination.

" + "documentation":"

A request to add an email destination to the suppression list for your account.

" }, "PutSuppressedDestinationResponse":{ "type":"structure", @@ -2936,10 +3030,10 @@ "members":{ "Data":{ "shape":"RawMessageData", - "documentation":"

The raw email message. The message has to meet the following criteria:

  • The message has to contain a header and a body, separated by one blank line.

  • All of the required header fields must be present in the message.

  • Each part of a multipart MIME message must be formatted properly.

  • Attachments must be in a file format that the Amazon SES API v2 supports.

  • The entire message must be Base64 encoded.

  • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.

  • The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.

" + "documentation":"

The raw email message. The message has to meet the following criteria:

  • The message has to contain a header and a body, separated by one blank line.

  • All of the required header fields must be present in the message.

  • Each part of a multipart MIME message must be formatted properly.

  • Attachments must be in a file format that Amazon SES supports.

  • The entire message must be Base64 encoded.

  • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.

  • The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.

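The criteria above can be met by building the message with the standard library's email package, as in the sketch below. When calling through botocore, the serializer performs the base64 encoding of the Data blob, so the raw MIME bytes are passed directly; addresses and the attachment contents are placeholders.

    import botocore.session
    from email.mime.application import MIMEApplication
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    # Build a multipart MIME message with a small attachment.
    msg = MIMEMultipart()
    msg["Subject"] = "Report attached"
    msg["From"] = "sender@example.com"
    msg["To"] = "recipient@example.com"
    msg.attach(MIMEText("See the attached report.", "plain"))
    attachment = MIMEApplication(b"col_a,col_b\n1,2\n", Name="report.csv")
    attachment["Content-Disposition"] = 'attachment; filename="report.csv"'
    msg.attach(attachment)

    session = botocore.session.get_session()
    sesv2 = session.create_client("sesv2", region_name="us-east-1")

    # The raw MIME bytes go in Data; the SDK base64-encodes the blob on the wire.
    sesv2.send_email(
        FromEmailAddress="sender@example.com",
        Destination={"ToAddresses": ["recipient@example.com"]},
        Content={"Raw": {"Data": msg.as_bytes()}},
    )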
" } }, - "documentation":"

The raw email message.

" + "documentation":"

Represents the raw content of an email message.

" }, "RawMessageData":{ "type":"blob", @@ -2971,6 +3065,12 @@ }, "documentation":"

Enable or disable collection of reputation metrics for emails that you send using this configuration set in the current AWS Region.

" }, + "Selector":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9]))$" + }, "SendEmailRequest":{ "type":"structure", "required":[ @@ -3014,7 +3114,7 @@ "members":{ "MessageId":{ "shape":"OutboundMessageId", - "documentation":"

A unique identifier for the message that is generated when the message is accepted.

It is possible for the Amazon SES API v2 to accept a message without sending it. This can happen when the message that you're trying to send has an attachment contains a virus, or when you send a templated email that contains invalid personalization content, for example.

" + "documentation":"

A unique identifier for the message that is generated when the message is accepted.

It's possible for Amazon SES to accept a message without sending it. This can happen when the message that you're trying to send has an attachment that contains a virus, or when you send a templated email that contains invalid personalization content, for example.

" } }, "documentation":"

A unique message ID that you receive when an email is accepted for sending.

" @@ -3082,36 +3182,36 @@ "members":{ "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

The suppressed email destination.

" + "documentation":"

The email address that is on the suppression list for your account.

" }, "Reason":{ "shape":"SuppressionListReason", - "documentation":"

The reason for which the email destination is suppressed.

" + "documentation":"

The reason that the address was added to the suppression list for your account.

" }, "LastUpdateTime":{ "shape":"Timestamp", - "documentation":"

The last time the suppressed destination was updated.

" + "documentation":"

The date and time when the suppressed destination was last updated, shown in Unix time format.

" }, "Attributes":{ "shape":"SuppressedDestinationAttributes", - "documentation":"

Optional value with information about the sources of the suppression.

" + "documentation":"

An optional value that can contain additional information about the reasons that the address was added to the suppression list for your account.

" } }, - "documentation":"

An object containing information about the suppressed email destination.

" + "documentation":"

An object that contains information about an email address that is on the suppression list for your account.

" }, "SuppressedDestinationAttributes":{ "type":"structure", "members":{ "MessageId":{ "shape":"OutboundMessageId", - "documentation":"

A unique identifier of the message that caused the suppression of the email destination.

" + "documentation":"

The unique identifier of the email message that caused the email address to be added to the suppression list for your account.

" }, "FeedbackId":{ "shape":"FeedbackId", - "documentation":"

A unique identifier of the suppression cause.

" + "documentation":"

A unique identifier that's generated when an email address is added to the suppression list for your account.

" } }, - "documentation":"

An object containing additional attributes related to a suppressed destination.

" + "documentation":"

An object that contains additional attributes that are related to an email address that is on the suppression list for your account.

" }, "SuppressedDestinationSummaries":{ "type":"list", @@ -3127,32 +3227,32 @@ "members":{ "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

The suppressed email destination.

" + "documentation":"

The email address that's on the suppression list for your account.

" }, "Reason":{ "shape":"SuppressionListReason", - "documentation":"

The reason for which the email destination is suppressed.

" + "documentation":"

The reason that the address was added to the suppression list for your account.

" }, "LastUpdateTime":{ "shape":"Timestamp", - "documentation":"

The last time the suppressed destination was updated.

" + "documentation":"

The date and time when the suppressed destination was last updated, shown in Unix time format.

" } }, - "documentation":"

A summary for the suppressed email destination.

" + "documentation":"

A summary that describes the suppressed email address.

" }, "SuppressionAttributes":{ "type":"structure", "members":{ "SuppressedReasons":{ "shape":"SuppressionListReasons", - "documentation":"

A list of reasons to suppress email addresses. The only valid reasons are:

  • COMPLAINT – Amazon SES will suppress an email address that receives a complaint.

  • BOUNCE – Amazon SES will suppress an email address that hard bounces.

" + "documentation":"

A list that contains the reasons that email addresses will be automatically added to the suppression list for your account. This list can contain any or all of the following:

  • COMPLAINT – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a complaint.

  • BOUNCE – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a hard bounce.

" } }, - "documentation":"

An object that contains information about your account's suppression preferences.

" + "documentation":"

An object that contains information about the email address suppression preferences for your account in the current AWS Region.

" }, "SuppressionListReason":{ "type":"string", - "documentation":"

A string representing the cause for suppression for an email destination. It can be one of the following:

  • COMPLAINT – Amazon SES will suppress an email address that receive a complaint.

  • BOUNCE – Amazon SES will suppress an email address that hard bounces.

", + "documentation":"

The reason that the address was added to the suppression list for your account. The value can be one of the following:

  • COMPLAINT – Amazon SES added an email address to the suppression list for your account because a message sent to that address resulted in a complaint.

  • BOUNCE – Amazon SES added an email address to the suppression list for your account because a message sent to that address resulted in a hard bounce.

", "enum":[ "BOUNCE", "COMPLAINT" @@ -3167,10 +3267,10 @@ "members":{ "SuppressedReasons":{ "shape":"SuppressionListReasons", - "documentation":"

A list of reasons to suppress email addresses. The only valid reasons are:

  • COMPLAINT – Amazon SES will suppress an email address that receives a complaint.

  • BOUNCE – Amazon SES will suppress an email address that hard bounces.

" + "documentation":"

A list that contains the reasons that email addresses are automatically added to the suppression list for your account. This list can contain any or all of the following:

  • COMPLAINT – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a complaint.

  • BOUNCE – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a hard bounce.

" } }, - "documentation":"

An object that contains information about your account's suppression preferences.

" + "documentation":"

An object that contains information about the suppression list preferences for your account.

" }, "Tag":{ "type":"structure", diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index a09ca072..35d82c90 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -71,7 +71,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Registers your on-premises server or virtual machine with Amazon EC2 so that you can manage these resources using Run Command. An on-premises server or virtual machine that has been registered with EC2 is called a managed instance. For more information about activations, see Setting Up AWS Systems Manager for Hybrid Environments.

" + "documentation":"

Generates an activation code and activation ID you can use to register your on-premises server or virtual machine (VM) with Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises instances and VMs using Systems Manager, see Setting Up AWS Systems Manager for Hybrid Environments in the AWS Systems Manager User Guide.

On-premises servers or VMs that are registered with Systems Manager and Amazon EC2 instances that you manage with Systems Manager are all called managed instances.

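A sketch of requesting an activation through a botocore client; the role name, description, and limit are placeholders, and the role is assumed to already trust ssm.amazonaws.com.

    import botocore.session

    session = botocore.session.get_session()
    ssm = session.create_client("ssm", region_name="us-east-1")

    # Request an activation for up to 10 on-premises machines.
    response = ssm.create_activation(
        Description="Datacenter web servers",
        DefaultInstanceName="onprem-web",
        IamRole="SSMServiceRoleForHybrid",  # placeholder role name
        RegistrationLimit=10,
    )

    # The code and ID are what you supply to SSM Agent when registering
    # each on-premises server or VM.
    print(response["ActivationId"], response["ActivationCode"])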
" }, "CreateAssociation":{ "name":"CreateAssociation", @@ -197,7 +197,7 @@ {"shape":"ResourceDataSyncAlreadyExistsException"}, {"shape":"ResourceDataSyncInvalidConfigurationException"} ], - "documentation":"

A resource data sync helps you view data from multiple sources in a single location. Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource.

You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple AWS Regions to a single Amazon S3 bucket. For more information, see Configuring Resource Data Sync for Inventory in the AWS Systems Manager User Guide.

You can configure Systems Manager Explorer to use the SyncToDestination type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple AWS Regions to a single Amazon S3 bucket. You can also configure Explorer to use the SyncFromSource type. This type synchronizes OpsItems and OpsData from multiple AWS accounts and Regions by using AWS Organizations. For more information, see Setting Up Explorer to Display Data from Multiple Accounts and Regions in the AWS Systems Manager User Guide.

A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.

" + "documentation":"

A resource data sync helps you view data from multiple sources in a single location. Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource.

You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple AWS Regions to a single Amazon S3 bucket. For more information, see Configuring Resource Data Sync for Inventory in the AWS Systems Manager User Guide.

You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple AWS Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple AWS accounts and Regions or EntireOrganization by using AWS Organizations. For more information, see Setting Up Explorer to Display Data from Multiple Accounts and Regions in the AWS Systems Manager User Guide.

A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync operation.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.

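A minimal sketch of the SyncToDestination case described above; the sync name, bucket, and region are placeholders, and the bucket is assumed to already have an appropriate bucket policy.

    import botocore.session

    session = botocore.session.get_session()
    ssm = session.create_client("ssm", region_name="us-east-1")

    # Aggregate Inventory data into a single S3 bucket.
    ssm.create_resource_data_sync(
        SyncName="inventory-to-s3",
        S3Destination={
            "BucketName": "my-inventory-bucket",
            "SyncFormat": "JsonSerDe",
            "Region": "us-east-1",
        },
    )

    # The sync runs asynchronously; check its status with ListResourceDataSync.
    for sync in ssm.list_resource_data_sync().get("ResourceDataSyncItems", []):
        print(sync["SyncName"], sync.get("LastStatus"))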
" }, "DeleteActivation":{ "name":"DeleteActivation", @@ -1249,7 +1249,7 @@ {"shape":"InternalServerError"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Lists the associations for the specified Systems Manager document or instance.

" + "documentation":"

Returns all State Manager associations in the current AWS account and Region. You can limit the results to a specific State Manager association document or instance by specifying a filter.

" }, "ListCommandInvocations":{ "name":"ListCommandInvocations", @@ -1345,7 +1345,7 @@ {"shape":"InvalidNextToken"}, {"shape":"InvalidFilterKey"} ], - "documentation":"

Describes one or more of your Systems Manager documents.

" + "documentation":"

Returns all Systems Manager (SSM) documents in the current AWS account and Region. You can limit the results of this request by using a filter.

" }, "ListInventoryEntries":{ "name":"ListInventoryEntries", @@ -2699,7 +2699,7 @@ }, "Values":{ "shape":"AttachmentsSourceValues", - "documentation":"

The value of a key-value pair that identifies the location of an attachment to a document. The format is the URL of the location of a document attachment, such as the URL of an Amazon S3 bucket.

" + "documentation":"

The value of a key-value pair that identifies the location of an attachment to a document. The format for Value depends on the type of key you specify.

  • For the key SourceUrl, the value is an S3 bucket location. For example:

    \"Values\": [ \"s3://my-bucket/my-folder\" ]

  • For the key S3FileUrl, the value is a file in an S3 bucket. For example:

    \"Values\": [ \"s3://my-bucket/my-folder/my-file.py\" ]

  • For the key AttachmentReference, the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:

    \"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]

    However, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:

    \"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]

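For the new AttachmentReference key, a document-creation call might look like the sketch below; the document name, the local YAML file, and the referenced document, version, and file are all placeholders, and the document content is assumed to reference the attachment by name.

    import botocore.session

    session = botocore.session.get_session()
    ssm = session.create_client("ssm", region_name="us-east-1")

    # Reuse a file that is already attached to version 3 of another document
    # in this account.
    with open("my-automation.yaml") as f:  # placeholder document content
        content = f.read()

    ssm.create_document(
        Name="MyAutomationDoc",
        DocumentType="Automation",
        DocumentFormat="YAML",
        Content=content,
        Attachments=[
            {
                "Key": "AttachmentReference",
                "Values": ["MyOtherDocument/3/my-other-file.py"],
                "Name": "my-other-file.py",
            }
        ],
    )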
" }, "Name":{ "shape":"AttachmentIdentifier", @@ -2712,7 +2712,8 @@ "type":"string", "enum":[ "SourceUrl", - "S3FileUrl" + "S3FileUrl", + "AttachmentReference" ] }, "AttachmentsSourceList":{ @@ -2897,7 +2898,8 @@ "CurrentAction", "StartTimeBefore", "StartTimeAfter", - "AutomationType" + "AutomationType", + "TagKey" ] }, "AutomationExecutionFilterList":{ @@ -2947,11 +2949,11 @@ }, "AutomationExecutionStatus":{ "shape":"AutomationExecutionStatus", - "documentation":"

The status of the execution. Valid values include: Running, Succeeded, Failed, Timed out, or Cancelled.

" + "documentation":"

The status of the execution.

" }, "ExecutionStartTime":{ "shape":"DateTime", - "documentation":"

The time the execution started.>

" + "documentation":"

The time the execution started.

" }, "ExecutionEndTime":{ "shape":"DateTime", @@ -3171,8 +3173,7 @@ "CloudWatchLogGroupName":{ "type":"string", "max":512, - "min":1, - "pattern":"[\\.\\-_/#A-Za-z0-9]+" + "min":1 }, "CloudWatchOutputConfig":{ "type":"structure", @@ -3793,15 +3794,15 @@ "members":{ "Description":{ "shape":"ActivationDescription", - "documentation":"

A user-defined description of the resource that you want to register with Amazon EC2.

Do not enter personally identifiable information in this field.

" + "documentation":"

A user-defined description of the resource that you want to register with Systems Manager.

Do not enter personally identifiable information in this field.

" }, "DefaultInstanceName":{ "shape":"DefaultInstanceName", - "documentation":"

The name of the registered, managed instance as it will appear in the Amazon EC2 console or when you use the AWS command line tools to list EC2 resources.

Do not enter personally identifiable information in this field.

" + "documentation":"

The name of the registered, managed instance as it will appear in the Systems Manager console or when you use the AWS command line tools to list Systems Manager resources.

Do not enter personally identifiable information in this field.

" }, "IamRole":{ "shape":"IamRole", - "documentation":"

The Amazon Identity and Access Management (IAM) role that you want to assign to the managed instance.

" + "documentation":"

The AWS Identity and Access Management (IAM) role that you want to assign to the managed instance. This IAM role must provide AssumeRole permissions for the Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM Service Role for a Hybrid Environment in the AWS Systems Manager User Guide.

" }, "RegistrationLimit":{ "shape":"RegistrationLimit", @@ -3928,7 +3929,7 @@ }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The instance ID.

InstanceId has been deprecated. To specify an instance ID for an association, use the Targets parameter. If you use the parameter InstanceId, you cannot use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter.

" + "documentation":"

The instance ID.

InstanceId has been deprecated. To specify an instance ID for an association, use the Targets parameter. Requests that include the parameter InstanceID with SSM documents that use schema version 2.0 or later will fail. In addition, if you use the parameter InstanceId, you cannot use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter.

" }, "Parameters":{ "shape":"Parameters", @@ -4006,11 +4007,11 @@ }, "DocumentType":{ "shape":"DocumentType", - "documentation":"

The type of document to create. Valid document types include: Command, Policy, Automation, Session, and Package.

" + "documentation":"

The type of document to create.

" }, "DocumentFormat":{ "shape":"DocumentFormat", - "documentation":"

Specify the document format for the request. The document format can be either JSON or YAML. JSON is the default format.

" + "documentation":"

Specify the document format for the request. The document format can be JSON, YAML, or TEXT. JSON is the default format.

" }, "TargetType":{ "shape":"TargetType", @@ -5594,7 +5595,7 @@ }, "InstancesWithInstalledPendingRebootPatches":{ "shape":"InstancesCount", - "documentation":"

Reserved for future use.

", + "documentation":"

The number of instances with patches installed by Patch Manager that have not been rebooted after the patch installation. The status of these instances is NON_COMPLIANT.

", "box":true }, "InstancesWithInstalledRejectedPatches":{ @@ -6183,7 +6184,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Error returned when the ID specified for a resource, such as a maintenance window or Patch baseline, doesn't exist.

For information about resource limits in Systems Manager, see AWS Systems Manager Limits.

", + "documentation":"

Error returned when the ID specified for a resource, such as a maintenance window or Patch baseline, doesn't exist.

For information about resource quotas in Systems Manager, see Systems Manager Service Quotas in the AWS General Reference.

", "exception":true }, "DryRun":{"type":"boolean"}, @@ -7754,7 +7755,7 @@ }, "InstalledPendingRebootCount":{ "shape":"PatchInstalledPendingRebootCount", - "documentation":"

Reserved for future use.

", + "documentation":"

The number of patches installed by Patch Manager since the last time the instance was rebooted.

", "box":true }, "InstalledRejectedCount":{ @@ -7793,11 +7794,11 @@ }, "LastNoRebootInstallOperationTime":{ "shape":"DateTime", - "documentation":"

Reserved for future use.

" + "documentation":"

The time of the last attempt to patch the instance with NoReboot specified as the reboot option.

" }, "RebootOption":{ "shape":"RebootOption", - "documentation":"

Reserved for future use.

" + "documentation":"

Indicates the reboot option specified in the patch baseline.

Reboot options apply to Install operations only. Reboots are not attempted for Patch Manager Scan operations.

  • RebootIfNeeded: Patch Manager tries to reboot the instance if it installed any patches, or if any patches are detected with a status of InstalledPendingReboot.

  • NoReboot: Patch Manager attempts to install missing packages without trying to reboot the system. Patches installed with this option are assigned a status of InstalledPendingReboot. These patches might not be in effect until a reboot is performed.

" } }, "documentation":"

Defines the high-level patch compliance state for a managed instance, providing information about the number of installed, missing, not applicable, and failed patches along with metadata about the operation when this information was gathered for the instance.

" @@ -8400,7 +8401,7 @@ }, "Type":{ "shape":"InventoryQueryOperatorType", - "documentation":"

The type of filter. Valid values include the following: \"Equal\"|\"NotEqual\"|\"BeginWith\"|\"LessThan\"|\"GreaterThan\"

" + "documentation":"

The type of filter.

" } }, "documentation":"

One or more filters. Use a filter to return a more specific list of results.

" @@ -9575,6 +9576,7 @@ "shape":"Comment", "documentation":"

Information about the commands to run.

" }, + "CloudWatchOutputConfig":{"shape":"CloudWatchOutputConfig"}, "DocumentHash":{ "shape":"DocumentHash", "documentation":"

The SHA-256 or SHA-1 hash created by the system when the document was created. SHA-1 hashes have been deprecated.

" @@ -9583,6 +9585,10 @@ "shape":"DocumentHashType", "documentation":"

SHA-256 or SHA-1. SHA-1 hashes have been deprecated.

" }, + "DocumentVersion":{ + "shape":"DocumentVersion", + "documentation":"

The SSM document version to use in the request. You can specify $DEFAULT, $LATEST, or a specific version number. If you run commands by using the AWS CLI, then you must escape the first two options by using a backslash. If you specify a version number, then you don't need to use the backslash. For example:

--document-version \"\\$DEFAULT\"

--document-version \"\\$LATEST\"

--document-version \"3\"

" + }, "NotificationConfig":{ "shape":"NotificationConfig", "documentation":"

Configurations for sending notifications about command status changes on a per-instance basis.

" @@ -10356,7 +10362,7 @@ "LimitType":{"shape":"String"}, "Message":{"shape":"String"} }, - "documentation":"

The request caused OpsItems to exceed one or more limits. For information about OpsItem limits, see What are the resource limits for OpsCenter?.

", + "documentation":"

The request caused OpsItems to exceed one or more quotas. For information about OpsItem quotas, see What are the resource limits for OpsCenter?.

", "exception":true }, "OpsItemMaxResults":{ @@ -11278,10 +11284,7 @@ }, "PatchRule":{ "type":"structure", - "required":[ - "PatchFilterGroup", - "ApproveAfterDays" - ], + "required":["PatchFilterGroup"], "members":{ "PatchFilterGroup":{ "shape":"PatchFilterGroup", @@ -11296,6 +11299,11 @@ "documentation":"

The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released.

", "box":true }, + "ApproveUntilDate":{ + "shape":"PatchStringDate", + "documentation":"

The cutoff date for auto approval of released patches. Any patches released on or before this date will be installed automatically.

", + "box":true + }, "EnableNonSecurity":{ "shape":"Boolean", "documentation":"

For instances identified by the approval rule filters, enables a patch baseline to apply non-security updates available in the specified repository. The default value is 'false'. Applies to Linux instances only.

", @@ -11397,6 +11405,12 @@ }, "documentation":"

Information about the approval status of a patch.

" }, + "PatchStringDate":{ + "type":"string", + "max":10, + "min":1, + "pattern":"^(\\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12]\\d|3[01]))$" + }, "PatchTitle":{"type":"string"}, "PatchUnreportedNotApplicableCount":{"type":"integer"}, "PatchVendor":{"type":"string"}, @@ -11532,7 +11546,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

  • Parameter names are case sensitive.

  • A parameter name must be unique within an AWS Region

  • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

  • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-/

  • A parameter name can't include spaces.

  • Parameter hierarchies are limited to a maximum depth of fifteen levels.

For additional information about valid values for parameter names, see Requirements and Constraints for Parameter Names in the AWS Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for the fully qualified parameter name is 1011 characters, including the full length of the parameter ARN. For example, the following fully qualified parameter name is 65 characters, not 20 characters:

arn:aws:ssm:us-east-2:111122223333:parameter/ExampleParameterName

" + "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

  • Parameter names are case sensitive.

  • A parameter name must be unique within an AWS Region

  • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

  • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-/

  • A parameter name can't include spaces.

  • Parameter hierarchies are limited to a maximum depth of fifteen levels.

For additional information about valid values for parameter names, see Requirements and Constraints for Parameter Names in the AWS Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters:

arn:aws:ssm:us-east-2:111122223333:parameter/ExampleParameterName

" }, "Description":{ "shape":"ParameterDescription", @@ -12120,7 +12134,7 @@ }, "AwsOrganizationsSource":{ "shape":"ResourceDataSyncAwsOrganizationsSource", - "documentation":"

The field name in SyncSource for the ResourceDataSyncAwsOrganizationsSource type.

" + "documentation":"

Information about the AwsOrganizationsSource resource data sync source. A sync source of this type can synchronize data from AWS Organizations.

" }, "SourceRegions":{ "shape":"ResourceDataSyncSourceRegionList", @@ -12197,7 +12211,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Error returned when the caller has exceeded the default resource limits. For example, too many maintenance windows or patch baselines have been created.

For information about resource limits in Systems Manager, see AWS Systems Manager Limits.

", + "documentation":"

Error returned when the caller has exceeded the default resource quotas. For example, too many maintenance windows or patch baselines have been created.

For information about resource quotas in Systems Manager, see Systems Manager Service Quotas in the AWS General Reference.

", "exception":true }, "ResourceType":{ @@ -12260,7 +12274,7 @@ }, "StreamUrl":{ "shape":"StreamUrl", - "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in the AWS Systems Manager table of regions and endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" + "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager Service Endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" } } }, @@ -12788,6 +12802,10 @@ "shape":"TargetLocations", "documentation":"

A location is a combination of AWS Regions and/or AWS accounts where you want to run the Automation. Use this action to start an Automation in multiple Regions and multiple accounts. For more information, see Executing Automations in Multiple AWS Regions and Accounts in the AWS Systems Manager User Guide.

", "box":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key name/value pairs:

  • Key=environment,Value=test

  • Key=OS,Value=Windows

To add tags to an existing automation, use the AddTagsToResource action.

" } } }, @@ -12831,7 +12849,7 @@ }, "StreamUrl":{ "shape":"StreamUrl", - "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in the AWS Systems Manager table of regions and endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" + "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager Service Endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" } } }, @@ -12892,7 +12910,7 @@ }, "StepStatus":{ "shape":"AutomationExecutionStatus", - "documentation":"

The execution status for this step. Valid values include: Pending, InProgress, Success, Cancelled, Failed, and TimedOut.

" + "documentation":"

The execution status for this step.

" }, "ResponseCode":{ "shape":"String", diff --git a/botocore/data/storagegateway/2013-06-30/service-2.json b/botocore/data/storagegateway/2013-06-30/service-2.json index 6b9a6554..7fd54ca2 100644 --- a/botocore/data/storagegateway/2013-06-30/service-2.json +++ b/botocore/data/storagegateway/2013-06-30/service-2.json @@ -3083,6 +3083,7 @@ "VMWARE", "HYPER-V", "EC2", + "KVM", "OTHER" ] }, diff --git a/botocore/data/transcribe/2017-10-26/service-2.json b/botocore/data/transcribe/2017-10-26/service-2.json index 9223f31b..965af489 100644 --- a/botocore/data/transcribe/2017-10-26/service-2.json +++ b/botocore/data/transcribe/2017-10-26/service-2.json @@ -29,6 +29,22 @@ ], "documentation":"

Creates a new custom vocabulary that you can use to change the way Amazon Transcribe handles transcription of an audio file.

" }, + "CreateVocabularyFilter":{ + "name":"CreateVocabularyFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVocabularyFilterRequest"}, + "output":{"shape":"CreateVocabularyFilterResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Creates a new vocabulary filter that you can use to filter words, such as profane words, from the output of a transcription job.

" + }, "DeleteTranscriptionJob":{ "name":"DeleteTranscriptionJob", "http":{ @@ -58,6 +74,21 @@ ], "documentation":"

Deletes a vocabulary from Amazon Transcribe.

" }, + "DeleteVocabularyFilter":{ + "name":"DeleteVocabularyFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVocabularyFilterRequest"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Removes a vocabulary filter.

" + }, "GetTranscriptionJob":{ "name":"GetTranscriptionJob", "http":{ @@ -90,6 +121,22 @@ ], "documentation":"

Gets information about a vocabulary.

" }, + "GetVocabularyFilter":{ + "name":"GetVocabularyFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetVocabularyFilterRequest"}, + "output":{"shape":"GetVocabularyFilterResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Returns information about a vocabulary filter.

" + }, "ListTranscriptionJobs":{ "name":"ListTranscriptionJobs", "http":{ @@ -120,6 +167,21 @@ ], "documentation":"

Returns a list of vocabularies that match the specified criteria. If no criteria are specified, returns the entire list of vocabularies.

" }, + "ListVocabularyFilters":{ + "name":"ListVocabularyFilters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVocabularyFiltersRequest"}, + "output":{"shape":"ListVocabularyFiltersResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Gets information about vocabulary filters.

" + }, "StartTranscriptionJob":{ "name":"StartTranscriptionJob", "http":{ @@ -152,6 +214,22 @@ {"shape":"ConflictException"} ], "documentation":"

Updates an existing vocabulary with new values. The UpdateVocabulary operation overwrites all of the existing information with the values that you provide in the request.

" + }, + "UpdateVocabularyFilter":{ + "name":"UpdateVocabularyFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateVocabularyFilterRequest"}, + "output":{"shape":"UpdateVocabularyFilterResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Updates a vocabulary filter with a new list of filtered words.

" } }, "shapes":{ @@ -172,6 +250,48 @@ "documentation":"

When you are using the CreateVocabulary operation, the JobName field is a duplicate of a previously entered job name. Resend your request with a different name.

When you are using the UpdateVocabulary operation, there are two jobs running at the same time. Resend the second request later.

", "exception":true }, + "CreateVocabularyFilterRequest":{ + "type":"structure", + "required":[ + "VocabularyFilterName", + "LanguageCode" + ], + "members":{ + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The vocabulary filter name. The name must be unique within the account that contains it.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the words in the vocabulary filter. All words in the filter must be in the same language. The vocabulary filter can only be used with transcription jobs in the specified language.

" + }, + "Words":{ + "shape":"Words", + "documentation":"

The words to use in the vocabulary filter. Only use characters from the character set defined for custom vocabularies. For a list of character sets, see Character Sets for Custom Vocabularies.

If you provide a list of words in the Words parameter, you can't use the VocabularyFilterFileUri parameter.

" + }, + "VocabularyFilterFileUri":{ + "shape":"Uri", + "documentation":"

The Amazon S3 location of a text file used as input to create the vocabulary filter. Only use characters from the character set defined for custom vocabularies. For a list of character sets, see Character Sets for Custom Vocabularies.

The specified file must be less than 50 KB of UTF-8 characters.

If you provide the location of a list of words in the VocabularyFilterFileUri parameter, you can't use the Words parameter.

" + } + } + }, + "CreateVocabularyFilterResponse":{ + "type":"structure", + "members":{ + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the vocabulary filter.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the words in the collection.

" + }, + "LastModifiedTime":{ + "shape":"DateTime", + "documentation":"

The date and time that the vocabulary filter was modified.

" + } + } + }, "CreateVocabularyRequest":{ "type":"structure", "required":[ @@ -222,6 +342,10 @@ } } }, + "DataAccessRoleArn":{ + "type":"string", + "pattern":"^arn:aws:iam::[0-9]{0,63}:role/[A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, "DateTime":{"type":"timestamp"}, "DeleteTranscriptionJobRequest":{ "type":"structure", @@ -233,6 +357,16 @@ } } }, + "DeleteVocabularyFilterRequest":{ + "type":"structure", + "required":["VocabularyFilterName"], + "members":{ + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the vocabulary filter to remove.

" + } + } + }, "DeleteVocabularyRequest":{ "type":"structure", "required":["VocabularyName"], @@ -263,6 +397,37 @@ } } }, + "GetVocabularyFilterRequest":{ + "type":"structure", + "required":["VocabularyFilterName"], + "members":{ + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the vocabulary filter for which to return information.

" + } + } + }, + "GetVocabularyFilterResponse":{ + "type":"structure", + "members":{ + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the vocabulary filter.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the words in the vocabulary filter.

" + }, + "LastModifiedTime":{ + "shape":"DateTime", + "documentation":"

The date and time that the contents of the vocabulary filter were updated.

" + }, + "DownloadUri":{ + "shape":"Uri", + "documentation":"

The URI of the list of words in the vocabulary filter. You can use this URI to get the list of words.

" + } + } + }, "GetVocabularyRequest":{ "type":"structure", "required":["VocabularyName"], @@ -311,6 +476,20 @@ "exception":true, "fault":true }, + "JobExecutionSettings":{ + "type":"structure", + "members":{ + "AllowDeferredExecution":{ + "shape":"Boolean", + "documentation":"

Indicates whether a job should be queued by Amazon Transcribe when the concurrent execution limit is exceeded. When the AllowDeferredExecution field is true, jobs are queued and will be executed when the number of executing jobs falls below the concurrent execution limit. If the field is false, Amazon Transcribe returns a LimitExceededException exception.

If you specify the AllowDeferredExecution field, you must specify the DataAccessRoleArn field.

" + }, + "DataAccessRoleArn":{ + "shape":"DataAccessRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of a role that has access to the S3 bucket that contains the input files. Amazon Transcribe will assume this role to read queued media files. If you have specified an output S3 bucket for the transcription results, this role should have access to the output bucket as well.

If you specify the AllowDeferredExecution field, you must specify the DataAccessRoleArn field.

" + } + }, + "documentation":"

Provides information about when a transcription job should be executed.

" + }, "KMSKeyId":{ "type":"string", "max":2048, @@ -437,6 +616,36 @@ } } }, + "ListVocabularyFiltersRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous request to ListVocabularyFilters was truncated, include the NextToken to fetch the next set of collections.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of filters to return in the response. If there are fewer results in the list, this response contains only the actual results.

" + }, + "NameContains":{ + "shape":"VocabularyFilterName", + "documentation":"

Filters the response so that it only contains vocabulary filters whose name contains the specified string.

" + } + } + }, + "ListVocabularyFiltersResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The ListVocabularyFilters operation returns a page of collections at a time. The maximum size of the page is set by the MaxResults parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns the NextToken field. Include the token in the next request to the ListVocabularyFilters operation to return the next page of jobs.

" + }, + "VocabularyFilters":{ + "shape":"VocabularyFilters", + "documentation":"

The list of vocabulary filters. It will contain at most MaxResults number of filters. If there are more filters, call the ListVocabularyFilters operation again with the NextToken parameter in the request set to the value of the NextToken field in the response.

" + } + } + }, "MaxAlternatives":{ "type":"integer", "max":10, @@ -537,6 +746,14 @@ "MaxAlternatives":{ "shape":"MaxAlternatives", "documentation":"

The number of alternative transcriptions that the service should return. If you specify the MaxAlternatives field, you must set the ShowAlternatives field to true.

" + }, + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the vocabulary filter to use when transcribing the audio. The filter that you specify must have the same language code as the transcription job.

" + }, + "VocabularyFilterMethod":{ + "shape":"VocabularyFilterMethod", + "documentation":"

Set to mask to remove filtered text from the transcript and replace it with three asterisks (\"***\") as placeholder text. Set to remove to remove filtered text from the transcript without using placeholder text.

" } }, "documentation":"

Provides optional settings for the StartTranscriptionJob operation.

" @@ -580,6 +797,10 @@ "Settings":{ "shape":"Settings", "documentation":"

A Settings object that provides optional settings for a transcription job.

" + }, + "JobExecutionSettings":{ + "shape":"JobExecutionSettings", + "documentation":"

Provides information about how a transcription job is executed. Use this field to indicate that the job can be queued for deferred execution if the concurrency limit is reached and there are no slots available to immediately run the job.

" } } }, @@ -634,6 +855,10 @@ "shape":"Transcript", "documentation":"

An object that describes the output of the transcription job.

" }, + "StartTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the job started processing.

" + }, "CreationTime":{ "shape":"DateTime", "documentation":"

A timestamp that shows when the job was created.

" @@ -649,6 +874,10 @@ "Settings":{ "shape":"Settings", "documentation":"

Optional settings for the transcription job. Use these settings to turn on speaker recognition, to set the maximum number of speakers that should be identified and to specify a custom vocabulary to use when processing the transcription job.

" + }, + "JobExecutionSettings":{ + "shape":"JobExecutionSettings", + "documentation":"

Provides information about how a transcription job is executed.

" } }, "documentation":"

Describes an asynchronous transcription job that was created with the StartTranscriptionJob operation.

" @@ -662,6 +891,7 @@ "TranscriptionJobStatus":{ "type":"string", "enum":[ + "QUEUED", "IN_PROGRESS", "FAILED", "COMPLETED" @@ -682,6 +912,10 @@ "shape":"DateTime", "documentation":"

A timestamp that shows when the job was created.

" }, + "StartTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the job started processing.

" + }, "CompletionTime":{ "shape":"DateTime", "documentation":"

A timestamp that shows when the job was completed.

" @@ -705,6 +939,41 @@ }, "documentation":"

Provides a summary of information about a transcription job.

" }, + "UpdateVocabularyFilterRequest":{ + "type":"structure", + "required":["VocabularyFilterName"], + "members":{ + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the vocabulary filter to update.

" + }, + "Words":{ + "shape":"Words", + "documentation":"

The words to use in the vocabulary filter. Only use characters from the character set defined for custom vocabularies. For a list of character sets, see Character Sets for Custom Vocabularies.

If you provide a list of words in the Words parameter, you can't use the VocabularyFilterFileUri parameter.

" + }, + "VocabularyFilterFileUri":{ + "shape":"Uri", + "documentation":"

The Amazon S3 location of a text file used as input to create the vocabulary filter. Only use characters from the character set defined for custom vocabularies. For a list of character sets, see Character Sets for Custom Vocabularies.

The specified file must be less than 50 KB of UTF-8 characters.

If you provide the location of a list of words in the VocabularyFilterFileUri parameter, you can't use the Words parameter.

" + } + } + }, + "UpdateVocabularyFilterResponse":{ + "type":"structure", + "members":{ + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the updated vocabulary filter.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the words in the vocabulary filter.

" + }, + "LastModifiedTime":{ + "shape":"DateTime", + "documentation":"

The date and time that the vocabulary filter was updated.

" + } + } + }, "UpdateVocabularyRequest":{ "type":"structure", "required":[ @@ -761,6 +1030,41 @@ "type":"list", "member":{"shape":"VocabularyInfo"} }, + "VocabularyFilterInfo":{ + "type":"structure", + "members":{ + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the vocabulary filter. The name must be unique in the account that holds the filter.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the words in the vocabulary filter.

" + }, + "LastModifiedTime":{ + "shape":"DateTime", + "documentation":"

The date and time that the vocabulary was last updated.

" + } + }, + "documentation":"

Provides information about a vocabulary filter.

" + }, + "VocabularyFilterMethod":{ + "type":"string", + "enum":[ + "remove", + "mask" + ] + }, + "VocabularyFilterName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z._-]+" + }, + "VocabularyFilters":{ + "type":"list", + "member":{"shape":"VocabularyFilterInfo"} + }, "VocabularyInfo":{ "type":"structure", "members":{ @@ -796,6 +1100,16 @@ "READY", "FAILED" ] + }, + "Word":{ + "type":"string", + "max":256, + "min":1 + }, + "Words":{ + "type":"list", + "member":{"shape":"Word"}, + "min":1 } }, "documentation":"

Operations and objects for transcribing speech to text.

" diff --git a/botocore/data/transfer/2018-11-05/service-2.json b/botocore/data/transfer/2018-11-05/service-2.json index dfc84fcc..63b3b0b2 100644 --- a/botocore/data/transfer/2018-11-05/service-2.json +++ b/botocore/data/transfer/2018-11-05/service-2.json @@ -107,7 +107,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the server that you specify by passing the ServerId parameter.

The response contains a description of the server's properties.

" + "documentation":"

Describes the server that you specify by passing the ServerId parameter.

The response contains a description of the server's properties. When you set EndpointType to VPC, the response will contain the EndpointDetails.

" }, "DescribeUser":{ "name":"DescribeUser", @@ -280,6 +280,7 @@ "output":{"shape":"UpdateServerResponse"}, "errors":[ {"shape":"ServiceUnavailableException"}, + {"shape":"ConflictException"}, {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"}, {"shape":"ResourceExistsException"}, @@ -307,22 +308,36 @@ } }, "shapes":{ + "AddressAllocationId":{"type":"string"}, + "AddressAllocationIds":{ + "type":"list", + "member":{"shape":"AddressAllocationId"} + }, "Arn":{ "type":"string", "max":1600, "min":20, "pattern":"arn:.*" }, + "ConflictException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

This exception is thrown when the UpdateServer API is called for a server that has VPC as the endpoint type and the server's VpcEndpointID is not in the available state.

", + "exception":true + }, "CreateServerRequest":{ "type":"structure", "members":{ "EndpointDetails":{ "shape":"EndpointDetails", - "documentation":"

The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server. This parameter is required when you specify a value for the EndpointType parameter.

" + "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, you can restrict access to your SFTP server to resources only within your VPC. To control incoming internet traffic, you will need to invoke the UpdateServer API and attach an Elastic IP to your server's endpoint.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of VPC endpoint that you want your SFTP server to connect to. If you connect to a VPC endpoint, your SFTP server isn't accessible over the public internet.

" + "documentation":"

The type of VPC endpoint that you want your SFTP server to connect to. You can choose to connect to the public internet or a virtual private cloud (VPC) endpoint. With a VPC endpoint, you can restrict access to your SFTP server and resources only within your VPC.

" }, "HostKey":{ "shape":"HostKey", @@ -374,7 +389,7 @@ }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

" + "documentation":"

Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in S3, the entry will be ignored. As a workaround, you can use the S3 API to create 0-byte objects as placeholders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the key name ends in a / for it to be considered a folder.

" }, "Policy":{ "shape":"Policy", @@ -620,17 +635,30 @@ "EndpointDetails":{ "type":"structure", "members":{ + "AddressAllocationIds":{ + "shape":"AddressAllocationIds", + "documentation":"

A list of address allocation IDs that are required to attach an Elastic IP address to your SFTP server's endpoint. This is only valid in the UpdateServer API.

This property can only be used when EndpointType is set to VPC.

" + }, + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

A list of subnet IDs that are required to host your SFTP server endpoint in your VPC.

" + }, "VpcEndpointId":{ "shape":"VpcEndpointId", "documentation":"

The ID of the VPC endpoint.

" + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

The VPC ID of the virtual private cloud in which the SFTP server's endpoint will be hosted.

" } }, - "documentation":"

The configuration settings for the virtual private cloud (VPC) endpoint for your SFTP server.

" + "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, you can restrict access to your SFTP server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP to your server's endpoint.

" }, "EndpointType":{ "type":"string", "enum":[ "PUBLIC", + "VPC", "VPC_ENDPOINT" ] }, @@ -1106,6 +1134,11 @@ } } }, + "SubnetId":{"type":"string"}, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"} + }, "Tag":{ "type":"structure", "required":[ @@ -1238,7 +1271,7 @@ "members":{ "EndpointDetails":{ "shape":"EndpointDetails", - "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, your SFTP server isn't accessible over the public internet.

" + "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, you can restrict access to your SFTP server to resources only within your VPC. To control incoming internet traffic, you will need to associate one or more Elastic IP addresses with your server's endpoint.

" }, "EndpointType":{ "shape":"EndpointType", @@ -1289,7 +1322,7 @@ }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

" + "documentation":"

Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in S3, the entry will be ignored. As a workaround, you can use the S3 API to create 0-byte objects as placeholders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the key name ends in a / for it to be considered a folder.

" }, "Policy":{ "shape":"Policy", @@ -1348,7 +1381,8 @@ "max":22, "min":22, "pattern":"^vpce-[0-9a-f]{17}$" - } + }, + "VpcId":{"type":"string"} }, "documentation":"

AWS Transfer for SFTP is a fully managed service that enables the transfer of files directly into and out of Amazon S3 using the Secure File Transfer Protocol (SFTP)—also known as Secure Shell (SSH) File Transfer Protocol. AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer for SFTP—by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53—so nothing changes for your customers and partners, or their applications. With your data in S3, you can use it with AWS services for processing, analytics, machine learning, and archiving. Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no infrastructure to buy and set up.

" } diff --git a/botocore/data/translate/2017-07-01/service-2.json b/botocore/data/translate/2017-07-01/service-2.json index 22360d03..520bfb03 100644 --- a/botocore/data/translate/2017-07-01/service-2.json +++ b/botocore/data/translate/2017-07-01/service-2.json @@ -27,6 +27,21 @@ ], "documentation":"

A synchronous action that deletes a custom terminology.

" }, + "DescribeTextTranslationJob":{ + "name":"DescribeTextTranslationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTextTranslationJobRequest"}, + "output":{"shape":"DescribeTextTranslationJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets the properties associated with an asynchronous batch translation job including name, ID, status, source and target languages, input/output S3 buckets, and so on.

" + }, "GetTerminology":{ "name":"GetTerminology", "http":{ @@ -74,6 +89,54 @@ ], "documentation":"

Provides a list of custom terminologies associated with your account.

" }, + "ListTextTranslationJobs":{ + "name":"ListTextTranslationJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTextTranslationJobsRequest"}, + "output":{"shape":"ListTextTranslationJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidFilterException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a list of the batch translation jobs that you have submitted.

" + }, + "StartTextTranslationJob":{ + "name":"StartTextTranslationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartTextTranslationJobRequest"}, + "output":{"shape":"StartTextTranslationJobResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedLanguagePairException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an asynchronous batch translation job. Batch translation jobs can be used to translate large volumes of text across multiple documents at once. For more information, see async.

Batch translation jobs can be described with the DescribeTextTranslationJob operation, listed with the ListTextTranslationJobs operation, and stopped with the StopTextTranslationJob operation.

Amazon Translate does not support batch translation of multiple source languages at once.

" + }, + "StopTextTranslationJob":{ + "name":"StopTextTranslationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopTextTranslationJobRequest"}, + "output":{"shape":"StopTextTranslationJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops an asynchronous batch translation job that is in progress.

If the job's state is IN_PROGRESS, the job will be marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state. Otherwise, the job is put into the STOPPED state.

Asynchronous batch translation jobs are started with the StartTextTranslationJob operation. You can use the DescribeTextTranslationJob or ListTextTranslationJobs operations to get a batch translation job's JobId.

" + }, "TranslateText":{ "name":"TranslateText", "http":{ @@ -92,7 +155,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Translates input text from the source language to the target language. It is not necessary to use English (en) as either the source or the target language but not all language combinations are supported by Amazon Translate. For more information, see Supported Language Pairs.

  • Arabic (ar)

  • Chinese (Simplified) (zh)

  • Chinese (Traditional) (zh-TW)

  • Czech (cs)

  • Danish (da)

  • Dutch (nl)

  • English (en)

  • Finnish (fi)

  • French (fr)

  • German (de)

  • Hebrew (he)

  • Indonesian (id)

  • Italian (it)

  • Japanese (ja)

  • Korean (ko)

  • Polish (pl)

  • Portuguese (pt)

  • Russian (ru)

  • Spanish (es)

  • Swedish (sv)

  • Turkish (tr)

To have Amazon Translate determine the source language of your text, you can specify auto in the SourceLanguageCode field. If you specify auto, Amazon Translate will call Amazon Comprehend to determine the source language.

" + "documentation":"

Translates input text from the source language to the target language. For a list of available languages and language codes, see what-is-languages.

" } }, "shapes":{ @@ -120,6 +183,17 @@ "min":1, "pattern":"[\\P{M}\\p{M}]{1,5000}" }, + "ClientTokenString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "ContentType":{ + "type":"string", + "max":256, + "pattern":"^[-\\w.]+\\/[-\\w.+]+$" + }, "DeleteTerminologyRequest":{ "type":"structure", "required":["Name"], @@ -130,6 +204,25 @@ } } }, + "DescribeTextTranslationJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier that Amazon Translate generated for the job. The StartTextTranslationJob operation returns this identifier in its response.

" + } + } + }, + "DescribeTextTranslationJobResponse":{ + "type":"structure", + "members":{ + "TextTranslationJobProperties":{ + "shape":"TextTranslationJobProperties", + "documentation":"

An object that contains the properties associated with an asynchronous batch translation job.

" + } + } + }, "Description":{ "type":"string", "max":256, @@ -169,7 +262,7 @@ "type":"string", "max":400, "min":1, - "pattern":"(arn:aws((-us-gov)|(-cn))?:kms:)?([a-z]{2}-[a-z]+-\\d:)?(\\d{12}:)?(((key/)?[a-zA-Z0-9-_]+)|(alias/[a-zA-Z0-9:/_-]+))" + "pattern":"(arn:aws((-us-gov)|(-iso)|(-iso-b)|(-cn))?:kms:)?([a-z]{2}-[a-z]+(-[a-z]+)?-\\d:)?(\\d{12}:)?(((key/)?[a-zA-Z0-9-_]+)|(alias/[a-zA-Z0-9:/_-]+))" }, "EncryptionKeyType":{ "type":"string", @@ -205,6 +298,12 @@ } } }, + "IamRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+" + }, "ImportTerminologyRequest":{ "type":"structure", "required":[ @@ -244,16 +343,42 @@ } } }, + "InputDataConfig":{ + "type":"structure", + "required":[ + "S3Uri", + "ContentType" + ], + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

The URI of the AWS S3 folder that contains the input file. The folder must be in the same Region as the API endpoint you are calling.

" + }, + "ContentType":{ + "shape":"ContentType", + "documentation":"

The multipurpose internet mail extension (MIME) type of the input files. Valid values are text/plain for plaintext files and text/html for HTML files.

" + } + }, + "documentation":"

The input configuration properties for requesting a batch translation job.

" + }, "Integer":{"type":"integer"}, "InternalServerException":{ "type":"structure", "members":{ "Message":{"shape":"String"} }, - "documentation":"

An internal server error occurred. Retry your request.

", + "documentation":"

An internal server error occurred. Retry your request.

", "exception":true, "fault":true }, + "InvalidFilterException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The filter specified for the operation is invalid. Specify a different filter.

", + "exception":true + }, "InvalidParameterValueException":{ "type":"structure", "members":{ @@ -270,6 +395,48 @@ "documentation":"

The request that you made is invalid. Check your request to determine why it's invalid and then retry the request.

", "exception":true }, + "JobDetails":{ + "type":"structure", + "members":{ + "TranslatedDocumentsCount":{ + "shape":"Integer", + "documentation":"

The number of documents successfully processed during a translation job.

" + }, + "DocumentsWithErrorsCount":{ + "shape":"Integer", + "documentation":"

The number of documents that could not be processed during a translation job.

" + }, + "InputDocumentsCount":{ + "shape":"Integer", + "documentation":"

The number of documents used as input in a translation job.

" + } + }, + "documentation":"

The number of documents successfully and unsuccessfully processed during a translation job.

" + }, + "JobId":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "JobName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "JobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "COMPLETED", + "COMPLETED_WITH_ERROR", + "FAILED", + "STOP_REQUESTED", + "STOPPED" + ] + }, "LanguageCodeString":{ "type":"string", "max":5, @@ -309,7 +476,37 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

If the response to the ListTerminologies was truncated, the NextToken fetches the next group of custom terminologies.

" + "documentation":"

If the response to the ListTerminologies was truncated, the NextToken fetches the next group of custom terminologies.

" + } + } + }, + "ListTextTranslationJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"TextTranslationJobFilter", + "documentation":"

The parameters that specify which batch translation jobs to retrieve. Filters include job name, job status, and submission time. You can only set one filter at a time.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to request the next page of results.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of results to return in each page. The default value is 100.

" + } + } + }, + "ListTextTranslationJobsResponse":{ + "type":"structure", + "members":{ + "TextTranslationJobPropertiesList":{ + "shape":"TextTranslationJobPropertiesList", + "documentation":"

A list containing the properties of each job that is returned.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -327,6 +524,17 @@ "max":8192, "pattern":"\\p{ASCII}{0,8192}" }, + "OutputDataConfig":{ + "type":"structure", + "required":["S3Uri"], + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

The URI of the S3 folder that contains a translation job's output file. The folder must be in the same Region as the API endpoint that you are calling.

" + } + }, + "documentation":"

The output configuration properties for a batch translation job.

" + }, "ResourceName":{ "type":"string", "max":256, @@ -342,22 +550,116 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The resource you are looking for has not been found. Review the resource you're looking for and see if a different resource will accomplish your needs before retrying the revised request. .

", + "documentation":"

The resource you are looking for has not been found. Review the resource you're looking for and see if a different resource will accomplish your needs before retrying the revised request.

", "exception":true }, + "S3Uri":{ + "type":"string", + "max":1024, + "pattern":"s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?" + }, "ServiceUnavailableException":{ "type":"structure", "members":{ "Message":{"shape":"String"} }, "documentation":"

The Amazon Translate service is temporarily unavailable. Please wait a bit and then retry your request.

", - "exception":true + "exception":true, + "fault":true + }, + "StartTextTranslationJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "SourceLanguageCode", + "TargetLanguageCodes", + "ClientToken" + ], + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

The name of the batch translation job to be performed.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Specifies the format and S3 location of the input documents for the translation job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies the S3 folder to which your job output will be saved.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that grants Amazon Translate read access to your input data. For more information, see identity-and-access-management.

" + }, + "SourceLanguageCode":{ + "shape":"LanguageCodeString", + "documentation":"

The language code of the input language. For a list of language codes, see what-is-languages.

Amazon Translate does not automatically detect a source language during batch translation jobs.

" + }, + "TargetLanguageCodes":{ + "shape":"TargetLanguageCodeStringList", + "documentation":"

The language code of the output language.

" + }, + "TerminologyNames":{ + "shape":"ResourceNameList", + "documentation":"

The name of the terminology to use in the batch translation job. For a list of available terminologies, use the ListTerminologies operation.

" + }, + "ClientToken":{ + "shape":"ClientTokenString", + "documentation":"

The client token of the EC2 instance calling the request. This token is auto-generated when using the Amazon Translate SDK. Otherwise, use the DescribeInstances EC2 operation to retrieve an instance's client token. For more information, see Client Tokens in the EC2 User Guide.

", + "idempotencyToken":true + } + } + }, + "StartTextTranslationJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier generated for the job. To get the status of a job, use this ID with the DescribeTextTranslationJob operation.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The status of the job. Possible values include:

  • SUBMITTED - The job has been received and is queued for processing.

  • IN_PROGRESS - Amazon Translate is processing the job.

  • COMPLETED - The job was successfully completed and the output is available.

  • COMPLETED_WITH_ERRORS - The job was completed with errors. The errors can be analyzed in the job's output.

  • FAILED - The job did not complete. To get details, use the DescribeTextTranslationJob operation.

  • STOP_REQUESTED - The user who started the job has requested that it be stopped.

  • STOPPED - The job has been stopped.

" + } + } + }, + "StopTextTranslationJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The job ID of the job to be stopped.

" + } + } + }, + "StopTextTranslationJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The job ID of the stopped batch translation job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The status of the designated job. Upon successful completion, the job's status will be STOPPED.

" + } + } }, "String":{ "type":"string", "max":10000, "pattern":"[\\P{M}\\p{M}]{0,10000}" }, + "TargetLanguageCodeStringList":{ + "type":"list", + "member":{"shape":"LanguageCodeString"}, + "max":1, + "min":1 + }, "Term":{ "type":"structure", "members":{ @@ -378,7 +680,7 @@ }, "TerminologyArn":{ "type":"string", - "pattern":"^arn:aws((-us-gov)|(-cn))?:translate:[a-zA-Z0-9-]+:[0-9]{12}:terminology/.+?/.+?$" + "pattern":"^arn:aws((-us-gov)|(-iso)|(-iso-b)|(-cn))?:translate:[a-zA-Z0-9-]+:[0-9]{12}:terminology/.+?/.+?$" }, "TerminologyData":{ "type":"structure", @@ -389,7 +691,7 @@ "members":{ "File":{ "shape":"TerminologyFile", - "documentation":"

The file containing the custom terminology data.

" + "documentation":"

The file containing the custom terminology data. Your version of the AWS SDK performs a Base64-encoding on this field before sending a request to the AWS service. Users of the SDK should not perform Base64-encoding themselves.

" }, "Format":{ "shape":"TerminologyDataFormat", @@ -486,6 +788,90 @@ "documentation":"

The size of the text you submitted exceeds the size limit. Reduce the size of the text or use a smaller document and then retry your request.

", "exception":true }, + "TextTranslationJobFilter":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

Filters the list of jobs by name.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Filters the list of jobs by job status.

" + }, + "SubmittedBeforeTime":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing and returns only the jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.

" + }, + "SubmittedAfterTime":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing and returns only the jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.

" + } + }, + "documentation":"

Provides information for filtering a list of translation jobs. For more information, see ListTextTranslationJobs.

" + }, + "TextTranslationJobProperties":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The ID of the translation job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The user-defined name of the translation job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The status of the translation job.

" + }, + "JobDetails":{ + "shape":"JobDetails", + "documentation":"

The number of documents successfully and unsuccessfully processed during the translation job.

" + }, + "SourceLanguageCode":{ + "shape":"LanguageCodeString", + "documentation":"

The language code of the language of the source text. The language must be a language supported by Amazon Translate.

" + }, + "TargetLanguageCodes":{ + "shape":"TargetLanguageCodeStringList", + "documentation":"

The language code of the language of the target text. The language must be a language supported by Amazon Translate.

" + }, + "TerminologyNames":{ + "shape":"ResourceNameList", + "documentation":"

A list containing the names of the terminologies applied to a translation job. Only one terminology can be applied per StartTextTranslationJob request at this time.

" + }, + "Message":{ + "shape":"UnboundedLengthString", + "documentation":"

An explanation of any errors that may have occurred during the translation job.

" + }, + "SubmittedTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the translation job was submitted.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the translation job ended.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input configuration properties that were specified when the job was requested.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output configuration properties that were specified when the job was requested.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that granted Amazon Translate read access to the job's input data.

" + } + }, + "documentation":"

Provides information about a translation job.

" + }, + "TextTranslationJobPropertiesList":{ + "type":"list", + "member":{"shape":"TextTranslationJobProperties"} + }, "Timestamp":{"type":"timestamp"}, "TooManyRequestsException":{ "type":"structure", @@ -509,11 +895,11 @@ }, "TerminologyNames":{ "shape":"ResourceNameList", - "documentation":"

The TerminologyNames list that is taken as input to the TranslateText request. This has a minimum length of 0 and a maximum length of 1.

" + "documentation":"

The name of the terminology list file to be used in the TranslateText request. You can use 1 terminology list at most in a TranslateText request. Terminology lists can contain a maximum of 256 terms.

" }, "SourceLanguageCode":{ "shape":"LanguageCodeString", - "documentation":"

The language code for the language of the source text. The language must be a language supported by Amazon Translate.

To have Amazon Translate determine the source language of your text, you can specify auto in the SourceLanguageCode field. If you specify auto, Amazon Translate will call Amazon Comprehend to determine the source language.

" + "documentation":"

The language code for the language of the source text. The language must be a language supported by Amazon Translate. For a list of language codes, see what-is-languages.

To have Amazon Translate determine the source language of your text, you can specify auto in the SourceLanguageCode field. If you specify auto, Amazon Translate will call Amazon Comprehend to determine the source language.

" }, "TargetLanguageCode":{ "shape":"LanguageCodeString", @@ -531,11 +917,11 @@ "members":{ "TranslatedText":{ "shape":"String", - "documentation":"

The the translated text. The maximum length of this text is 5kb.

" + "documentation":"

The translated text.

" }, "SourceLanguageCode":{ "shape":"LanguageCodeString", - "documentation":"

The language code for the language of the source text.

" + "documentation":"

The language code for the language of the source text.

" }, "TargetLanguageCode":{ "shape":"LanguageCodeString", @@ -547,6 +933,7 @@ } } }, + "UnboundedLengthString":{"type":"string"}, "UnsupportedLanguagePairException":{ "type":"structure", "members":{ diff --git a/botocore/data/workmail/2017-10-01/service-2.json b/botocore/data/workmail/2017-10-01/service-2.json index b9ca6278..20ad1a2f 100644 --- a/botocore/data/workmail/2017-10-01/service-2.json +++ b/botocore/data/workmail/2017-10-01/service-2.json @@ -67,7 +67,8 @@ {"shape":"MailDomainNotFoundException"}, {"shape":"MailDomainStateException"}, {"shape":"OrganizationNotFoundException"}, - {"shape":"OrganizationStateException"} + {"shape":"OrganizationStateException"}, + {"shape":"LimitExceededException"} ], "documentation":"

Adds an alias to the set of a given member (user or group) of Amazon WorkMail.

", "idempotent":true @@ -485,6 +486,19 @@ "documentation":"

Returns summaries of the organization's resources.

", "idempotent":true }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the tags applied to an Amazon WorkMail organization resource.

" + }, "ListUsers":{ "name":"ListUsers", "http":{ @@ -565,6 +579,34 @@ "documentation":"

Allows the administrator to reset the password for a user.

", "idempotent":true }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyTagsException"}, + {"shape":"OrganizationStateException"} + ], + "documentation":"

Applies the specified tags to the specified Amazon WorkMail organization resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes the specified tags from the specified Amazon WorkMail organization resource.

" + }, "UpdateMailboxQuota":{ "name":"UpdateMailboxQuota", "http":{ @@ -637,6 +679,11 @@ "type":"list", "member":{"shape":"EmailAddress"} }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, "AssociateDelegateToResourceRequest":{ "type":"structure", "required":[ @@ -1080,6 +1127,10 @@ "ErrorMessage":{ "shape":"String", "documentation":"

(Optional) The error message indicating whether unexpected behavior was encountered with regard to the organization.

" + }, + "ARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the organization.

" } } }, @@ -1125,7 +1176,7 @@ }, "State":{ "shape":"EntityState", - "documentation":"

The state of the resource: enabled (registered to Amazon WorkMail) or disabled (deregistered or never registered to WorkMail).

" + "documentation":"

The state of the resource: enabled (registered to Amazon WorkMail), disabled (deregistered or never registered to WorkMail), or deleted.

" }, "EnabledDate":{ "shape":"Timestamp", @@ -1401,6 +1452,14 @@ "documentation":"

The supplied password doesn't match the minimum security constraints, such as length or use of special characters.

", "exception":true }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request exceeds the limit of the resource.

", + "exception":true + }, "ListAliasesRequest":{ "type":"structure", "required":[ @@ -1641,6 +1700,25 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The resource ARN.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag key-value pairs.

" + } + } + }, "ListUsersRequest":{ "type":"structure", "required":["OrganizationId"], @@ -1996,6 +2074,14 @@ "min":1, "pattern":"[\\w\\-.]+(@[a-zA-Z0-9.\\-]+\\.[a-zA-Z0-9]{2,})?" }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The resource cannot be found.

", + "exception":true + }, "ResourceType":{ "type":"string", "enum":[ @@ -2011,7 +2097,77 @@ "type":"string", "max":256 }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key of the tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value of the tag.

" + } + }, + "documentation":"

Describes a tag applied to a resource.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The resource ARN.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tag key-value pairs.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, "Timestamp":{"type":"timestamp"}, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The resource can have up to 50 user-applied tags.

", + "exception":true + }, "UnsupportedOperationException":{ "type":"structure", "members":{ @@ -2020,6 +2176,28 @@ "documentation":"

You can't perform a write operation against a read-only directory.

", "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The resource ARN.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag keys.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateMailboxQuotaRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/workspaces/2015-04-08/service-2.json b/botocore/data/workspaces/2015-04-08/service-2.json index c8842523..fbbb2b5e 100644 --- a/botocore/data/workspaces/2015-04-08/service-2.json +++ b/botocore/data/workspaces/2015-04-08/service-2.json @@ -371,6 +371,24 @@ ], "documentation":"

Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable Bring Your Own License (BYOL).

The management network interface is connected to a secure Amazon WorkSpaces management network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage the WorkSpace.

" }, + "MigrateWorkspace":{ + "name":"MigrateWorkspace", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MigrateWorkspaceRequest"}, + "output":{"shape":"MigrateWorkspaceResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"OperationNotSupportedException"}, + {"shape":"OperationInProgressException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

Migrates a WorkSpace from one operating system or bundle type to another, while retaining the data on the user volume.

The migration process recreates the WorkSpace by using a new root volume from the target bundle image and the user volume from the last available snapshot of the original WorkSpace. During migration, the original D:\\Users\\%USERNAME% user profile folder is renamed to D:\\Users\\%USERNAME%MMddyyTHHmmss%.NotMigrated. A new D:\\Users\\%USERNAME%\\ folder is generated by the new OS. Certain files in the old user profile are moved to the new user profile.

For available migration scenarios, details about what happens during migration, and best practices, see Migrate a WorkSpace.

" + }, "ModifyAccount":{ "name":"ModifyAccount", "http":{ @@ -430,7 +448,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Specifies which devices and operating systems users can use to access their Workspaces. For more information, see Control Device Access.

" + "documentation":"

Specifies which devices and operating systems users can use to access their WorkSpaces. For more information, see Control Device Access.

" }, "ModifyWorkspaceCreationProperties":{ "name":"ModifyWorkspaceCreationProperties", @@ -499,7 +517,7 @@ }, "input":{"shape":"RebuildWorkspacesRequest"}, "output":{"shape":"RebuildWorkspacesResult"}, - "documentation":"

Rebuilds the specified WorkSpace.

You cannot rebuild a WorkSpace unless its state is AVAILABLE, ERROR, or UNHEALTHY.

Rebuilding a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Rebuild a WorkSpace.

This operation is asynchronous and returns before the WorkSpaces have been completely rebuilt.

" + "documentation":"

Rebuilds the specified WorkSpace.

You cannot rebuild a WorkSpace unless its state is AVAILABLE, ERROR, UNHEALTHY, or STOPPED.

Rebuilding a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Rebuild a WorkSpace.

This operation is asynchronous and returns before the WorkSpaces have been completely rebuilt.

" }, "RegisterWorkspaceDirectory":{ "name":"RegisterWorkspaceDirectory", @@ -534,7 +552,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Restores the specified WorkSpace to its last known healthy state.

You cannot restore a WorkSpace unless its state is AVAILABLE, ERROR, or UNHEALTHY.

Restoring a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Restore a WorkSpace.

This operation is asynchronous and returns before the WorkSpace is completely restored.

" + "documentation":"

Restores the specified WorkSpace to its last known healthy state.

You cannot restore a WorkSpace unless its state is AVAILABLE, ERROR, UNHEALTHY, or STOPPED.

Restoring a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Restore a WorkSpace.

This operation is asynchronous and returns before the WorkSpace is completely restored.

" }, "RevokeIpRules":{ "name":"RevokeIpRules", @@ -857,7 +875,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

The tags. Each WorkSpaces resource can have a maximum of 50 tags.

" + "documentation":"

The tags. Each WorkSpaces resource can have a maximum of 50 tags. If you want to add new tags to a set of existing tags, you must submit all of the existing tags along with the new ones.

" } } }, @@ -1552,6 +1570,36 @@ "max":5, "min":1 }, + "MigrateWorkspaceRequest":{ + "type":"structure", + "required":[ + "SourceWorkspaceId", + "BundleId" + ], + "members":{ + "SourceWorkspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The identifier of the WorkSpace to migrate from.

" + }, + "BundleId":{ + "shape":"BundleId", + "documentation":"

The identifier of the target bundle type to migrate the WorkSpace to.

" + } + } + }, + "MigrateWorkspaceResult":{ + "type":"structure", + "members":{ + "SourceWorkspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The original identifier of the WorkSpace that is being migrated.

" + }, + "TargetWorkspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The new identifier of the WorkSpace that is being migrated. If the migration does not succeed, the target WorkSpace ID will not be used, and the WorkSpace will still have the original WorkSpace ID.

" + } + } + }, "ModificationResourceEnum":{ "type":"string", "enum":[ @@ -2342,7 +2390,7 @@ }, "VolumeEncryptionKey":{ "shape":"VolumeEncryptionKey", - "documentation":"

The KMS key used to encrypt data stored on your WorkSpace.

" + "documentation":"

The symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs.

" }, "UserVolumeEncryptionEnabled":{ "shape":"BooleanObject", @@ -2548,7 +2596,7 @@ }, "WorkspaceAccessProperties":{ "shape":"WorkspaceAccessProperties", - "documentation":"

The devices and operating systems that users can use to access Workspaces.

" + "documentation":"

The devices and operating systems that users can use to access WorkSpaces.

" }, "Tenancy":{ "shape":"Tenancy", @@ -2729,7 +2777,7 @@ }, "VolumeEncryptionKey":{ "shape":"VolumeEncryptionKey", - "documentation":"

The KMS key used to encrypt data stored on your WorkSpace.

" + "documentation":"

The symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs.

" }, "UserVolumeEncryptionEnabled":{ "shape":"BooleanObject", diff --git a/botocore/data/xray/2016-04-12/service-2.json b/botocore/data/xray/2016-04-12/service-2.json index 9271ab6f..7f53ad20 100644 --- a/botocore/data/xray/2016-04-12/service-2.json +++ b/botocore/data/xray/2016-04-12/service-2.json @@ -177,7 +177,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottledException"} ], - "documentation":"

Retrieves a document that describes services that process incoming requests, and downstream services that they call as a result. Root services process incoming requests and make calls to downstream services. Root services are applications that use the AWS X-Ray SDK. Downstream services can be other applications, AWS resources, HTTP web APIs, or SQL databases.

" + "documentation":"

Retrieves a document that describes services that process incoming requests, and downstream services that they call as a result. Root services process incoming requests and make calls to downstream services. Root services are applications that use the AWS X-Ray SDK. Downstream services can be other applications, AWS resources, HTTP web APIs, or SQL databases.

" }, "GetTimeSeriesServiceStatistics":{ "name":"GetTimeSeriesServiceStatistics", @@ -219,7 +219,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottledException"} ], - "documentation":"

Retrieves IDs and metadata for traces available for a specified time frame using an optional filter. To get the full traces, pass the trace IDs to BatchGetTraces.

A filter expression can target traced requests that hit specific service nodes or edges, have errors, or come from a known user. For example, the following filter expression targets traces that pass through api.example.com:

service(\"api.example.com\")

This filter expression finds traces that have an annotation named account with the value 12345:

annotation.account = \"12345\"

For a full list of indexed fields and keywords that you can use in filter expressions, see Using Filter Expressions in the AWS X-Ray Developer Guide.

" + "documentation":"

Retrieves IDs and annotations for traces available for a specified time frame using an optional filter. To get the full traces, pass the trace IDs to BatchGetTraces.

A filter expression can target traced requests that hit specific service nodes or edges, have errors, or come from a known user. For example, the following filter expression targets traces that pass through api.example.com:

service(\"api.example.com\")

This filter expression finds traces that have an annotation named account with the value 12345:

annotation.account = \"12345\"

For a full list of indexed fields and keywords that you can use in filter expressions, see Using Filter Expressions in the AWS X-Ray Developer Guide.

" }, "PutEncryptionConfig":{ "name":"PutEncryptionConfig", @@ -261,7 +261,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottledException"} ], - "documentation":"

Uploads segment documents to AWS X-Ray. The X-Ray SDK generates segment documents and sends them to the X-Ray daemon, which uploads them in batches. A segment document can be a completed segment, an in-progress segment, or an array of subsegments.

Segments must include the following fields. For the full segment document schema, see AWS X-Ray Segment Documents in the AWS X-Ray Developer Guide.

Required Segment Document Fields

  • name - The name of the service that handled the request.

  • id - A 64-bit identifier for the segment, unique among segments in the same trace, in 16 hexadecimal digits.

  • trace_id - A unique identifier that connects all segments and subsegments originating from a single client request.

  • start_time - Time the segment or subsegment was created, in floating point seconds in epoch time, accurate to milliseconds. For example, 1480615200.010 or 1.480615200010E9.

  • end_time - Time the segment or subsegment was closed. For example, 1480615200.090 or 1.480615200090E9. Specify either an end_time or in_progress.

  • in_progress - Set to true instead of specifying an end_time to record that a segment has been started, but is not complete. Send an in progress segment when your application receives a request that will take a long time to serve, to trace the fact that the request was received. When the response is sent, send the complete segment to overwrite the in-progress segment.

A trace_id consists of three numbers separated by hyphens. For example, 1-58406520-a006649127e371903a2de979. This includes:

Trace ID Format

  • The version number, i.e. 1.

  • The time of the original request, in Unix epoch time, in 8 hexadecimal digits. For example, 10:00AM December 2nd, 2016 PST in epoch time is 1480615200 seconds, or 58406520 in hexadecimal.

  • A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits.

" + "documentation":"

Uploads segment documents to AWS X-Ray. The X-Ray SDK generates segment documents and sends them to the X-Ray daemon, which uploads them in batches. A segment document can be a completed segment, an in-progress segment, or an array of subsegments.

Segments must include the following fields. For the full segment document schema, see AWS X-Ray Segment Documents in the AWS X-Ray Developer Guide.

Required Segment Document Fields

  • name - The name of the service that handled the request.

  • id - A 64-bit identifier for the segment, unique among segments in the same trace, in 16 hexadecimal digits.

  • trace_id - A unique identifier that connects all segments and subsegments originating from a single client request.

  • start_time - Time the segment or subsegment was created, in floating point seconds in epoch time, accurate to milliseconds. For example, 1480615200.010 or 1.480615200010E9.

  • end_time - Time the segment or subsegment was closed. For example, 1480615200.090 or 1.480615200090E9. Specify either an end_time or in_progress.

  • in_progress - Set to true instead of specifying an end_time to record that a segment has been started, but is not complete. Send an in progress segment when your application receives a request that will take a long time to serve, to trace the fact that the request was received. When the response is sent, send the complete segment to overwrite the in-progress segment.

A trace_id consists of three numbers separated by hyphens. For example, 1-58406520-a006649127e371903a2de979. This includes:

Trace ID Format

  • The version number, i.e. 1.

  • The time of the original request, in Unix epoch time, in 8 hexadecimal digits. For example, 10:00AM December 2nd, 2016 PST in epoch time is 1480615200 seconds, or 58406520 in hexadecimal.

  • A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits.

" }, "UpdateGroup":{ "name":"UpdateGroup", @@ -409,7 +409,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -426,7 +426,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -854,7 +854,7 @@ "members":{ "NextToken":{ "shape":"GetGroupsNextToken", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -867,7 +867,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -876,7 +876,7 @@ "members":{ "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -889,7 +889,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -898,7 +898,7 @@ "members":{ "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -911,7 +911,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -967,7 +967,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -992,7 +992,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -1029,7 +1029,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -1046,7 +1046,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -1060,7 +1060,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -1073,7 +1073,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination token. Not used.

" + "documentation":"

Pagination token.

" } } }, @@ -1119,7 +1119,7 @@ "members":{ "TraceSummaries":{ "shape":"TraceSummaryList", - "documentation":"

Trace IDs and metadata for traces that were found in the specified time frame.

" + "documentation":"

Trace IDs and annotations for traces that were found in the specified time frame.

" }, "ApproximateTime":{ "shape":"Timestamp", @@ -1275,7 +1275,7 @@ "members":{ "KeyId":{ "shape":"EncryptionKeyId", - "documentation":"

An AWS KMS customer master key (CMK) in one of the following formats:

  • Alias - The name of the key. For example, alias/MyKey.

  • Key ID - The KMS key ID of the key. For example, ae4aa6d49-a4d8-9df9-a475-4ff6d7898456.

  • ARN - The full Amazon Resource Name of the key ID or alias. For example, arn:aws:kms:us-east-2:123456789012:key/ae4aa6d49-a4d8-9df9-a475-4ff6d7898456. Use this format to specify a key in a different account.

Omit this key if you set Type to NONE.

" + "documentation":"

An AWS KMS customer master key (CMK) in one of the following formats:

  • Alias - The name of the key. For example, alias/MyKey.

  • Key ID - The KMS key ID of the key. For example, ae4aa6d49-a4d8-9df9-a475-4ff6d7898456. AWS X-Ray does not support asymmetric CMKs.

  • ARN - The full Amazon Resource Name of the key ID or alias. For example, arn:aws:kms:us-east-2:123456789012:key/ae4aa6d49-a4d8-9df9-a475-4ff6d7898456. Use this format to specify a key in a different account.

Omit this key if you set Type to NONE.

" }, "Type":{ "shape":"EncryptionType", @@ -2021,11 +2021,11 @@ }, "HasFault":{ "shape":"NullableBoolean", - "documentation":"

One or more of the segment documents has a 500 series error.

" + "documentation":"

The root segment document has a 500 series error.

" }, "HasError":{ "shape":"NullableBoolean", - "documentation":"

One or more of the segment documents has a 400 series error.

" + "documentation":"

The root segment document has a 400 series error.

" }, "HasThrottle":{ "shape":"NullableBoolean", diff --git a/botocore/exceptions.py b/botocore/exceptions.py index 52baa04b..3c42cafb 100644 --- a/botocore/exceptions.py +++ b/botocore/exceptions.py @@ -484,6 +484,15 @@ class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError): ) +class InvalidS3UsEast1RegionalEndpointConfigError(BotoCoreError): + """Error for invalid s3 us-east-1 regional endpoints configuration""" + fmt = ( + 'S3 us-east-1 regional endpoint option ' + '{s3_us_east_1_regional_endpoint_config} is ' + 'invaild. Valid options are: legacy and regional' + ) + + class InvalidSTSRegionalEndpointsConfigError(BotoCoreError): """Error when invalid sts regional endpoints configuration is specified""" fmt = ( diff --git a/botocore/parsers.py b/botocore/parsers.py index 1452900f..9df114db 100644 --- a/botocore/parsers.py +++ b/botocore/parsers.py @@ -782,7 +782,7 @@ class BaseRestParser(ResponseParser): metadata['RequestId'] = headers['x-amzn-requestid'] elif 'x-amz-request-id' in headers: metadata['RequestId'] = headers['x-amz-request-id'] - # HostId is what it's called whenver this value is returned + # HostId is what it's called whenever this value is returned # in an XML response body, so to be consistent, we'll always # call is HostId. metadata['HostId'] = headers.get('x-amz-id-2', '') diff --git a/botocore/session.py b/botocore/session.py index fe7e6bb3..3382658b 100644 --- a/botocore/session.py +++ b/botocore/session.py @@ -22,7 +22,6 @@ import os import platform import socket import warnings -import collections from botocore import __version__ from botocore import UNSIGNED @@ -49,6 +48,7 @@ from botocore import waiter from botocore import retryhandler, translate from botocore import utils from botocore.utils import EVENT_ALIASES +from botocore.compat import MutableMapping logger = logging.getLogger(__name__) @@ -941,7 +941,7 @@ class ComponentLocator(object): pass -class SessionVarDict(collections.MutableMapping): +class SessionVarDict(MutableMapping): def __init__(self, session, session_vars): self._session = session self._store = copy.copy(session_vars) diff --git a/botocore/signers.py b/botocore/signers.py index 5a7fab7d..93ce8dd9 100644 --- a/botocore/signers.py +++ b/botocore/signers.py @@ -718,9 +718,13 @@ def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None, def _should_use_global_endpoint(client): - use_dualstack_endpoint = False - if client.meta.config.s3 is not None: - use_dualstack_endpoint = client.meta.config.s3.get( - 'use_dualstack_endpoint', False) - return (client.meta.partition == 'aws' and - not use_dualstack_endpoint) + if client.meta.partition != 'aws': + return False + s3_config = client.meta.config.s3 + if s3_config: + if s3_config.get('use_dualstack_endpoint', False): + return False + if s3_config.get('us_east_1_regional_endpoint') == 'regional' and \ + client.meta.config.region_name == 'us-east-1': + return False + return True diff --git a/docs/source/conf.py b/docs/source/conf.py index a3020e94..60fcb999 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -52,9 +52,9 @@ copyright = u'2013, Mitch Garnaat' # built documents. # # The short X.Y version. -version = '1.13.' +version = '1.14.' # The full version, including alpha/beta/rc tags. -release = '1.13.37' +release = '1.14.14' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/setup.cfg b/setup.cfg index 31445a97..9de942eb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,16 +3,11 @@ universal = 1 [metadata] requires-dist = - python-dateutil>=2.1,<2.7.0; python_version=="2.6" - python-dateutil>=2.1,<2.8.1; python_version>="2.7" + python-dateutil>=2.1,<3.0.0 jmespath>=0.7.1,<1.0.0 docutils>=0.10,<0.16 - ordereddict==1.1; python_version=="2.6" - simplejson==3.3.0; python_version=="2.6" - urllib3>=1.20,<1.23; python_version=="3.3" - urllib3>=1.20,<1.24; python_version=="2.6" - urllib3>=1.20,<1.26; python_version=="2.7" - urllib3>=1.20,<1.26; python_version>="3.4" + urllib3>=1.20,<1.25.8; python_version=='3.4' + urllib3>=1.20,<1.26; python_version!='3.4' [egg_info] tag_build = diff --git a/setup.py b/setup.py index c62ab875..edc35789 100644 --- a/setup.py +++ b/setup.py @@ -23,33 +23,21 @@ def find_version(*file_paths): raise RuntimeError("Unable to find version string.") -requires = ['jmespath>=0.7.1,<1.0.0', - 'docutils>=0.10,<0.16'] +requires = [ + 'jmespath>=0.7.1,<1.0.0', + 'docutils>=0.10,<0.16', + 'python-dateutil>=2.1,<3.0.0', +] -if sys.version_info[:2] == (2, 6): - # For python2.6 we have a few other dependencies. - # First we need an ordered dictionary so we use the - # 2.6 backport. - requires.append('ordereddict==1.1') - # Then we need simplejson. This is because we need - # a json version that allows us to specify we want to - # use an ordereddict instead of a normal dict for the - # JSON objects. The 2.7 json module has this. For 2.6 - # we need simplejson. - requires.append('simplejson==3.3.0') - requires.append('python-dateutil>=2.1,<2.7.0') -else: - requires.append('python-dateutil>=2.1,<2.8.1') - -if sys.version_info[:2] == (2, 6): - requires.append('urllib3>=1.20,<1.24') -elif sys.version_info[:2] == (3, 3): - requires.append('urllib3>=1.20,<1.23') +if sys.version_info[:2] == (3, 4): + # urllib3 dropped support for python 3.4 in point release 1.25.8 + requires.append('urllib3>=1.20,<1.25.8') else: requires.append('urllib3>=1.20,<1.26') + setup( name='botocore', version=find_version("botocore", "__init__.py"), @@ -63,12 +51,7 @@ setup( 'botocore.vendored.requests': ['*.pem']}, include_package_data=True, install_requires=requires, - extras_require={ - ':python_version=="2.6"': [ - 'ordereddict==1.1', - 'simplejson==3.3.0', - ] - }, + extras_require={}, license="Apache License 2.0", classifiers=[ 'Development Status :: 5 - Production/Stable', @@ -78,13 +61,12 @@ setup( 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', ] ) diff --git a/tests/__init__.py b/tests/__init__.py index ba85a6ff..c08780c0 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -27,13 +27,7 @@ from io import BytesIO from subprocess import Popen, PIPE from dateutil.tz import tzlocal -# The unittest module got a significant overhaul -# in 2.7, so if we're in 2.6 we can use the backported -# version unittest2. 
-if sys.version_info[:2] == (2, 6): - import unittest2 as unittest -else: - import unittest +import unittest from nose.tools import assert_equal diff --git a/tests/functional/test_iot_data.py b/tests/functional/test_iot_data.py index b3fe3853..e9eeb8a0 100644 --- a/tests/functional/test_iot_data.py +++ b/tests/functional/test_iot_data.py @@ -16,8 +16,6 @@ from tests import unittest, mock, BaseSessionTest from botocore.exceptions import UnsupportedTLSVersionWarning -@unittest.skipIf(sys.version_info[:2] == (2, 6), - ("py26 is unable to detect openssl version")) class TestOpensslVersion(BaseSessionTest): def test_incompatible_openssl_version(self): with mock.patch('ssl.OPENSSL_VERSION_INFO', new=(0, 9, 8, 11, 15)): diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index f4d96f7b..a21b9668 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -21,6 +21,7 @@ from botocore.config import Config from botocore.compat import urlsplit from botocore.compat import parse_qs from botocore.exceptions import ParamValidationError, ClientError +from botocore.exceptions import InvalidS3UsEast1RegionalEndpointConfigError from botocore import UNSIGNED @@ -269,6 +270,144 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest): } ) + def test_use_arn_region_is_case_insensitive(self): + self.environ['AWS_S3_USE_ARN_REGION'] = 'True' + client = self.create_s3_client() + self.assertEqual( + client.meta.config.s3, + { + 'use_arn_region': True, + } + ) + + + def test_us_east_1_regional_env_var(self): + self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' + client = self.create_s3_client() + self.assertEqual( + client.meta.config.s3, + { + 'us_east_1_regional_endpoint': 'regional', + } + ) + + def test_us_east_1_regional_config_var(self): + with temporary_file('w') as f: + self.set_config_file( + f, + '[default]\n' + 's3_us_east_1_regional_endpoint = regional' + ) + client = self.create_s3_client() + self.assertEqual( + client.meta.config.s3, + { + 'us_east_1_regional_endpoint': 'regional', + } + ) + + def test_us_east_1_regional_nested_config_var(self): + with temporary_file('w') as f: + self.set_config_file( + f, + '[default]\n' + 's3 = \n' + ' us_east_1_regional_endpoint = regional' + ) + client = self.create_s3_client() + self.assertEqual( + client.meta.config.s3, + { + 'us_east_1_regional_endpoint': 'regional', + } + ) + + def test_us_east_1_regional_env_var_overrides_config_var(self): + self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' + with temporary_file('w') as f: + self.set_config_file( + f, + '[default]\n' + 's3 = \n' + ' us_east_1_regional_endpoint = legacy' + ) + client = self.create_s3_client() + self.assertEqual( + client.meta.config.s3, + { + 'us_east_1_regional_endpoint': 'regional', + } + ) + + def test_client_config_us_east_1_regional_overrides_env_var(self): + self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' + client = self.create_s3_client( + config=Config( + s3={'us_east_1_regional_endpoint': 'legacy'} + ) + ) + self.assertEqual( + client.meta.config.s3, + { + 'us_east_1_regional_endpoint': 'legacy', + } + ) + + def test_client_config_us_east_1_regional_overrides_config_var(self): + with temporary_file('w') as f: + self.set_config_file( + f, + '[default]\n' + 's3 = \n' + ' us_east_1_regional_endpoint = legacy' + ) + client = self.create_s3_client( + config=Config( + s3={'us_east_1_regional_endpoint': 'regional'} + ) + ) + self.assertEqual( + client.meta.config.s3, + { + 
'us_east_1_regional_endpoint': 'regional', + } + ) + + def test_client_validates_us_east_1_regional(self): + with self.assertRaises(InvalidS3UsEast1RegionalEndpointConfigError): + self.create_s3_client( + config=Config( + s3={'us_east_1_regional_endpoint': 'not-valid'} + ) + ) + + def test_client_region_defaults_to_us_east_1(self): + client = self.create_s3_client(region_name=None) + self.assertEqual(client.meta.region_name, 'us-east-1') + + def test_client_region_remains_us_east_1(self): + client = self.create_s3_client(region_name='us-east-1') + self.assertEqual(client.meta.region_name, 'us-east-1') + + def test_client_region_remains_aws_global(self): + client = self.create_s3_client(region_name='aws-global') + self.assertEqual(client.meta.region_name, 'aws-global') + + def test_client_region_defaults_to_aws_global_for_regional(self): + self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' + client = self.create_s3_client(region_name=None) + self.assertEqual(client.meta.region_name, 'aws-global') + + def test_client_region_remains_us_east_1_for_regional(self): + self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' + client = self.create_s3_client(region_name='us-east-1') + self.assertEqual(client.meta.region_name, 'us-east-1') + + def test_client_region_remains_aws_global_for_regional(self): + self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' + client = self.create_s3_client(region_name='aws-global') + self.assertEqual(client.meta.region_name, 'aws-global') + class TestAccesspointArn(BaseS3ClientConfigurationTest): _V4_AUTH_REGEX = re.compile( @@ -771,6 +910,15 @@ class TestRegionRedirect(BaseS3OperationTest): class TestGeneratePresigned(BaseS3OperationTest): + def assert_is_v2_presigned_url(self, url): + qs_components = parse_qs(urlsplit(url).query) + # Assert that it looks like a v2 presigned url by asserting it does + # not have a couple of the v4 qs components and assert that it has the + # v2 Signature component. 
+ self.assertNotIn('X-Amz-Credential', qs_components) + self.assertNotIn('X-Amz-Algorithm', qs_components) + self.assertIn('Signature', qs_components) + def test_generate_unauthed_url(self): config = Config(signature_version=botocore.UNSIGNED) client = self.session.create_client('s3', self.region, config=config) @@ -868,6 +1016,39 @@ class TestGeneratePresigned(BaseS3OperationTest): } self.assertEqual(parts, expected) + def test_presign_uses_v2_for_aws_global(self): + client = self.session.create_client('s3', 'aws-global') + url = client.generate_presigned_url( + 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) + self.assert_is_v2_presigned_url(url) + + def test_presign_uses_v2_for_default_region_with_us_east_1_regional(self): + config = Config(s3={'us_east_1_regional_endpoint': 'regional'}) + client = self.session.create_client('s3', config=config) + url = client.generate_presigned_url( + 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) + self.assert_is_v2_presigned_url(url) + + def test_presign_uses_v2_for_aws_global_with_us_east_1_regional(self): + config = Config(s3={'us_east_1_regional_endpoint': 'regional'}) + client = self.session.create_client('s3', 'aws-global', config=config) + url = client.generate_presigned_url( + 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) + self.assert_is_v2_presigned_url(url) + + def test_presign_uses_v2_for_us_east_1(self): + client = self.session.create_client('s3', 'us-east-1') + url = client.generate_presigned_url( + 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) + self.assert_is_v2_presigned_url(url) + + def test_presign_uses_v2_for_us_east_1_with_us_east_1_regional(self): + config = Config(s3={'us_east_1_regional_endpoint': 'regional'}) + client = self.session.create_client('s3', 'us-east-1', config=config) + url = client.generate_presigned_url( + 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) + self.assert_is_v2_presigned_url(url) + def test_correct_url_used_for_s3(): # Test that given various sets of config options and bucket names, @@ -1095,6 +1276,20 @@ def test_correct_url_used_for_s3(): s3_config=use_dualstack, signature_version='s3', # Still default to virtual hosted when possible on sigv2. expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key') + yield t.case( + region=None, bucket='bucket', key='key', + s3_config=use_dualstack, + # Uses us-east-1 for no region set. + expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key') + yield t.case( + region='aws-global', bucket='bucket', key='key', + s3_config=use_dualstack, + # Pseudo-regions should not have any special resolving logic even when + # the endpoint won't work as we do not have the metadata to know that + # a region does not support dualstack. So just format it based on the + # region name. 
+ expected_url=( + 'https://bucket.s3.dualstack.aws-global.amazonaws.com/key')) yield t.case( region='us-west-2', bucket='bucket', key='key', s3_config=use_dualstack, signature_version='s3', @@ -1410,6 +1605,67 @@ def test_correct_url_used_for_s3(): ) ) + # Use us-east-1 regional endpoint cases: regional + us_east_1_regional_endpoint = { + 'us_east_1_regional_endpoint': 'regional' + } + yield t.case( + region='us-east-1', bucket='bucket', key='key', + s3_config=us_east_1_regional_endpoint, + expected_url=( + 'https://bucket.s3.us-east-1.amazonaws.com/key')) + yield t.case( + region='us-west-2', bucket='bucket', key='key', + s3_config=us_east_1_regional_endpoint, + expected_url=( + 'https://bucket.s3.us-west-2.amazonaws.com/key')) + yield t.case( + region=None, bucket='bucket', key='key', + s3_config=us_east_1_regional_endpoint, + expected_url=( + 'https://bucket.s3.amazonaws.com/key')) + yield t.case( + region='us-east-1', bucket='bucket', key='key', + s3_config={ + 'us_east_1_regional_endpoint': 'regional', + 'use_dualstack_endpoint': True, + }, + expected_url=( + 'https://bucket.s3.dualstack.us-east-1.amazonaws.com/key')) + yield t.case( + region='us-east-1', bucket='bucket', key='key', + s3_config={ + 'us_east_1_regional_endpoint': 'regional', + 'use_accelerate_endpoint': True, + }, + expected_url=( + 'https://bucket.s3-accelerate.amazonaws.com/key')) + yield t.case( + region='us-east-1', bucket='bucket', key='key', + s3_config={ + 'us_east_1_regional_endpoint': 'regional', + 'use_accelerate_endpoint': True, + 'use_dualstack_endpoint': True, + }, + expected_url=( + 'https://bucket.s3-accelerate.dualstack.amazonaws.com/key')) + + # Use us-east-1 regional endpoint cases: legacy + us_east_1_regional_endpoint_legacy = { + 'us_east_1_regional_endpoint': 'legacy' + } + yield t.case( + region='us-east-1', bucket='bucket', key='key', + s3_config=us_east_1_regional_endpoint_legacy, + expected_url=( + 'https://bucket.s3.amazonaws.com/key')) + + yield t.case( + region=None, bucket='bucket', key='key', + s3_config=us_east_1_regional_endpoint_legacy, + expected_url=( + 'https://bucket.s3.amazonaws.com/key')) + class S3AddressingCases(object): def __init__(self, verify_function): @@ -1577,6 +1833,21 @@ def test_addressing_for_presigned_urls(): ) ) + # Use us-east-1 regional endpoint configuration cases + us_east_1_regional_endpoint = { + 'us_east_1_regional_endpoint': 'regional' + } + yield t.case( + region='us-east-1', bucket='bucket', key='key', + s3_config=us_east_1_regional_endpoint, signature_version='s3', + expected_url=( + 'https://bucket.s3.us-east-1.amazonaws.com/key')) + yield t.case( + region='us-east-1', bucket='bucket', key='key', + s3_config=us_east_1_regional_endpoint, signature_version='s3v4', + expected_url=( + 'https://bucket.s3.us-east-1.amazonaws.com/key')) + def _verify_presigned_url_addressing(region, bucket, key, s3_config, is_secure=True, diff --git a/tests/functional/test_six_threading.py b/tests/functional/test_six_threading.py index 5ef9bc37..18f87ba5 100644 --- a/tests/functional/test_six_threading.py +++ b/tests/functional/test_six_threading.py @@ -25,9 +25,6 @@ def _reload_six(): # moved modules are reset. 
if sys.version_info < (3, 0): reload(six) - elif sys.version_info < (3, 4): - import imp - imp.reload(six) else: import importlib importlib.reload(six) diff --git a/tests/unit/test_awsrequest.py b/tests/unit/test_awsrequest.py index a4408bf3..17c041f3 100644 --- a/tests/unit/test_awsrequest.py +++ b/tests/unit/test_awsrequest.py @@ -471,9 +471,6 @@ class TestAWSHTTPConnection(unittest.TestCase): with self.assertRaises(socket.error): conn._tunnel() - @unittest.skipIf(sys.version_info[:2] == (2, 6), - ("``_tunnel()`` function defaults to standard " - "http library function when not py26.")) def test_tunnel_uses_std_lib(self): s = FakeSocket(b'HTTP/1.1 200 OK\r\n') conn = AWSHTTPConnection('s3.amazonaws.com', 443) diff --git a/tests/unit/test_parsers.py b/tests/unit/test_parsers.py index f5b1b1a4..4f9af5d9 100644 --- a/tests/unit/test_parsers.py +++ b/tests/unit/test_parsers.py @@ -12,20 +12,19 @@ # language governing permissions and limitations under the License. from tests import unittest, RawResponse import datetime -import collections from dateutil.tz import tzutc from nose.tools import assert_equal from botocore import parsers from botocore import model -from botocore.compat import json +from botocore.compat import json, MutableMapping # HTTP responses will typically return a custom HTTP # dict. We want to ensure we're able to work with any # kind of mutable mapping implementation. -class CustomHeaderDict(collections.MutableMapping): +class CustomHeaderDict(MutableMapping): def __init__(self, original_dict): self._d = original_dict
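The session.py and test_parsers.py hunks switch from collections.MutableMapping to a MutableMapping imported from botocore.compat, since the abstract base classes live in collections.abc on Python 3. The compat module itself is not part of this diff, so the following is only a sketch of the kind of alias these imports assume:

import sys

if sys.version_info[0] == 2:
    # The ABCs still live at the top level of collections on Python 2.
    from collections import MutableMapping
else:
    # On Python 3 the top-level aliases are deprecated (and later removed),
    # so import from collections.abc instead.
    from collections.abc import MutableMapping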