diff --git a/MANIFEST.in b/MANIFEST.in index acb1656f..92173752 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,7 @@ include README.rst include LICENSE.txt include requirements.txt +include botocore/cacert.pem include botocore/vendored/requests/cacert.pem recursive-include botocore/data *.json graft docs diff --git a/PKG-INFO b/PKG-INFO index 0eab4492..b12bf02d 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.10.78 +Version: 1.12.16 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services @@ -24,7 +24,7 @@ Description: botocore Documentation ------------- - Documentation for ``botocore`` can be found on `Read the Docs `__. + Documentation for ``botocore`` can be found on `Read the Docs `__. Getting Help diff --git a/README.rst b/README.rst index ed9decbb..ab9d440f 100644 --- a/README.rst +++ b/README.rst @@ -16,7 +16,7 @@ botocore package is the foundation for the Documentation ------------- -Documentation for ``botocore`` can be found on `Read the Docs `__. +Documentation for ``botocore`` can be found on `Read the Docs `__. Getting Help diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index 0eab4492..b12bf02d 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.10.78 +Version: 1.12.16 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services @@ -24,7 +24,7 @@ Description: botocore Documentation ------------- - Documentation for ``botocore`` can be found on `Read the Docs `__. + Documentation for ``botocore`` can be found on `Read the Docs `__. 
Getting Help diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt index 0f87551c..39180f2e 100644 --- a/botocore.egg-info/SOURCES.txt +++ b/botocore.egg-info/SOURCES.txt @@ -8,6 +8,7 @@ botocore/__init__.py botocore/args.py botocore/auth.py botocore/awsrequest.py +botocore/cacert.pem botocore/client.py botocore/compat.py botocore/config.py @@ -20,6 +21,7 @@ botocore/exceptions.py botocore/handlers.py botocore/history.py botocore/hooks.py +botocore/httpsession.py botocore/loaders.py botocore/model.py botocore/paginate.py @@ -254,6 +256,7 @@ botocore/data/efs/2015-02-01/paginators-1.json botocore/data/efs/2015-02-01/service-2.json botocore/data/eks/2017-11-01/paginators-1.json botocore/data/eks/2017-11-01/service-2.json +botocore/data/eks/2017-11-01/service-2.sdk-extras.json botocore/data/elasticache/2014-09-30/paginators-1.json botocore/data/elasticache/2014-09-30/service-2.json botocore/data/elasticache/2014-09-30/waiters-2.json @@ -390,6 +393,7 @@ botocore/data/mturk/2017-01-17/paginators-1.json botocore/data/mturk/2017-01-17/service-2.json botocore/data/neptune/2014-10-31/paginators-1.json botocore/data/neptune/2014-10-31/service-2.json +botocore/data/neptune/2014-10-31/service-2.sdk-extras.json botocore/data/neptune/2014-10-31/waiters-2.json botocore/data/opsworks/2013-02-18/examples-1.json botocore/data/opsworks/2013-02-18/paginators-1.json @@ -448,6 +452,7 @@ botocore/data/sdb/2009-04-15/paginators-1.json botocore/data/sdb/2009-04-15/service-2.json botocore/data/secretsmanager/2017-10-17/paginators-1.json botocore/data/secretsmanager/2017-10-17/service-2.json +botocore/data/secretsmanager/2017-10-17/service-2.sdk-extras.json botocore/data/serverlessrepo/2017-09-08/paginators-1.json botocore/data/serverlessrepo/2017-09-08/service-2.json botocore/data/servicecatalog/2015-12-10/examples-1.json @@ -462,6 +467,9 @@ botocore/data/ses/2010-12-01/waiters-2.json botocore/data/shield/2016-06-02/examples-1.json 
botocore/data/shield/2016-06-02/paginators-1.json botocore/data/shield/2016-06-02/service-2.json +botocore/data/signer/2017-08-25/paginators-1.json +botocore/data/signer/2017-08-25/service-2.json +botocore/data/signer/2017-08-25/waiters-2.json botocore/data/sms/2016-10-24/examples-1.json botocore/data/sms/2016-10-24/paginators-1.json botocore/data/sms/2016-10-24/service-2.json @@ -616,9 +624,11 @@ docs/make.bat docs/source/client_upgrades.rst docs/source/conf.py docs/source/index.rst +docs/source/_static/404.html docs/source/development/changesfor10.rst docs/source/development/designnotes.rst docs/source/development/index.rst +docs/source/reference/awsrequest.rst docs/source/reference/config.rst docs/source/reference/eventstream.rst docs/source/reference/index.rst @@ -696,6 +706,7 @@ tests/functional/test_cognito_idp.py tests/functional/test_credentials.py tests/functional/test_ec2.py tests/functional/test_endpoints.py +tests/functional/test_event_alias.py tests/functional/test_h2_required.py tests/functional/test_history.py tests/functional/test_iot_data.py @@ -705,6 +716,7 @@ tests/functional/test_loaders.py tests/functional/test_machinelearning.py tests/functional/test_model_completeness.py tests/functional/test_mturk.py +tests/functional/test_neptune.py tests/functional/test_paginate.py tests/functional/test_paginator_config.py tests/functional/test_public_apis.py @@ -739,6 +751,7 @@ tests/integration/__init__.py tests/integration/test-credentials tests/integration/test_apigateway.py tests/integration/test_client.py +tests/integration/test_client_http.py tests/integration/test_cloudformation.py tests/integration/test_cognito_identity.py tests/integration/test_credentials.py @@ -759,6 +772,7 @@ tests/integration/test_waiters.py tests/unit/__init__.py tests/unit/put_object_data tests/unit/test_args.py +tests/unit/test_auth_sigv4.py tests/unit/test_awsrequest.py tests/unit/test_client.py tests/unit/test_compat.py @@ -771,6 +785,8 @@ tests/unit/test_exceptions.py 
tests/unit/test_handlers.py tests/unit/test_history.py tests/unit/test_hooks.py +tests/unit/test_http_client_exception_mapping.py +tests/unit/test_http_session.py tests/unit/test_idempotency.py tests/unit/test_loaders.py tests/unit/test_model.py diff --git a/botocore.egg-info/requires.txt b/botocore.egg-info/requires.txt index 4a2e3aa6..17f1bdb0 100644 --- a/botocore.egg-info/requires.txt +++ b/botocore.egg-info/requires.txt @@ -1,5 +1,6 @@ jmespath<1.0.0,>=0.7.1 docutils>=0.10 +urllib3<1.24,>=1.20 python-dateutil<3.0.0,>=2.1 [:python_version=="2.6"] diff --git a/botocore/__init__.py b/botocore/__init__.py index 7e3b6940..464aa0e6 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.10.78' +__version__ = '1.12.16' class NullHandler(logging.Handler): diff --git a/botocore/args.py b/botocore/args.py index fb6b5bc2..15235145 100644 --- a/botocore/args.py +++ b/botocore/args.py @@ -61,10 +61,11 @@ class ClientArgsCreator(object): event_emitter = copy.copy(self._event_emitter) signer = RequestSigner( - service_name, signing_region, + service_model.service_id, signing_region, endpoint_config['signing_name'], endpoint_config['signature_version'], - credentials, event_emitter) + credentials, event_emitter + ) config_kwargs['s3'] = s3_config new_config = Config(**config_kwargs) diff --git a/botocore/auth.py b/botocore/auth.py index b81764be..59dccc62 100644 --- a/botocore/auth.py +++ b/botocore/auth.py @@ -180,7 +180,11 @@ class SigV4Auth(BaseSigner): if lname not in SIGNED_HEADERS_BLACKLIST: header_map[lname] = value if 'host' not in header_map: - header_map['host'] = self._canonical_host(request.url) + # Ensure we sign the lowercased version of the host, as that + # is what will ultimately be sent on the wire. + # TODO: We should set the host ourselves, instead of relying on our + # HTTP client to set it for us. 
+ header_map['host'] = self._canonical_host(request.url).lower() return header_map def _canonical_host(self, url): diff --git a/botocore/awsrequest.py b/botocore/awsrequest.py index c26ebc07..a8ef2632 100644 --- a/botocore/awsrequest.py +++ b/botocore/awsrequest.py @@ -13,26 +13,21 @@ # language governing permissions and limitations under the License. import sys import logging -import select import functools import socket -import inspect +import collections +import urllib3.util +from urllib3.connection import VerifiedHTTPSConnection +from urllib3.connection import HTTPConnection +from urllib3.connectionpool import HTTPConnectionPool +from urllib3.connectionpool import HTTPSConnectionPool + +import botocore.utils from botocore.compat import six -from botocore.compat import HTTPHeaders, HTTPResponse, urlunsplit, urlsplit,\ - urlparse +from botocore.compat import HTTPHeaders, HTTPResponse, urlunsplit, urlsplit, \ + urlencode from botocore.exceptions import UnseekableStreamError -from botocore.utils import percent_encode_sequence -from botocore.vendored.requests import models -from botocore.vendored.requests.sessions import REDIRECT_STATI -from botocore.vendored.requests.packages.urllib3.connection import \ - VerifiedHTTPSConnection -from botocore.vendored.requests.packages.urllib3.connection import \ - HTTPConnection -from botocore.vendored.requests.packages.urllib3.connectionpool import \ - HTTPConnectionPool -from botocore.vendored.requests.packages.urllib3.connectionpool import \ - HTTPSConnectionPool logger = logging.getLogger(__name__) @@ -54,10 +49,10 @@ class AWSHTTPResponse(HTTPResponse): return HTTPResponse._read_status(self) -class AWSHTTPConnection(HTTPConnection): - """HTTPConnection that supports Expect 100-continue. +class AWSConnection(object): + """Mixin for HTTPConnection that supports Expect 100-continue. 
- This is conceptually a subclass of httplib.HTTPConnection (though + This when mixed with a subclass of httplib.HTTPConnection (though technically we subclass from urllib3, which subclasses httplib.HTTPConnection) and we only override this class to support Expect 100-continue, which we need for S3. As far as I can tell, this is @@ -67,7 +62,7 @@ class AWSHTTPConnection(HTTPConnection): """ def __init__(self, *args, **kwargs): - HTTPConnection.__init__(self, *args, **kwargs) + super(AWSConnection, self).__init__(*args, **kwargs) self._original_response_cls = self.response_class # We'd ideally hook into httplib's states, but they're all # __mangled_vars so we use our own state var. This variable is set @@ -81,7 +76,7 @@ class AWSHTTPConnection(HTTPConnection): self._expect_header_set = False def close(self): - HTTPConnection.close(self) + super(AWSConnection, self).close() # Reset all of our instance state we were tracking. self._response_received = False self._expect_header_set = False @@ -96,7 +91,7 @@ class AWSHTTPConnection(HTTPConnection): # difference from py26 to py3 is very minimal. We're essentially # just overriding the while loop. if sys.version_info[:2] != (2, 6): - return HTTPConnection._tunnel(self) + return super(AWSConnection, self)._tunnel() # Otherwise we workaround the issue. self._set_hostport(self._tunnel_host, self._tunnel_port) @@ -126,8 +121,8 @@ class AWSHTTPConnection(HTTPConnection): else: self._expect_header_set = False self.response_class = self._original_response_cls - rval = HTTPConnection._send_request( - self, method, url, body, headers, *args, **kwargs) + rval = super(AWSConnection, self)._send_request( + method, url, body, headers, *args, **kwargs) self._expect_header_set = False return rval @@ -160,8 +155,7 @@ class AWSHTTPConnection(HTTPConnection): # set, it will trigger this custom behavior. logger.debug("Waiting for 100 Continue response.") # Wait for 1 second for the server to send a response. 
- read, write, exc = select.select([self.sock], [], [self.sock], 1) - if read: + if urllib3.util.wait_for_read(self.sock, 1): self._handle_expect_response(message_body) return else: @@ -239,7 +233,7 @@ class AWSHTTPConnection(HTTPConnection): logger.debug("send() called, but reseponse already received. " "Not sending data.") return - return HTTPConnection.send(self, str) + return super(AWSConnection, self).send(str) def _is_100_continue_status(self, maybe_status_line): parts = maybe_status_line.split(None, 2) @@ -249,16 +243,20 @@ class AWSHTTPConnection(HTTPConnection): parts[1] == b'100') -class AWSHTTPSConnection(VerifiedHTTPSConnection): - pass +class AWSHTTPConnection(AWSConnection, HTTPConnection): + """ An HTTPConnection that supports 100 Continue behavior. """ -# Now we need to set the methods we overrode from AWSHTTPConnection -# onto AWSHTTPSConnection. This is just a shortcut to avoid -# copy/pasting the same code into AWSHTTPSConnection. -for name, function in AWSHTTPConnection.__dict__.items(): - if inspect.isfunction(function): - setattr(AWSHTTPSConnection, name, function) +class AWSHTTPSConnection(AWSConnection, VerifiedHTTPSConnection): + """ An HTTPSConnection that supports 100 Continue behavior. """ + + +class AWSHTTPConnectionPool(HTTPConnectionPool): + ConnectionCls = AWSHTTPConnection + + +class AWSHTTPSConnectionPool(HTTPSConnectionPool): + ConnectionCls = AWSHTTPSConnection def prepare_request_dict(request_dict, endpoint_url, context=None, @@ -285,6 +283,10 @@ def prepare_request_dict(request_dict, endpoint_url, context=None, headers['User-Agent'] = user_agent url = _urljoin(endpoint_url, r['url_path']) if r['query_string']: + # NOTE: This is to avoid circular import with utils. This is being + # done to avoid moving classes to different modules as to not cause + # breaking chainges. + percent_encode_sequence = botocore.utils.percent_encode_sequence encoded_query_string = percent_encode_sequence(r['query_string']) if '?' 
not in url: url += '?%s' % encoded_query_string @@ -338,18 +340,142 @@ def _urljoin(endpoint_url, url_path): return reconstructed -class AWSRequest(models.RequestEncodingMixin, models.Request): - def __init__(self, *args, **kwargs): - self.auth_path = None - if 'auth_path' in kwargs: - self.auth_path = kwargs['auth_path'] - del kwargs['auth_path'] - models.Request.__init__(self, *args, **kwargs) - headers = HTTPHeaders() - if self.headers is not None: - for key, value in self.headers.items(): - headers[key] = value - self.headers = headers +class AWSRequestPreparer(object): + """ + This class performs preparation on AWSRequest objects similar to that of + the PreparedRequest class does in the requests library. However, the logic + has been boiled down to meet the specific use cases in botocore. Of note + there are the following differences: + This class does not heavily prepare the URL. Requests performed many + validations and corrections to ensure the URL is properly formatted. + Botocore either performs these validations elsewhere or otherwise + consistently provides well formatted URLs. + + This class does not heavily prepare the body. Body preperation is + simple and supports only the cases that we document: bytes and + file-like objects to determine the content-length. This will also + additionally prepare a body that is a dict to be url encoded params + string as some signers rely on this. Finally, this class does not + support multipart file uploads. + + This class does not prepare the method, auth or cookies. 
+ """ + def prepare(self, original): + method = original.method + url = self._prepare_url(original) + body = self._prepare_body(original) + headers = self._prepare_headers(original, body) + stream_output = original.stream_output + + return AWSPreparedRequest(method, url, headers, body, stream_output) + + def _prepare_url(self, original): + url = original.url + if original.params: + params = urlencode(list(original.params.items()), doseq=True) + url = '%s?%s' % (url, params) + return url + + def _prepare_headers(self, original, prepared_body=None): + headers = HeadersDict(original.headers.items()) + + # If the transfer encoding or content length is already set, use that + if 'Transfer-Encoding' in headers or 'Content-Length' in headers: + return headers + + # Ensure we set the content length when it is expected + if original.method not in ('GET', 'HEAD', 'OPTIONS'): + length = self._determine_content_length(prepared_body) + if length is not None: + headers['Content-Length'] = str(length) + else: + # Failed to determine content length, using chunked + # NOTE: This shouldn't ever happen in practice + body_type = type(prepared_body) + logger.debug('Failed to determine length of %s', body_type) + headers['Transfer-Encoding'] = 'chunked' + + return headers + + def _to_utf8(self, item): + key, value = item + if isinstance(key, six.text_type): + key = key.encode('utf-8') + if isinstance(value, six.text_type): + value = value.encode('utf-8') + return key, value + + def _prepare_body(self, original): + """Prepares the given HTTP body data.""" + body = original.data + if body == b'': + body = None + + if isinstance(body, dict): + params = [self._to_utf8(item) for item in body.items()] + body = urlencode(params, doseq=True) + + return body + + def _determine_content_length(self, body): + # No body, content length of 0 + if not body: + return 0 + + # Try asking the body for it's length + try: + return len(body) + except (AttributeError, TypeError) as e: + pass + + # Try getting 
the length from a seekable stream + if hasattr(body, 'seek') and hasattr(body, 'tell'): + orig_pos = body.tell() + body.seek(0, 2) + end_file_pos = body.tell() + body.seek(orig_pos) + return end_file_pos - orig_pos + + # Failed to determine the length + return None + + +class AWSRequest(object): + """Represents the elements of an HTTP request. + + This class was originally inspired by requests.models.Request, but has been + boiled down to meet the specific use cases in botocore. That being said this + class (even in requests) is effectively a named-tuple. + """ + + _REQUEST_PREPARER_CLS = AWSRequestPreparer + + def __init__(self, + method=None, + url=None, + headers=None, + data=None, + params=None, + auth_path=None, + stream_output=False): + + self._request_preparer = self._REQUEST_PREPARER_CLS() + + # Default empty dicts for dict params. + params = {} if params is None else params + + self.method = method + self.url = url + self.headers = HTTPHeaders() + self.data = data + self.params = params + self.auth_path = auth_path + self.stream_output = stream_output + + if headers is not None: + for key, value in headers.items(): + self.headers[key] = value + # This is a dictionary to hold information that is used when # processing the request. What is inside of ``context`` is open-ended. # For example, it may have a timestamp key that is used for holding @@ -361,65 +487,57 @@ class AWSRequest(models.RequestEncodingMixin, models.Request): def prepare(self): """Constructs a :class:`AWSPreparedRequest `.""" - # Eventually I think it would be nice to add hooks into this process. 
- p = AWSPreparedRequest(self) - p.prepare_method(self.method) - p.prepare_url(self.url, self.params) - p.prepare_headers(self.headers) - p.prepare_cookies(self.cookies) - p.prepare_body(self.data, self.files) - p.prepare_auth(self.auth) - return p + return self._request_preparer.prepare(self) @property def body(self): - p = models.PreparedRequest() - p.prepare_headers({}) - p.prepare_body(self.data, self.files) - if isinstance(p.body, six.text_type): - p.body = p.body.encode('utf-8') - return p.body + body = self.prepare().body + if isinstance(body, six.text_type): + body = body.encode('utf-8') + return body -class AWSPreparedRequest(models.PreparedRequest): - """Represents a prepared request. +class AWSPreparedRequest(object): + """A data class representing a finalized request to be sent over the wire. - :ivar method: HTTP Method + Requests at this stage should be treated as final, and the properties of + the request should not be modified. + + :ivar method: The HTTP Method :ivar url: The full url :ivar headers: The HTTP headers to send. :ivar body: The HTTP body. - :ivar hooks: The set of callback hooks. - - In addition to the above attributes, the following attributes are - available: - - :ivar query_params: The original query parameters. - :ivar post_param: The original POST params (dict). - + :ivar stream_output: If the response for this request should be streamed. 
""" - def __init__(self, original_request): - self.original = original_request - super(AWSPreparedRequest, self).__init__() - self.hooks.setdefault('response', []).append( - self.reset_stream_on_redirect) + def __init__(self, method, url, headers, body, stream_output): + self.method = method + self.url = url + self.headers = headers + self.body = body + self.stream_output = stream_output - def reset_stream_on_redirect(self, response, **kwargs): - if response.status_code in REDIRECT_STATI and \ - self._looks_like_file(self.body): - logger.debug("Redirect received, rewinding stream: %s", self.body) - self.reset_stream() - - def _looks_like_file(self, body): - return hasattr(body, 'read') and hasattr(body, 'seek') + def __repr__(self): + fmt = ( + '' + ) + return fmt % (self.stream_output, self.method, self.url, self.headers) def reset_stream(self): + """Resets the streaming body to it's initial position. + + If the request contains a streaming body (a streamable file-like object) + seek to the object's initial position to ensure the entire contents of + the object is sent. This is a no-op for static bytes-like body types. + """ # Trying to reset a stream when there is a no stream will # just immediately return. It's not an error, it will produce # the same result as if we had actually reset the stream (we'll send # the entire body contents again if we need to). - # Same case if the body is a string/bytes type. - if self.body is None or isinstance(self.body, six.text_type) or \ - isinstance(self.body, six.binary_type): + # Same case if the body is a string/bytes/bytearray type. 
+ + non_seekable_types = (six.binary_type, six.text_type, bytearray) + if self.body is None or isinstance(self.body, non_seekable_types): return try: logger.debug("Rewinding stream: %s", self.body) @@ -428,28 +546,97 @@ class AWSPreparedRequest(models.PreparedRequest): logger.debug("Unable to rewind stream: %s", e) raise UnseekableStreamError(stream_object=self.body) - def prepare_body(self, data, files, json=None): - """Prepares the given HTTP body data.""" - super(AWSPreparedRequest, self).prepare_body(data, files, json) - # Calculate the Content-Length by trying to seek the file as - # requests cannot determine content length for some seekable file-like - # objects. - if 'Content-Length' not in self.headers: - if hasattr(data, 'seek') and hasattr(data, 'tell'): - orig_pos = data.tell() - data.seek(0, 2) - end_file_pos = data.tell() - self.headers['Content-Length'] = str(end_file_pos - orig_pos) - data.seek(orig_pos) - # If the Content-Length was added this way, a - # Transfer-Encoding was added by requests because it did - # not add a Content-Length header. However, the - # Transfer-Encoding header is not supported for - # AWS Services so remove it if it is added. - if 'Transfer-Encoding' in self.headers: - self.headers.pop('Transfer-Encoding') +class AWSResponse(object): + """A data class representing an HTTP response. + + This class was originally inspired by requests.models.Response, but has + been boiled down to meet the specific use cases in botocore. This has + effectively been reduced to a named tuple. + + :ivar url: The full url. + :ivar status_code: The status code of the HTTP response. + :ivar headers: The HTTP headers received. + :ivar body: The HTTP response body. 
+ """ + + def __init__(self, url, status_code, headers, raw): + self.url = url + self.status_code = status_code + self.headers = HeadersDict(headers) + self.raw = raw + + self._content = None + + @property + def content(self): + """Content of the response as bytes.""" + + if self._content is None: + # Read the contents. + # NOTE: requests would attempt to call stream and fall back + # to a custom generator that would call read in a loop, but + # we don't rely on this behavior + self._content = bytes().join(self.raw.stream()) or bytes() + + return self._content + + @property + def text(self): + """Content of the response as a proper text type. + + Uses the encoding type provided in the reponse headers to decode the + response content into a proper text type. If the encoding is not + present in the headers, UTF-8 is used as a default. + """ + encoding = botocore.utils.get_encoding_from_headers(self.headers) + if encoding: + return self.content.decode(encoding) + else: + return self.content.decode('utf-8') -HTTPSConnectionPool.ConnectionCls = AWSHTTPSConnection -HTTPConnectionPool.ConnectionCls = AWSHTTPConnection +class _HeaderKey(object): + def __init__(self, key): + self._key = key + self._lower = key.lower() + + def __hash__(self): + return hash(self._lower) + + def __eq__(self, other): + return isinstance(other, _HeaderKey) and self._lower == other._lower + + def __str__(self): + return self._key + + def __repr__(self): + return repr(self._key) + + +class HeadersDict(collections.MutableMapping): + """A case-insenseitive dictionary to represent HTTP headers. 
""" + def __init__(self, *args, **kwargs): + self._dict = {} + self.update(*args, **kwargs) + + def __setitem__(self, key, value): + self._dict[_HeaderKey(key)] = value + + def __getitem__(self, key): + return self._dict[_HeaderKey(key)] + + def __delitem__(self, key): + del self._dict[_HeaderKey(key)] + + def __iter__(self): + return (str(key) for key in self._dict) + + def __len__(self): + return len(self._dict) + + def __repr__(self): + return repr(self._dict) + + def copy(self): + return HeadersDict(self.items()) diff --git a/botocore/cacert.pem b/botocore/cacert.pem new file mode 100644 index 00000000..101ac98f --- /dev/null +++ b/botocore/cacert.pem @@ -0,0 +1,4433 @@ + +# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Label: "GlobalSign Root CA" +# Serial: 4835703278459707669005204 +# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a +# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c +# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E 
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Label: "GlobalSign Root CA - R2" +# Serial: 4835703278459682885658125 +# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 +# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe +# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH +V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs 
+J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO +291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Label: "Verisign Class 3 Public Primary Certification Authority - G3" +# Serial: 206684696279472310254277870180966723415 +# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09 +# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6 +# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44 +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu 
+kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p +F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Premium 2048 Secure Server CA" +# Serial: 946069240 +# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90 +# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31 +# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77 +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq 
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Label: "Baltimore CyberTrust Root" +# Serial: 33554617 +# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 +# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 +# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy 
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Label: "AddTrust External Root" +# Serial: 1 +# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f +# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68 +# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2 +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs +IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 +MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h +bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v +dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt +H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 +uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX +mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX +a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN +E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 +WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD 
+VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 +Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU +cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx +IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN +AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH +YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC +Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX +c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a +mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo 
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Global CA O=GeoTrust Inc. 
+# Label: "GeoTrust Global CA" +# Serial: 144470 +# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5 +# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12 +# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i +YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg +R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 +9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq +fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv +iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU +1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ +bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW +MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA +ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l +uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn +Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS +tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF +PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un +hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV +5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc. 
+# Label: "GeoTrust Universal CA" +# Serial: 1 +# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48 +# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79 +# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12 +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy +c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0 +IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV +VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8 +cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT +QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh +F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v +c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w +mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd +VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX +teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ +f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe +Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+ +nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB +/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY +MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG +9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX +IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn +ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z +uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN +Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja +QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW 
+koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9 +ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt +DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm +bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Label: "GeoTrust Universal CA 2" +# Serial: 1 +# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7 +# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79 +# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy +c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD +VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1 +c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81 +WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG +FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq +XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL +se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb +KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd +IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73 +y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt +hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc +QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4 +Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV +HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ +KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z 
+dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ +L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr +Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo +ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY +T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz +GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m +1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV +OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH +6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX +QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +# Issuer: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association +# Subject: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association +# Label: "Visa eCommerce Root" +# Serial: 25952180776285836048024890241505565794 +# MD5 Fingerprint: fc:11:b8:d8:08:93:30:00:6d:23:f9:7e:eb:52:1e:02 +# SHA1 Fingerprint: 70:17:9b:86:8c:00:a4:fa:60:91:52:22:3f:9f:3e:32:bd:e0:05:62 +# SHA256 Fingerprint: 69:fa:c9:bd:55:fb:0a:c7:8d:53:bb:ee:5c:f1:d5:97:98:9f:d0:aa:ab:20:a2:51:51:bd:f1:73:3e:e7:d1:22 +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr +MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl +cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw +CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h +dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l +cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h +2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E +lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV +ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq +299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t 
+vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL +dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF +AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR +zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3 +LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd +7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw +++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +# Issuer: CN=AAA Certificate Services O=Comodo CA Limited +# Subject: CN=AAA Certificate Services O=Comodo CA Limited +# Label: "Comodo AAA Services root" +# Serial: 1 +# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 +# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 +# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF 
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority +# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority +# Label: "QuoVadis Root CA" +# Serial: 985026699 +# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24 +# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9 +# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73 +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz +MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw +IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR +dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp +li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D +rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ +WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug +F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU 
+xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC +Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv +dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw +ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl +IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh +c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy +ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI +KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T +KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq +y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p +dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD +VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk +fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 +7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R +cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y +mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW +xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK +SnQ2+Q== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2" +# Serial: 1289 +# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b +# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7 +# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV 
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3" +# Serial: 
1478 +# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf +# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 +# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 
+7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1 +# Subject: O=SECOM Trust.net OU=Security Communication RootCA1 +# Label: "Security Communication Root CA" +# Serial: 0 +# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a +# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7 +# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 +9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T 
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG +kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- + +# Issuer: CN=Sonera Class2 CA O=Sonera +# Subject: CN=Sonera Class2 CA O=Sonera +# Label: "Sonera Class 2 Root CA" +# Serial: 29 +# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb +# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27 +# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27 +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP +MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx +MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV +BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o +Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt +5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s +3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej +vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu +8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw +DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG +MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil +zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ +3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD +FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 +Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 +ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M +-----END CERTIFICATE----- + +# Issuer: 
CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Label: "XRamp Global CA Root" +# Serial: 107108908803651509692980124233745014957 +# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1 +# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6 +# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2 +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi +PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs +jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy 
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Label: "Go Daddy Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 +# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 +# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER 
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- + +# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Label: "Starfield Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 +# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a +# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp 
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +# Issuer: O=Government Root Certification Authority +# Subject: O=Government Root Certification Authority +# Label: "Taiwan GRCA" +# Serial: 42023070807708724159991140556527066870 +# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e +# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9 +# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3 +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/ +MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow +PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR +IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q +gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy +yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts +F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2 +jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx +ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC +VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK +YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH +EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN +Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud +DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE +MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK +UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf 
+qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK +ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE +JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7 +hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1 +EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm +nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX +udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz +ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe +LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl +pYYsfPQS +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB 
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR 
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA 
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=Class 2 Primary CA O=Certplus +# Subject: CN=Class 2 Primary CA O=Certplus +# Label: "Certplus Class 2 Primary CA" +# Serial: 177770208045934040241468760488327595043 +# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b +# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb +# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw +PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz +cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 +MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz +IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ +ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR +VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL +kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd +EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas +H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 +HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud +DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 +QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu +Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ +AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 +yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR 
+FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA +ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB +kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co. +# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co. +# Label: "DST Root CA X3" +# Serial: 91299735575339953335919266965803778155 +# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5 +# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13 +# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39 +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O +rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq +OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b +xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw +7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD +aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG +SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 +ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr +AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz +R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 +JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo +Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Subject: CN=SwissSign 
Gold CA - G2 O=SwissSign AG +# Label: "SwissSign Gold CA - G2" +# Serial: 13492815561806991280 +# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 +# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 +# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv 
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Label: "SwissSign Silver CA - G2" +# Serial: 5700383053117599563 +# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13 +# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb +# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5 +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv +Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt +eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH +HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB 
+rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c +wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB +AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp +WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 +xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ +2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ +IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 +aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X +em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR +dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ +OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ +hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy +tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. +# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. 
+# Label: "GeoTrust Primary Certification Authority" +# Serial: 32798226551256963324313806436981982369 +# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf +# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96 +# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo +R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx +MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 +AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA +ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 +7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W +kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI +mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ +KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 +6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl +4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K +oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj +UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU +AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only +# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. 
- For authorized use only +# Label: "thawte Primary Root CA" +# Serial: 69529181992039203566298953787712940909 +# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12 +# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81 +# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J +NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU +DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. 
OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only +# Label: "VeriSign Class 3 Public Primary Certification Authority - G5" +# Serial: 33037644167568058970164719475676101450 +# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c +# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5 +# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW +ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 +nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex +t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz +SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG +BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ +rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ +NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH +BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv 
+MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE +p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y +5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK +WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ +4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N +hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +# Issuer: CN=SecureTrust CA O=SecureTrust Corporation +# Subject: CN=SecureTrust CA O=SecureTrust Corporation +# Label: "SecureTrust CA" +# Serial: 17199774589125277788362757014266862032 +# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1 +# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11 +# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73 +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ 
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +# Issuer: CN=Secure Global CA O=SecureTrust Corporation +# Subject: CN=Secure Global CA O=SecureTrust Corporation +# Label: "Secure Global CA" +# Serial: 9751836167731051554232119481456978597 +# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de +# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b +# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69 +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY 
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 104350513648249232941998508985834464573 +# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 +# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b +# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ 
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- + +# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Label: "Network Solutions Certificate Authority" +# Serial: 116697915152937497490437556386812487904 +# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e +# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce +# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ +h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH 
+/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GA CA" +# Serial: 86718877871133159090080555911823548314 +# MD5 
Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93 +# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9 +# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5 +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB +ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly +aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w +NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G +A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX +SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR +VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 +w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF +mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg +4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 +4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw +EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx +SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 +ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 +vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi +Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ +/L7fCg0= +-----END CERTIFICATE----- + +# Issuer: CN=Certigna O=Dhimyotis +# Subject: CN=Certigna O=Dhimyotis +# Label: "Certigna" +# Serial: 18364802974209362175 +# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff +# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97 +# SHA256 
Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 +QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center +# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center +# Label: "Deutsche Telekom Root CA 2" +# Serial: 38 +# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08 +# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf +# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3 +-----BEGIN CERTIFICATE----- 
+MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc +MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj +IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB +IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE +RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl +U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 +IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU +ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC +QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr +rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S +NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc +QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH +txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP +BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC +AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp +tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa +IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl +6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ +xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc +# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc +# Label: "Cybertrust Global Root" +# Serial: 4835703278459682877484360 +# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 +# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 +# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG +A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh +bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE 
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS +b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 +7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS +J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y +HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP +t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz +FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY +XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw +hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js +MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA +A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj +Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx +XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o +omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc +A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Subject: O=Chunghwa Telecom Co., Ltd. 
OU=ePKI Root Certification Authority +# Label: "ePKI Root Certification Authority" +# Serial: 28956088682735189655030529057352760477 +# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3 +# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0 +# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5 +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ 
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z +Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE +W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D +hNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +# Issuer: O=certSIGN OU=certSIGN ROOT CA +# Subject: O=certSIGN OU=certSIGN ROOT CA +# Label: "certSIGN ROOT CA" +# Serial: 35210227249154 +# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17 +# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b +# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT +AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP +MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv +JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I 
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only +# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only +# Label: "GeoTrust Primary Certification Authority - G3" +# Serial: 28809105769928564313984085209975885599 +# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05 +# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd +# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4 +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB +mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT +MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ +BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0 +BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz ++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm +hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn +5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W +JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL +DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC +huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw +HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB +AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB 
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN +kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH +SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G +spki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only +# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only +# Label: "thawte Primary Root CA - G2" +# Serial: 71758320672825410020661621085256472406 +# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f +# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12 +# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57 +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp +IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi +BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw +MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig +YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v +dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/ +BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6 +papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K +DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3 +KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox +XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. 
- For authorized use only +# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only +# Label: "thawte Primary Root CA - G3" +# Serial: 127614157056681299805556476275995414779 +# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31 +# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2 +# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB +rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV +BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa +Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl +LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u +MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm +gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8 +YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf +b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9 +9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S +zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk +OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV +HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA +2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW +oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c +KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM 
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu +MdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only +# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only +# Label: "GeoTrust Primary Certification Authority - G2" +# Serial: 80682863203381065782177908751794619243 +# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a +# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0 +# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66 +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL +MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj +KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2 +MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw +NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV +BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH +MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL +So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal +tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG +CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT +qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz +rD6ogRLQy7rQkgu2npaqBA+K +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. 
OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only +# Label: "VeriSign Universal Root Certification Authority" +# Serial: 85209574734084581917763752644031726877 +# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19 +# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54 +# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB +vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W +ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX +MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0 +IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y +IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh +bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF +9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH +H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H +LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN +/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT +rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw +WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs +exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4 +sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+ +seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz 
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+ +BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR +lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3 +7M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only +# Label: "VeriSign Class 3 Public Primary Certification Authority - G4" +# Serial: 63143484348153506665311985501458640051 +# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41 +# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a +# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79 +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG +A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp +U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg +SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln +biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm +GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve +fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ +aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj 
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW +kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC +4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga +FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny" +# Serial: 80544274841616 +# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88 +# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91 +# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98 +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG 
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden +# Label: "Staat der Nederlanden Root CA - G2" +# Serial: 10000012 +# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a +# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16 +# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX +DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291 +qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp +uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU +Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE +pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp +5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M +UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN +GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy +5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv +6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK 
+eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6 +B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/ +BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov +L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG +SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS +CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen +5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897 +IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK +gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL ++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL +vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm +bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk +N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC +Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z +ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Label: "Hongkong Post Root CA 1" +# Serial: 1000 +# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca +# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58 +# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2 +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ 
+jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Label: "SecureSign RootCA11" +# Serial: 1 +# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26 +# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3 +# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12 +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr +MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG +A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0 +MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp +Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD +QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz +i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8 +h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV +MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9 +UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni +8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC 
+h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm +KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ +X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr +QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5 +pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN +QSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Label: "Microsec e-Szigno Root CA 2009" +# Serial: 14014712776195784473 +# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 +# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e +# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G 
+A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp 
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + +# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 6047274297262753887 +# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3 +# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa +# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD 
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 +am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +# Issuer: CN=Izenpe.com O=IZENPE S.A. +# Subject: CN=Izenpe.com O=IZENPE S.A. 
+# Label: "Izenpe.com" +# Serial: 917563065490389241595536686991402621 +# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 +# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 +# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo 
+JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. +# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. +# Label: "Chambers of Commerce Root - 2008" +# Serial: 11806822484801597146 +# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7 +# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c +# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0 +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz +IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz +MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj +dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw +EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp +MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9 +28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq +VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q +DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR +5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL 
+ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a +Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl +UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s ++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5 +Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx +hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV +HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1 ++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN +YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t +L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy +ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt +IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV +HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w +DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW +PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF +5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1 +glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH +FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2 +pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD +xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG +tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq +jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De +fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ +d0jQ +-----END CERTIFICATE----- + +# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. +# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. 
+# Label: "Global Chambersign Root - 2008" +# Serial: 14541511773111788494 +# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3 +# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c +# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx +MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy +cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG +A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl +BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed +KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 +G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 +zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 +ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG +HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 +Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V +yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e +beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r +6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog +zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW +BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr +ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp +ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk 
+cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt +YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC +CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow +KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI +hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ +UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz +X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x +fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz +a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd +Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd +SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O +AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso +M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge +v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 
82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- 
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf 
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ +u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: 
bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA" +# Serial: 279744 +# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78 +# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e +# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Label: "TWCA Root Certification Authority" +# Serial: 1 +# MD5 Fingerprint: 
aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79 +# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48 +# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44 +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2 +# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication RootCA2 +# Label: "Security Communication RootCA2" +# Serial: 0 +# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43 +# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74 +# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2011" +# Serial: 0 +# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9 +# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d +# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71 +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix +RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p +YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw +NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK +EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl +cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz +dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ +fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns +bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD +75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP +FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV +HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp +5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu +b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA +A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p +6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 +dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys +Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI +l7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Subject: CN=Actalis 
Authentication Root CA O=Actalis S.p.A./03358520967 +# Label: "Actalis Authentication Root CA" +# Serial: 6271844772424770508 +# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6 +# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac +# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66 +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V 
+SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: O=Trustis Limited OU=Trustis FPS Root CA +# Subject: O=Trustis Limited OU=Trustis FPS Root CA +# Label: "Trustis FPS Root CA" +# Serial: 36053640375399034304724988975563710553 +# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d +# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04 +# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL +ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx +MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc +MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ +AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH +iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj +vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA +0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB +OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ +BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E +FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 +GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW 
+zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 +1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE +f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F +jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN +ZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h 
+9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY 
+zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ +1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus +# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus +# Label: "EE Certification Centre Root CA" +# Serial: 112324828676200291871926431888494945866 +# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f +# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7 +# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76 +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1 +MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1 +czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG +CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy +MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl 
+ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS +b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy +euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO +bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw +WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d +MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE +1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/ +zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB +BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF +BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV +v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG +E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW +iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v +GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 
+UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI 
+hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R2 O=Disig a.s. +# Subject: CN=CA Disig Root R2 O=Disig a.s. 
+# Label: "CA Disig Root R2" +# Serial: 10572350602393338211 +# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 +# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 +# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i 
+nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Label: "ACCVRAIZ1" +# Serial: 6828503384748696800 +# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 +# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 +# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH +k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 +4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG 
+AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA Global Root CA" +# Serial: 3262 +# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 +# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 +# SHA256 Fingerprint: 
59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END 
CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 
+Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi +# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi +# Label: "E-Tugra Certification Authority" +# Serial: 7667447206703254355 +# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49 +# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39 +# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo 
+q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 +Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY +GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK +vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- 
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG 
+EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 1 G3" +# Serial: 687049649626669250736271037606554624078720034195 +# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab +# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 +# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG 
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2 G3" +# Serial: 390156079458959257446133169266079962026824725800 +# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 +# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 +# SHA256 Fingerprint: 
8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3 G3" +# Serial: 268090761170461462463995952157327242137089239581 +# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 +# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d +# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv 
+oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW 
+BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global 
Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 
7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG 
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 
Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL 
+S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE 
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo 
+I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 14367148294922964480859022125800977897474 +# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e +# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb +# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ +FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F +uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX +kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs +ewv4n4Q= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 
1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden +# Label: "Staat der Nederlanden Root CA - G3" +# Serial: 10003001 +# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37 +# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc +# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28 +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX +DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP +cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW 
+IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX +xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy +KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR +9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az +5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 +6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 +Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP +bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt +BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt +XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd +INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp +LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 +Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp +gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh +/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw +0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A +fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq +4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR +1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ +QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM +94B7IWcnMFk= +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Label: "Staat der Nederlanden EV Root CA" +# Serial: 10000013 +# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba +# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb +# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a +-----BEGIN CERTIFICATE----- 
+MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC +UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l +pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS +v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 
O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK 
+W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD 
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - G2" +# Serial: 1246989352 +# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 +# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 +# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, 
Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - EC1" +# Serial: 51543124481930649114116133369 +# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc +# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 +# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority +# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority +# Label: "CFCA EV ROOT" +# Serial: 407555286 +# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30 +# SHA1 Fingerprint: 
e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83 +# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS +ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy +P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe 
+/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=T\xdcRKTRUST Elektronik Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 H5 O=T\xdcRKTRUST Bilgi \u0130leti\u015fim ve Bili\u015fim G\xfcvenli\u011fi Hizmetleri A.\u015e. +# Subject: CN=T\xdcRKTRUST Elektronik Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 H5 O=T\xdcRKTRUST Bilgi \u0130leti\u015fim ve Bili\u015fim G\xfcvenli\u011fi Hizmetleri A.\u015e. +# Label: "T\xdcRKTRUST Elektronik Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 H5" +# Serial: 156233699172481 +# MD5 Fingerprint: da:70:8e:f0:22:df:93:26:f6:5f:9f:d3:15:06:52:4e +# SHA1 Fingerprint: c4:18:f6:4d:46:d1:df:00:3d:27:30:13:72:43:a9:12:11:c6:75:fb +# SHA256 Fingerprint: 49:35:1b:90:34:44:c1:85:cc:dc:5c:69:3d:24:d8:55:5c:b2:08:d6:a8:14:13:07:69:9f:4a:f0:63:19:9d:78 +-----BEGIN CERTIFICATE----- +MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UE +BhMCVFIxDzANBgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxn +aSDEsGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkg +QS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1QgRWxla3Ryb25payBTZXJ0aWZpa2Eg +SGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAwODA3MDFaFw0yMzA0 +MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0wSwYD +VQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8 +dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApCUZ4WWe60ghUEoI5RHwWrom +/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537jVJp45wnEFPzpALFp/kR +Gml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1mep5Fimh3 +4khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z +5UNP9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0 +hO8EuPbJbKoCPrZV4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QID 
+AQABo0IwQDAdBgNVHQ4EFgQUVpkHHtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJ5FdnsX +SDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPoBP5yCccLqh0l +VX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq +URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nf +peYVhDfwwvJllpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CF +Yv4HAqGEVka+lgqaE9chTLd8B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW ++qtB4Uu2NQvAmxU= +-----END CERTIFICATE----- + +# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 +# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 +# Label: "Certinomis - Root CA" +# Serial: 1 +# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f +# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8 +# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58 +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb +BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz +MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx +FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g +Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 +fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl +LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV +WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF +TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb +5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc +CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri +wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ +wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG +m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 
+F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng +WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 +2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ +0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw +F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS +g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj +qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN +h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ +ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V +btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj +Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ +8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW +gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh 
+bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. 
+# Label: "SZAFIR ROOT CA2" +# Serial: 357043034767186914217277344587386743377558296292 +# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 +# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de +# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA 2" +# Serial: 44979900017204383099463764357512596969 +# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 +# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 +# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 
+XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce +# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 +# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 
+AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef +# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 +# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +# Issuer: CN=Certplus Root CA G1 O=Certplus +# Subject: CN=Certplus Root CA G1 O=Certplus +# Label: "Certplus Root CA G1" +# Serial: 1491911565779898356709731176965615564637713 +# MD5 Fingerprint: 7f:09:9c:f7:d9:b9:5c:69:69:56:d5:37:3e:14:0d:42 +# SHA1 Fingerprint: 22:fd:d0:b7:fd:a2:4e:0d:ac:49:2c:a0:ac:a6:7b:6a:1f:e3:f7:66 +# SHA256 Fingerprint: 15:2a:40:2b:fc:df:2c:d5:48:05:4d:22:75:b3:9c:7f:ca:3e:c0:97:80:78:b0:f0:ea:76:e5:61:a6:c7:43:3e +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgISESBVg+QtPlRWhS2DN7cs3EYRMA0GCSqGSIb3DQEBDQUA 
+MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy +dHBsdXMgUm9vdCBDQSBHMTAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBa +MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy +dHBsdXMgUm9vdCBDQSBHMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ANpQh7bauKk+nWT6VjOaVj0W5QOVsjQcmm1iBdTYj+eJZJ+622SLZOZ5KmHNr49a +iZFluVj8tANfkT8tEBXgfs+8/H9DZ6itXjYj2JizTfNDnjl8KvzsiNWI7nC9hRYt +6kuJPKNxQv4c/dMcLRC4hlTqQ7jbxofaqK6AJc96Jh2qkbBIb6613p7Y1/oA/caP +0FG7Yn2ksYyy/yARujVjBYZHYEMzkPZHogNPlk2dT8Hq6pyi/jQu3rfKG3akt62f +6ajUeD94/vI4CTYd0hYCyOwqaK/1jpTvLRN6HkJKHRUxrgwEV/xhc/MxVoYxgKDE +EW4wduOU8F8ExKyHcomYxZ3MVwia9Az8fXoFOvpHgDm2z4QTd28n6v+WZxcIbekN +1iNQMLAVdBM+5S//Ds3EC0pd8NgAM0lm66EYfFkuPSi5YXHLtaW6uOrc4nBvCGrc +h2c0798wct3zyT8j/zXhviEpIDCB5BmlIOklynMxdCm+4kLV87ImZsdo/Rmz5yCT +mehd4F6H50boJZwKKSTUzViGUkAksnsPmBIgJPaQbEfIDbsYIC7Z/fyL8inqh3SV +4EJQeIQEQWGw9CEjjy3LKCHyamz0GqbFFLQ3ZU+V/YDI+HLlJWvEYLF7bY5KinPO +WftwenMGE9nTdDckQQoRb5fc5+R+ob0V8rqHDz1oihYHAgMBAAGjYzBhMA4GA1Ud +DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSowcCbkahDFXxd +Bie0KlHYlwuBsTAfBgNVHSMEGDAWgBSowcCbkahDFXxdBie0KlHYlwuBsTANBgkq +hkiG9w0BAQ0FAAOCAgEAnFZvAX7RvUz1isbwJh/k4DgYzDLDKTudQSk0YcbX8ACh +66Ryj5QXvBMsdbRX7gp8CXrc1cqh0DQT+Hern+X+2B50ioUHj3/MeXrKls3N/U/7 +/SMNkPX0XtPGYX2eEeAC7gkE2Qfdpoq3DIMku4NQkv5gdRE+2J2winq14J2by5BS +S7CTKtQ+FjPlnsZlFT5kOwQ/2wyPX1wdaR+v8+khjPPvl/aatxm2hHSco1S1cE5j +2FddUyGbQJJD+tZ3VTNPZNX70Cxqjm0lpu+F6ALEUz65noe8zDUa3qHpimOHZR4R +Kttjd5cUvpoUmRGywO6wT/gUITJDT5+rosuoD6o7BlXGEilXCNQ314cnrUlZp5Gr +RHpejXDbl85IULFzk/bwg2D5zfHhMf1bfHEhYxQUqq/F3pN+aLHsIqKqkHWetUNy +6mSjhEv9DKgma3GX7lZjZuhCVPnHHd/Qj1vfyDBviP4NxDMcU6ij/UgQ8uQKTuEV +V/xuZDDCVRHc6qnNSlSsKWNEz0pAoNZoWRsz+e86i9sgktxChL8Bq4fA1SCC28a5 +g4VCXA9DO2pJNdWY9BW/+mGBDAkgGNLQFwzLSABQ6XaCjGTXOqAHVcweMcDvOrRl +++O/QmueD6i9a5jc2NvLi6Td11n0bt3+qsOR0C5CB8AMTVPNJLFMWx5R9N/pkvo= +-----END CERTIFICATE----- + +# Issuer: CN=Certplus Root CA G2 O=Certplus +# Subject: CN=Certplus Root CA G2 O=Certplus +# Label: "Certplus Root CA G2" 
+# Serial: 1492087096131536844209563509228951875861589 +# MD5 Fingerprint: a7:ee:c4:78:2d:1b:ee:2d:b9:29:ce:d6:a7:96:32:31 +# SHA1 Fingerprint: 4f:65:8e:1f:e9:06:d8:28:02:e9:54:47:41:c9:54:25:5d:69:cc:1a +# SHA256 Fingerprint: 6c:c0:50:41:e6:44:5e:74:69:6c:4c:fb:c9:f8:0f:54:3b:7e:ab:bb:44:b4:ce:6f:78:7c:6a:99:71:c4:2f:17 +-----BEGIN CERTIFICATE----- +MIICHDCCAaKgAwIBAgISESDZkc6uo+jF5//pAq/Pc7xVMAoGCCqGSM49BAMDMD4x +CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs +dXMgUm9vdCBDQSBHMjAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4x +CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs +dXMgUm9vdCBDQSBHMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABM0PW1aC3/BFGtat +93nwHcmsltaeTpwftEIRyoa/bfuFo8XlGVzX7qY/aWfYeOKmycTbLXku54uNAm8x +Ik0G42ByRZ0OQneezs/lf4WbGOT8zC5y0xaTTsqZY1yhBSpsBqNjMGEwDgYDVR0P +AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNqDYwJ5jtpMxjwj +FNiPwyCrKGBZMB8GA1UdIwQYMBaAFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMAoGCCqG +SM49BAMDA2gAMGUCMHD+sAvZ94OX7PNVHdTcswYO/jOYnYs5kGuUIe22113WTNch +p+e/IQ8rzfcq3IUHnQIxAIYUFuXcsGXCwI4Un78kFmjlvPl5adytRSv3tjFzzAal +U5ORGpOucGpnutee5WEaXw== +-----END CERTIFICATE----- + +# Issuer: CN=OpenTrust Root CA G1 O=OpenTrust +# Subject: CN=OpenTrust Root CA G1 O=OpenTrust +# Label: "OpenTrust Root CA G1" +# Serial: 1492036577811947013770400127034825178844775 +# MD5 Fingerprint: 76:00:cc:81:29:cd:55:5e:88:6a:7a:2e:f7:4d:39:da +# SHA1 Fingerprint: 79:91:e8:34:f7:e2:ee:dd:08:95:01:52:e9:55:2d:14:e9:58:d5:7e +# SHA256 Fingerprint: 56:c7:71:28:d9:8c:18:d9:1b:4c:fd:ff:bc:25:ee:91:03:d4:75:8e:a2:ab:ad:82:6a:90:f3:45:7d:46:0e:b4 +-----BEGIN CERTIFICATE----- +MIIFbzCCA1egAwIBAgISESCzkFU5fX82bWTCp59rY45nMA0GCSqGSIb3DQEBCwUA +MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w +ZW5UcnVzdCBSb290IENBIEcxMB4XDTE0MDUyNjA4NDU1MFoXDTM4MDExNTAwMDAw +MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU +T3BlblRydXN0IFJvb3QgQ0EgRzEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK 
+AoICAQD4eUbalsUwXopxAy1wpLuwxQjczeY1wICkES3d5oeuXT2R0odsN7faYp6b +wiTXj/HbpqbfRm9RpnHLPhsxZ2L3EVs0J9V5ToybWL0iEA1cJwzdMOWo010hOHQX +/uMftk87ay3bfWAfjH1MBcLrARYVmBSO0ZB3Ij/swjm4eTrwSSTilZHcYTSSjFR0 +77F9jAHiOH3BX2pfJLKOYheteSCtqx234LSWSE9mQxAGFiQD4eCcjsZGT44ameGP +uY4zbGneWK2gDqdkVBFpRGZPTBKnjix9xNRbxQA0MMHZmf4yzgeEtE7NCv82TWLx +p2NX5Ntqp66/K7nJ5rInieV+mhxNaMbBGN4zK1FGSxyO9z0M+Yo0FMT7MzUj8czx +Kselu7Cizv5Ta01BG2Yospb6p64KTrk5M0ScdMGTHPjgniQlQ/GbI4Kq3ywgsNw2 +TgOzfALU5nsaqocTvz6hdLubDuHAk5/XpGbKuxs74zD0M1mKB3IDVedzagMxbm+W +G+Oin6+Sx+31QrclTDsTBM8clq8cIqPQqwWyTBIjUtz9GVsnnB47ev1CI9sjgBPw +vFEVVJSmdz7QdFG9URQIOTfLHzSpMJ1ShC5VkLG631UAC9hWLbFJSXKAqWLXwPYY +EQRVzXR7z2FwefR7LFxckvzluFqrTJOVoSfupb7PcSNCupt2LQIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUl0YhVyE1 +2jZVx/PxN3DlCPaTKbYwHwYDVR0jBBgwFoAUl0YhVyE12jZVx/PxN3DlCPaTKbYw +DQYJKoZIhvcNAQELBQADggIBAB3dAmB84DWn5ph76kTOZ0BP8pNuZtQ5iSas000E +PLuHIT839HEl2ku6q5aCgZG27dmxpGWX4m9kWaSW7mDKHyP7Rbr/jyTwyqkxf3kf +gLMtMrpkZ2CvuVnN35pJ06iCsfmYlIrM4LvgBBuZYLFGZdwIorJGnkSI6pN+VxbS +FXJfLkur1J1juONI5f6ELlgKn0Md/rcYkoZDSw6cMoYsYPXpSOqV7XAp8dUv/TW0 +V8/bhUiZucJvbI/NeJWsZCj9VrDDb8O+WVLhX4SPgPL0DTatdrOjteFkdjpY3H1P +XlZs5VVZV6Xf8YpmMIzUUmI4d7S+KNfKNsSbBfD4Fdvb8e80nR14SohWZ25g/4/I +i+GOvUKpMwpZQhISKvqxnUOOBZuZ2mKtVzazHbYNeS2WuOvyDEsMpZTGMKcmGS3t +TAZQMPH9WD25SxdfGbRqhFS0OE85og2WaMMolP3tLR9Ka0OWLpABEPs4poEL0L91 +09S5zvE/bw4cHjdx5RiHdRk/ULlepEU0rbDK5uUTdg8xFKmOLZTW1YVNcxVPS/Ky +Pu1svf0OnWZzsD2097+o4BGkxK51CUpjAEggpsadCwmKtODmzj7HPiY46SvepghJ +AwSQiumPv+i2tCqjI40cHLI5kqiPAlxAOXXUc0ECd97N4EOH1uS6SsNsEn/+KuYj +1oxx +-----END CERTIFICATE----- + +# Issuer: CN=OpenTrust Root CA G2 O=OpenTrust +# Subject: CN=OpenTrust Root CA G2 O=OpenTrust +# Label: "OpenTrust Root CA G2" +# Serial: 1492012448042702096986875987676935573415441 +# MD5 Fingerprint: 57:24:b6:59:24:6b:ae:c8:fe:1c:0c:20:f2:c0:4e:eb +# SHA1 Fingerprint: 79:5f:88:60:c5:ab:7c:3d:92:e6:cb:f4:8d:e1:45:cd:11:ef:60:0b +# SHA256 Fingerprint: 
27:99:58:29:fe:6a:75:15:c1:bf:e8:48:f9:c4:76:1d:b1:6c:22:59:29:25:7b:f4:0d:08:94:f2:9e:a8:ba:f2 +-----BEGIN CERTIFICATE----- +MIIFbzCCA1egAwIBAgISESChaRu/vbm9UpaPI+hIvyYRMA0GCSqGSIb3DQEBDQUA +MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w +ZW5UcnVzdCBSb290IENBIEcyMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAw +MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU +T3BlblRydXN0IFJvb3QgQ0EgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQDMtlelM5QQgTJT32F+D3Y5z1zCU3UdSXqWON2ic2rxb95eolq5cSG+Ntmh +/LzubKh8NBpxGuga2F8ORAbtp+Dz0mEL4DKiltE48MLaARf85KxP6O6JHnSrT78e +CbY2albz4e6WiWYkBuTNQjpK3eCasMSCRbP+yatcfD7J6xcvDH1urqWPyKwlCm/6 +1UWY0jUJ9gNDlP7ZvyCVeYCYitmJNbtRG6Q3ffyZO6v/v6wNj0OxmXsWEH4db0fE +FY8ElggGQgT4hNYdvJGmQr5J1WqIP7wtUdGejeBSzFfdNTVY27SPJIjki9/ca1TS +gSuyzpJLHB9G+h3Ykst2Z7UJmQnlrBcUVXDGPKBWCgOz3GIZ38i1MH/1PCZ1Eb3X +G7OHngevZXHloM8apwkQHZOJZlvoPGIytbU6bumFAYueQ4xncyhZW+vj3CzMpSZy +YhK05pyDRPZRpOLAeiRXyg6lPzq1O4vldu5w5pLeFlwoW5cZJ5L+epJUzpM5ChaH +vGOz9bGTXOBut9Dq+WIyiET7vycotjCVXRIouZW+j1MY5aIYFuJWpLIsEPUdN6b4 +t/bQWVyJ98LVtZR00dX+G7bw5tYee9I8y6jj9RjzIR9u701oBnstXW5DiabA+aC/ +gh7PU3+06yzbXfZqfUAkBXKJOAGTy3HCOV0GEfZvePg3DTmEJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUajn6QiL3 +5okATV59M4PLuG53hq8wHwYDVR0jBBgwFoAUajn6QiL35okATV59M4PLuG53hq8w +DQYJKoZIhvcNAQENBQADggIBAJjLq0A85TMCl38th6aP1F5Kr7ge57tx+4BkJamz +Gj5oXScmp7oq4fBXgwpkTx4idBvpkF/wrM//T2h6OKQQbA2xx6R3gBi2oihEdqc0 +nXGEL8pZ0keImUEiyTCYYW49qKgFbdEfwFFEVn8nNQLdXpgKQuswv42hm1GqO+qT +RmTFAHneIWv2V6CG1wZy7HBGS4tz3aAhdT7cHcCP009zHIXZ/n9iyJVvttN7jLpT +wm+bREx50B1ws9efAvSyB7DH5fitIw6mVskpEndI2S9G/Tvw/HRwkqWOOAgfZDC2 +t0v7NqwQjqBSM2OdAzVWxWm9xiNaJ5T2pBL4LTM8oValX9YZ6e18CL13zSdkzJTa +TkZQh+D5wVOAHrut+0dSixv9ovneDiK3PTNZbNTe9ZUGMg1RGUFcPk8G97krgCf2 +o6p6fAbhQ8MTOWIaNr3gKC6UAuQpLmBVrkA9sHSSXvAgZJY/X0VdiLWK2gKgW0VU +3jg9CcCoSmVGFvyqv1ROTVu+OEO3KMqLM6oaJbolXCkvW0pujOotnCr2BXbgd5eA +iN1nE28daCSLT7d0geX0YJ96Vdc+N9oWaz53rK4YcJUIeSkDiv7BO7M/Gg+kO14f 
+WKGVyasvc0rQLW6aWQ9VGHgtPFGml4vmu7JwqkwR3v98KzfUetF3NI/n+UL3PIEM +S1IK +-----END CERTIFICATE----- + +# Issuer: CN=OpenTrust Root CA G3 O=OpenTrust +# Subject: CN=OpenTrust Root CA G3 O=OpenTrust +# Label: "OpenTrust Root CA G3" +# Serial: 1492104908271485653071219941864171170455615 +# MD5 Fingerprint: 21:37:b4:17:16:92:7b:67:46:70:a9:96:d7:a8:13:24 +# SHA1 Fingerprint: 6e:26:64:f3:56:bf:34:55:bf:d1:93:3f:7c:01:de:d8:13:da:8a:a6 +# SHA256 Fingerprint: b7:c3:62:31:70:6e:81:07:8c:36:7c:b8:96:19:8f:1e:32:08:dd:92:69:49:dd:8f:57:09:a4:10:f7:5b:62:92 +-----BEGIN CERTIFICATE----- +MIICITCCAaagAwIBAgISESDm+Ez8JLC+BUCs2oMbNGA/MAoGCCqGSM49BAMDMEAx +CzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5U +cnVzdCBSb290IENBIEczMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFow +QDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwUT3Bl +blRydXN0IFJvb3QgQ0EgRzMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARK7liuTcpm +3gY6oxH84Bjwbhy6LTAMidnW7ptzg6kjFYwvWYpa3RTqnVkrQ7cG7DK2uu5Bta1d +oYXM6h0UZqNnfkbilPPntlahFVmhTzeXuSIevRHr9LIfXsMUmuXZl5mjYzBhMA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRHd8MUi2I5 +DMlv4VBN0BBY3JWIbTAfBgNVHSMEGDAWgBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAK +BggqhkjOPQQDAwNpADBmAjEAj6jcnboMBBf6Fek9LykBl7+BFjNAk2z8+e2AcG+q +j9uEwov1NcoG3GRvaBbhj5G5AjEA2Euly8LQCGzpGPta3U1fJAuwACEl74+nBCZx +4nxp5V2a+EEfOzmTk51V6s2N8fvB +-----END CERTIFICATE----- + +# Issuer: CN=ISRG Root X1 O=Internet Security Research Group +# Subject: CN=ISRG Root X1 O=Internet Security Research Group +# Label: "ISRG Root X1" +# Serial: 172886928669790476064670243504169061120 +# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e +# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8 +# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6 +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh 
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Label: "AC RAIZ FNMT-RCM" +# Serial: 485876308206448804701554682760554759 +# MD5 Fingerprint: 
e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d +# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 +# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx +CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp 
+1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 1 O=Amazon +# Subject: CN=Amazon Root CA 1 O=Amazon +# Label: "Amazon Root CA 1" +# Serial: 143266978916655856878034712317230054538369994 +# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 +# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 +# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj +ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 2 O=Amazon +# Subject: CN=Amazon Root CA 2 O=Amazon +# Label: 
"Amazon Root CA 2" +# Serial: 143266982885963551818349160658925006970653239 +# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66 +# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a +# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4 +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ 
+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE +76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 3 O=Amazon +# Subject: CN=Amazon Root CA 3 O=Amazon +# Label: "Amazon Root CA 3" +# Serial: 143266986699090766294700635381230934788665930 +# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87 +# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e +# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4 +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 4 O=Amazon +# Subject: CN=Amazon Root CA 4 O=Amazon +# Label: "Amazon Root CA 4" +# Serial: 143266989758080763974105200630763877849284878 +# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd +# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be +# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92 +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A. +# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A. +# Label: "LuxTrust Global Root 2" +# Serial: 59914338225734147123941058376788110305822489521 +# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c +# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f +# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5 +-----BEGIN CERTIFICATE----- +MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL +BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV +BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw +MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B +LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F +ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem +hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1 +EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn +Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4 +zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ +96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m 
+j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g +DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+ +8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j +X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH +hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB +KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0 +Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT ++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL +BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9 +BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO +jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9 +loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c +qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+ +2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/ +JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre +zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf +LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+ +x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6 +oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr +-----END CERTIFICATE----- + +# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1" +# Serial: 1 +# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49 +# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca +# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16 +-----BEGIN CERTIFICATE----- 
+MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx +GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp +bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w +KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 +BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy +dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG +EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll +IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU +QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT +TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg +LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 +a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr +LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr +N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X +YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ +iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f +AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH +V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf +IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 +lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c +8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf +lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. 
+# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg 
+IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-1" +# Serial: 15752444095811006489 +# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 +# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a +# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y +IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB +pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h +IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG +A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU +cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid +RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V +seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme +9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV +EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW +hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ 
+DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I +/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf +ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ +yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts +L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN +zl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-2" +# Serial: 2711694510199101698 +# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 +# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 +# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig +Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk +MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg +Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD +VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy +dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ +QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq +1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp +2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK 
+DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape +az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF +3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 +oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM +g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 +mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd +BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U +nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX +dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ +MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL +/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX +CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa +ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW +2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 +N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 +Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB +As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp +5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu +1uwJ +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor ECA-1" +# Serial: 9548242946988625984 +# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c +# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd +# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y +IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig +RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb +3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA +BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 +3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou +owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ +wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF +ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf +BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv +civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 +AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 +soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI +WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi +tJ/X5g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification 
Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm ++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ 
+q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com 
EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq +M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 
+Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ 
+ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- diff --git a/botocore/client.py b/botocore/client.py index d34d4b1a..99ccecd7 100644 --- a/botocore/client.py +++ b/botocore/client.py @@ -90,9 +90,11 @@ class ClientCreator(object): py_name_to_operation_name = self._create_name_mapping(service_model) class_attributes['_PY_TO_OP_NAME'] = py_name_to_operation_name bases = [BaseClient] - self._event_emitter.emit('creating-client-class.%s' % service_name, - class_attributes=class_attributes, - base_classes=bases) + service_id = service_model.service_id.hyphenize() + self._event_emitter.emit( + 'creating-client-class.%s' % service_id, + class_attributes=class_attributes, + base_classes=bases) class_name = get_service_module_name(service_model) cls = type(str(class_name), tuple(bases), class_attributes) return cls @@ -105,6 +107,8 @@ class ClientCreator(object): def _register_retries(self, client): endpoint_prefix = client.meta.service_model.endpoint_prefix + service_id = client.meta.service_model.service_id + service_event_name = service_id.hyphenize() # First, we load the entire retry config for all services, # then pull out just the information we need. 
@@ -122,9 +126,11 @@ class ClientCreator(object): client.meta.service_model.service_name) handler = self._retry_handler_factory.create_retry_handler( retry_config, endpoint_prefix) - unique_id = 'retry-config-%s' % endpoint_prefix - client.meta.events.register('needs-retry.%s' % endpoint_prefix, - handler, unique_id=unique_id) + unique_id = 'retry-config-%s' % service_event_name + client.meta.events.register( + 'needs-retry.%s' % service_event_name, handler, + unique_id=unique_id + ) def _register_s3_events(self, client, endpoint_bridge, endpoint_url, client_config, scoped_config): @@ -543,7 +549,9 @@ class BaseClient(object): self._register_handlers() def __getattr__(self, item): - event_name = 'getattr.%s.%s' % (self._service_model.service_name, item) + event_name = 'getattr.%s.%s' % ( + self._service_model.service_id.hyphenize(), item + ) handler, event_response = self.meta.events.emit_until_response( event_name, client=self) @@ -557,9 +565,11 @@ class BaseClient(object): def _register_handlers(self): # Register the handler required to sign requests. 
- self.meta.events.register('request-created.%s' % - self.meta.service_model.endpoint_prefix, - self._request_signer.handler) + service_id = self.meta.service_model.service_id.hyphenize() + self.meta.events.register( + 'request-created.%s' % service_id, + self._request_signer.handler + ) @property def _service_model(self): @@ -585,9 +595,10 @@ class BaseClient(object): request_dict = self._convert_to_request_dict( api_params, operation_model, context=request_context) + service_id = self._service_model.service_id.hyphenize() handler, event_response = self.meta.events.emit_until_response( - 'before-call.{endpoint_prefix}.{operation_name}'.format( - endpoint_prefix=self._service_model.endpoint_prefix, + 'before-call.{service_id}.{operation_name}'.format( + service_id=service_id, operation_name=operation_name), model=operation_model, params=request_dict, request_signer=self._request_signer, context=request_context) @@ -599,8 +610,8 @@ class BaseClient(object): operation_model, request_dict) self.meta.events.emit( - 'after-call.{endpoint_prefix}.{operation_name}'.format( - endpoint_prefix=self._service_model.endpoint_prefix, + 'after-call.{service_id}.{operation_name}'.format( + service_id=service_id, operation_name=operation_name), http_response=http, parsed=parsed_response, model=operation_model, context=request_context @@ -632,18 +643,19 @@ class BaseClient(object): # Emit an event that allows users to modify the parameters at the # beginning of the method. It allows handlers to modify existing # parameters or return a new set of parameters to use. 
+ service_id = self._service_model.service_id.hyphenize() responses = self.meta.events.emit( - 'provide-client-params.{endpoint_prefix}.{operation_name}'.format( - endpoint_prefix=self._service_model.endpoint_prefix, + 'provide-client-params.{service_id}.{operation_name}'.format( + service_id=service_id, operation_name=operation_name), params=api_params, model=operation_model, context=context) api_params = first_non_none_response(responses, default=api_params) event_name = ( - 'before-parameter-build.{endpoint_prefix}.{operation_name}') + 'before-parameter-build.{service_id}.{operation_name}') self.meta.events.emit( event_name.format( - endpoint_prefix=self._service_model.endpoint_prefix, + service_id=service_id, operation_name=operation_name), params=api_params, model=operation_model, context=context) return api_params diff --git a/botocore/compat.py b/botocore/compat.py index edf4df46..47e1c813 100644 --- a/botocore/compat.py +++ b/botocore/compat.py @@ -23,7 +23,7 @@ from math import floor from botocore.vendored import six from botocore.exceptions import MD5UnavailableError -from botocore.vendored.requests.packages.urllib3 import exceptions +from urllib3 import exceptions logger = logging.getLogger(__name__) diff --git a/botocore/data/apigateway/2015-07-09/service-2.json b/botocore/data/apigateway/2015-07-09/service-2.json index cc0dc9f6..998c42fd 100644 --- a/botocore/data/apigateway/2015-07-09/service-2.json +++ b/botocore/data/apigateway/2015-07-09/service-2.json @@ -27,7 +27,7 @@ {"shape":"BadRequestException"}, {"shape":"ConflictException"} ], - "documentation":"

Create an ApiKey resource.

" + "documentation":"

Create an ApiKey resource.

" }, "CreateAuthorizer":{ "name":"CreateAuthorizer", @@ -45,7 +45,7 @@ {"shape":"LimitExceededException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Adds a new Authorizer resource to an existing RestApi resource.

" + "documentation":"

Adds a new Authorizer resource to an existing RestApi resource.

" }, "CreateBasePathMapping":{ "name":"CreateBasePathMapping", @@ -313,7 +313,7 @@ {"shape":"BadRequestException"}, {"shape":"ConflictException"} ], - "documentation":"

Deletes an existing Authorizer resource.

" + "documentation":"

Deletes an existing Authorizer resource.

" }, "DeleteBasePathMapping":{ "name":"DeleteBasePathMapping", @@ -732,7 +732,7 @@ {"shape":"NotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Describe an existing Authorizer resource.

" + "documentation":"

Describe an existing Authorizer resource.

" }, "GetAuthorizers":{ "name":"GetAuthorizers", @@ -748,7 +748,7 @@ {"shape":"NotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Describe an existing Authorizers resource.

" + "documentation":"

Describe an existing Authorizers resource.

" }, "GetBasePathMapping":{ "name":"GetBasePathMapping", @@ -1579,7 +1579,7 @@ {"shape":"NotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Simulate the execution of an Authorizer in your RestApi with headers, parameters, and an incoming request body.

" + "documentation":"

Simulate the execution of an Authorizer in your RestApi with headers, parameters, and an incoming request body.

" }, "TestInvokeMethod":{ "name":"TestInvokeMethod", @@ -1661,7 +1661,7 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Updates an existing Authorizer resource.

" + "documentation":"

Updates an existing Authorizer resource.

" }, "UpdateBasePathMapping":{ "name":"UpdateBasePathMapping", @@ -1990,7 +1990,7 @@ "members":{ "format":{ "shape":"String", - "documentation":"

A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId.

" + "documentation":"

A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId.

" }, "destinationArn":{ "shape":"String", @@ -2019,7 +2019,7 @@ "documentation":"

The version of the API keys used for the account.

" } }, - "documentation":"

Represents an AWS account that is associated with API Gateway.

To view the account info, call GET on this resource.

Error Codes

The following exception may be thrown when the request fails.

  • UnauthorizedException
  • NotFoundException
  • TooManyRequestsException

For detailed error code information, including the corresponding HTTP Status Codes, see API Gateway Error Codes

Example: Get the information about an account.

Request
GET /account HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160531T184618Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} 
Response

The successful response returns a 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/account-apigateway-{rel}.html\", \"name\": \"account\", \"templated\": true }, \"self\": { \"href\": \"/account\" }, \"account:update\": { \"href\": \"/account\" } }, \"cloudwatchRoleArn\": \"arn:aws:iam::123456789012:role/apigAwsProxyRole\", \"throttleSettings\": { \"rateLimit\": 500, \"burstLimit\": 1000 } } 

In addition to making the REST API call directly, you can use the AWS CLI and an AWS SDK to access this resource.

" + "documentation":"

Represents an AWS account that is associated with API Gateway.

To view the account info, call GET on this resource.

Error Codes

The following exception may be thrown when the request fails.

  • UnauthorizedException
  • NotFoundException
  • TooManyRequestsException

For detailed error code information, including the corresponding HTTP Status Codes, see API Gateway Error Codes

Example: Get the information about an account.

Request
GET /account HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160531T184618Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} 
Response

The successful response returns a 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/account-apigateway-{rel}.html\", \"name\": \"account\", \"templated\": true }, \"self\": { \"href\": \"/account\" }, \"account:update\": { \"href\": \"/account\" } }, \"cloudwatchRoleArn\": \"arn:aws:iam::123456789012:role/apigAwsProxyRole\", \"throttleSettings\": { \"rateLimit\": 500, \"burstLimit\": 1000 } } 

In addition to making the REST API call directly, you can use the AWS CLI and an AWS SDK to access this resource.

" }, "ApiKey":{ "type":"structure", @@ -2061,7 +2061,7 @@ "documentation":"

A list of Stage resources that are associated with the ApiKey resource.

" } }, - "documentation":"

A resource that can be distributed to callers for executing Method resources that require an API key. API keys can be mapped to any Stage on any RestApi, which indicates that the callers with the API key can make requests to that stage.

" + "documentation":"

A resource that can be distributed to callers for executing Method resources that require an API key. API keys can be mapped to any Stage on any RestApi, which indicates that the callers with the API key can make requests to that stage.

" }, "ApiKeyIds":{ "type":"structure", @@ -2098,7 +2098,7 @@ "locationName":"item" } }, - "documentation":"

Represents a collection of API keys as represented by an ApiKeys resource.

" + "documentation":"

Represents a collection of API keys as represented by an ApiKeys resource.

" }, "ApiKeysFormat":{ "type":"string", @@ -2143,7 +2143,7 @@ }, "authType":{ "shape":"String", - "documentation":"

Optional customer-defined field, used in Swagger imports and exports without functional impact.

" + "documentation":"

Optional customer-defined field, used in OpenAPI imports and exports without functional impact.

" }, "authorizerUri":{ "shape":"String", @@ -2166,7 +2166,7 @@ "documentation":"

The TTL in seconds of cached authorizer results. If it equals 0, authorization caching is disabled. If it is greater than 0, API Gateway will cache authorizer responses. If this field is not set, the default value is 300. The maximum value is 3600, or 1 hour.

" } }, - "documentation":"

Represents an authorization layer for methods. If enabled on a method, API Gateway will activate the authorizer when a client calls the method.

" + "documentation":"

Represents an authorization layer for methods. If enabled on a method, API Gateway will activate the authorizer when a client calls the method.

" }, "AuthorizerType":{ "type":"string", @@ -2187,7 +2187,7 @@ "locationName":"item" } }, - "documentation":"

Represents a collection of Authorizer resources.

" + "documentation":"

Represents a collection of Authorizer resources.

" }, "BadRequestException":{ "type":"structure", @@ -2214,7 +2214,7 @@ "documentation":"

The name of the associated stage.

" } }, - "documentation":"

Represents the base path that callers of the API must provide as part of the URL after the domain name.

A custom domain name plus a BasePathMapping specification identifies a deployed RestApi in a given stage of the owner Account.
" + "documentation":"

Represents the base path that callers of the API must provide as part of the URL after the domain name.

A custom domain name plus a BasePathMapping specification identifies a deployed RestApi in a given stage of the owner Account.
" }, "BasePathMappings":{ "type":"structure", @@ -2226,7 +2226,7 @@ "locationName":"item" } }, - "documentation":"

Represents a collection of BasePathMapping resources.

" + "documentation":"

Represents a collection of BasePathMapping resources.

" }, "Blob":{"type":"blob"}, "Boolean":{"type":"boolean"}, @@ -2301,7 +2301,7 @@ "documentation":"

The timestamp when the client certificate will expire.

" } }, - "documentation":"

Represents a client certificate used to configure client-side SSL authentication while sending requests to the integration endpoint.

Client certificates are used to authenticate an API by the backend server. To authenticate an API client (or user), use IAM roles and policies, a custom Authorizer or an Amazon Cognito user pool.
" + "documentation":"

Represents a client certificate used to configure client-side SSL authentication while sending requests to the integration endpoint.

Client certificates are used to authenticate an API by the backend server. To authenticate an API client (or user), use IAM roles and policies, a custom Authorizer or an Amazon Cognito user pool.
" }, "ClientCertificates":{ "type":"structure", @@ -2313,7 +2313,7 @@ "locationName":"item" } }, - "documentation":"

Represents a collection of ClientCertificate resources.

" + "documentation":"

Represents a collection of ClientCertificate resources.

" }, "ConflictException":{ "type":"structure", @@ -2400,7 +2400,7 @@ }, "authType":{ "shape":"String", - "documentation":"

Optional customer-defined field, used in Swagger imports and exports without functional impact.

" + "documentation":"

Optional customer-defined field, used in OpenAPI imports and exports without functional impact.

" }, "authorizerUri":{ "shape":"String", @@ -2490,6 +2490,10 @@ "canarySettings":{ "shape":"DeploymentCanarySettings", "documentation":"

The input configuration for the canary deployment when the deployment is a canary release deployment.

" + }, + "tracingEnabled":{ + "shape":"NullableBoolean", + "documentation":"

Specifies whether active tracing with X-ray is enabled for the Stage.

" } }, "documentation":"

Requests API Gateway to create a Deployment resource.

" @@ -2514,7 +2518,7 @@ }, "properties":{ "shape":"String", - "documentation":"

[Required] The new documentation content map of the targeted API entity. Enclosed key-value pairs are API-specific, but only Swagger-compliant key-value pairs can be exported and, hence, published.

" + "documentation":"

[Required] The new documentation content map of the targeted API entity. Enclosed key-value pairs are API-specific, but only OpenAPI-compliant key-value pairs can be exported and, hence, published.

" } }, "documentation":"

Creates a new documentation part of a given API.

" @@ -2764,6 +2768,10 @@ "shape":"CanarySettings", "documentation":"

The canary deployment settings of this stage.

" }, + "tracingEnabled":{ + "shape":"Boolean", + "documentation":"

Specifies whether active tracing with X-ray is enabled for the Stage.

" + }, "tags":{ "shape":"MapOfStringToString", "documentation":"

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

" @@ -3315,7 +3323,7 @@ "documentation":"

A summary of the RestApi at the date and time that the deployment resource was created.

" } }, - "documentation":"

An immutable representation of a RestApi resource that can be called by users using Stages. A deployment must be associated with a Stage for it to be callable over the Internet.

To create a deployment, call POST on the Deployments resource of a RestApi. To view, update, or delete a deployment, call GET, PATCH, or DELETE on the specified deployment resource (/restapis/{restapi_id}/deployments/{deployment_id}).
" + "documentation":"

An immutable representation of a RestApi resource that can be called by users using Stages. A deployment must be associated with a Stage for it to be callable over the Internet.

To create a deployment, call POST on the Deployments resource of a RestApi. To view, update, or delete a deployment, call GET, PATCH, or DELETE on the specified deployment resource (/restapis/{restapi_id}/deployments/{deployment_id}).
" }, "DeploymentCanarySettings":{ "type":"structure", @@ -3345,7 +3353,7 @@ "locationName":"item" } }, - "documentation":"

Represents a collection resource that contains zero or more references to your existing deployments, and links that guide you on how to interact with your collection. The collection offers a paginated view of the contained deployments.

To create a new deployment of a RestApi, make a POST request against this resource. To view, update, or delete an existing deployment, make a GET, PATCH, or DELETE request, respectively, on a specified Deployment resource.
" + "documentation":"

Represents a collection resource that contains zero or more references to your existing deployments, and links that guide you on how to interact with your collection. The collection offers a paginated view of the contained deployments.

To create a new deployment of a RestApi, make a POST request against this resource. To view, update, or delete an existing deployment, make a GET, PATCH, or DELETE request, respectively, on a specified Deployment resource.
" }, "DocumentationPart":{ "type":"structure", @@ -3360,10 +3368,10 @@ }, "properties":{ "shape":"String", - "documentation":"

A content map of API-specific key-value pairs describing the targeted API entity. The map must be encoded as a JSON string, e.g., \"{ \\\"description\\\": \\\"The API does ...\\\" }\". Only Swagger-compliant documentation-related fields from the properties map are exported and, hence, published as part of the API entity definitions, while the original documentation parts are exported in a Swagger extension of x-amazon-apigateway-documentation.

" + "documentation":"

A content map of API-specific key-value pairs describing the targeted API entity. The map must be encoded as a JSON string, e.g., \"{ \\\"description\\\": \\\"The API does ...\\\" }\". Only OpenAPI-compliant documentation-related fields from the properties map are exported and, hence, published as part of the API entity definitions, while the original documentation parts are exported in a OpenAPI extension of x-amazon-apigateway-documentation.

" } }, - "documentation":"

A documentation part for a targeted API entity.

A documentation part consists of a content map (properties) and a target (location). The target specifies an API entity to which the documentation content applies. The supported API entity types are API, AUTHORIZER, MODEL, RESOURCE, METHOD, PATH_PARAMETER, QUERY_PARAMETER, REQUEST_HEADER, REQUEST_BODY, RESPONSE, RESPONSE_HEADER, and RESPONSE_BODY. Valid location fields depend on the API entity type. All valid fields are not required.

The content map is a JSON string of API-specific key-value pairs. Although an API can use any shape for the content map, only the Swagger-compliant documentation fields will be injected into the associated API entity definition in the exported Swagger definition file.

" + "documentation":"

A documentation part for a targeted API entity.

A documentation part consists of a content map (properties) and a target (location). The target specifies an API entity to which the documentation content applies. The supported API entity types are API, AUTHORIZER, MODEL, RESOURCE, METHOD, PATH_PARAMETER, QUERY_PARAMETER, REQUEST_HEADER, REQUEST_BODY, RESPONSE, RESPONSE_HEADER, and RESPONSE_BODY. Valid location fields depend on the API entity type. All valid fields are not required.

The content map is a JSON string of API-specific key-value pairs. Although an API can use any shape for the content map, only the OpenAPI-compliant documentation fields will be injected into the associated API entity definition in the exported OpenAPI definition file.

" }, "DocumentationPartIds":{ "type":"structure", @@ -3377,7 +3385,7 @@ "documentation":"

A list of warning messages reported during import of documentation parts.

" } }, - "documentation":"

A collection of the imported DocumentationPart identifiers.

This is used to return the result when documentation parts in an external (e.g., Swagger) file are imported into API Gateway
" + "documentation":"

A collection of the imported DocumentationPart identifiers.

This is used to return the result when documentation parts in an external (e.g., OpenAPI) file are imported into API Gateway
" }, "DocumentationPartLocation":{ "type":"structure", @@ -3437,7 +3445,7 @@ "locationName":"item" } }, - "documentation":"

The collection of documentation parts of an API.

" + "documentation":"

The collection of documentation parts of an API.

" }, "DocumentationVersion":{ "type":"structure", @@ -3455,7 +3463,7 @@ "documentation":"

The description of the API documentation snapshot.

" } }, - "documentation":"

A snapshot of the documentation of an API.

Publishing API documentation involves creating a documentation version associated with an API stage and exporting the versioned documentation to an external (e.g., Swagger) file.

" + "documentation":"

A snapshot of the documentation of an API.

Publishing API documentation involves creating a documentation version associated with an API stage and exporting the versioned documentation to an external (e.g., OpenAPI) file.

" }, "DocumentationVersions":{ "type":"structure", @@ -3467,7 +3475,7 @@ "locationName":"item" } }, - "documentation":"

The collection of documentation snapshots of an API.

Use the DocumentationVersions to manage documentation snapshots associated with various API stages.

" + "documentation":"

The collection of documentation snapshots of an API.

Use the DocumentationVersions to manage documentation snapshots associated with various API stages.

" }, "DomainName":{ "type":"structure", @@ -3494,7 +3502,7 @@ }, "regionalHostedZoneId":{ "shape":"String", - "documentation":"

The region-specific Amazon Route 53 Hosted Zone ID of the regional endpoint. For more information, see Set up a Regional Custom Domain Name and AWS Regions and Endpoints for API Gateway.

" + "documentation":"

The region-specific Amazon Route 53 Hosted Zone ID of the regional endpoint. For more information, see Set up a Regional Custom Domain Name and AWS Regions and Endpoints for API Gateway.

" }, "regionalCertificateName":{ "shape":"String", @@ -3506,18 +3514,18 @@ }, "distributionDomainName":{ "shape":"String", - "documentation":"

The domain name of the Amazon CloudFront distribution associated with this custom domain name for an edge-optimized endpoint. You set up this association when adding a DNS record pointing the custom domain name to this distribution name. For more information about CloudFront distributions, see the Amazon CloudFront documentation.

" + "documentation":"

The domain name of the Amazon CloudFront distribution associated with this custom domain name for an edge-optimized endpoint. You set up this association when adding a DNS record pointing the custom domain name to this distribution name. For more information about CloudFront distributions, see the Amazon CloudFront documentation.

" }, "distributionHostedZoneId":{ "shape":"String", - "documentation":"

The region-agnostic Amazon Route 53 Hosted Zone ID of the edge-optimized endpoint. The valid value is Z2FDTNDATAQYW2 for all the regions. For more information, see Set up a Regional Custom Domain Name and AWS Regions and Endpoints for API Gateway.

" + "documentation":"

The region-agnostic Amazon Route 53 Hosted Zone ID of the edge-optimized endpoint. The valid value is Z2FDTNDATAQYW2 for all the regions. For more information, see Set up a Regional Custom Domain Name and AWS Regions and Endpoints for API Gateway.

" }, "endpointConfiguration":{ "shape":"EndpointConfiguration", "documentation":"

The endpoint configuration of this DomainName showing the endpoint types of the domain name.

" } }, - "documentation":"

Represents a custom domain name as a user-friendly host name of an API (RestApi).

When you deploy an API, API Gateway creates a default host name for the API. This default API host name is of the {restapi-id}.execute-api.{region}.amazonaws.com format. With the default host name, you can access the API's root resource with the URL of https://{restapi-id}.execute-api.{region}.amazonaws.com/{stage}/. When you set up a custom domain name of apis.example.com for this API, you can then access the same resource using the URL of the https://apis.examples.com/myApi, where myApi is the base path mapping (BasePathMapping) of your API under the custom domain name.

" + "documentation":"

Represents a custom domain name as a user-friendly host name of an API (RestApi).

When you deploy an API, API Gateway creates a default host name for the API. This default API host name is of the {restapi-id}.execute-api.{region}.amazonaws.com format. With the default host name, you can access the API's root resource with the URL of https://{restapi-id}.execute-api.{region}.amazonaws.com/{stage}/. When you set up a custom domain name of apis.example.com for this API, you can then access the same resource using the URL of the https://apis.examples.com/myApi, where myApi is the base path mapping (BasePathMapping) of your API under the custom domain name.

" }, "DomainNames":{ "type":"structure", @@ -3529,7 +3537,7 @@ "locationName":"item" } }, - "documentation":"

Represents a collection of DomainName resources.

" + "documentation":"

Represents a collection of DomainName resources.

" }, "Double":{"type":"double"}, "EndpointConfiguration":{ @@ -3642,7 +3650,7 @@ "documentation":"

A Boolean flag to indicate whether this GatewayResponse is the default gateway response (true) or not (false). A default gateway response is one generated by API Gateway without any customization by an API developer.

" } }, - "documentation":"

A gateway response of a given response type and status code, with optional response parameters and mapping templates.

For more information about valid gateway response types, see Gateway Response Types Supported by API Gateway

Example: Get a Gateway Response of a given response type

Request

This example shows how to get a gateway response of the MISSING_AUTHENTICATION_TOKEN type.

GET /restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json X-Amz-Date: 20170503T202516Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=1b52460e3159c1a26cff29093855d50ea141c1c5b937528fecaf60f51129697a Cache-Control: no-cache Postman-Token: 3b2a1ce9-c848-2e26-2e2f-9c2caefbed45 

The response type is specified as a URL path.

Response

The successful operation returns the 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html\", \"name\": \"gatewayresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:delete\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" } }, \"defaultResponse\": false, \"responseParameters\": { \"gatewayresponse.header.x-request-path\": \"method.request.path.petId\", \"gatewayresponse.header.Access-Control-Allow-Origin\": \"'a.b.c'\", \"gatewayresponse.header.x-request-query\": \"method.request.querystring.q\", \"gatewayresponse.header.x-request-header\": \"method.request.header.Accept\" }, \"responseTemplates\": { \"application/json\": \"{\\n \\\"message\\\": $context.error.messageString,\\n \\\"type\\\": \\\"$context.error.responseType\\\",\\n \\\"stage\\\": \\\"$context.stage\\\",\\n \\\"resourcePath\\\": \\\"$context.resourcePath\\\",\\n \\\"stageVariables.a\\\": \\\"$stageVariables.a\\\",\\n \\\"statusCode\\\": \\\"'404'\\\"\\n}\" }, \"responseType\": \"MISSING_AUTHENTICATION_TOKEN\", \"statusCode\": \"404\" }

" + "documentation":"

A gateway response of a given response type and status code, with optional response parameters and mapping templates.

For more information about valid gateway response types, see Gateway Response Types Supported by API Gateway

Example: Get a Gateway Response of a given response type

Request

This example shows how to get a gateway response of the MISSING_AUTHENTICATION_TOKEN type.

GET /restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json X-Amz-Date: 20170503T202516Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=1b52460e3159c1a26cff29093855d50ea141c1c5b937528fecaf60f51129697a Cache-Control: no-cache Postman-Token: 3b2a1ce9-c848-2e26-2e2f-9c2caefbed45 

The response type is specified as a URL path.

Response

The successful operation returns the 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html\", \"name\": \"gatewayresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:delete\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" } }, \"defaultResponse\": false, \"responseParameters\": { \"gatewayresponse.header.x-request-path\": \"method.request.path.petId\", \"gatewayresponse.header.Access-Control-Allow-Origin\": \"'a.b.c'\", \"gatewayresponse.header.x-request-query\": \"method.request.querystring.q\", \"gatewayresponse.header.x-request-header\": \"method.request.header.Accept\" }, \"responseTemplates\": { \"application/json\": \"{\\n \\\"message\\\": $context.error.messageString,\\n \\\"type\\\": \\\"$context.error.responseType\\\",\\n \\\"stage\\\": \\\"$context.stage\\\",\\n \\\"resourcePath\\\": \\\"$context.resourcePath\\\",\\n \\\"stageVariables.a\\\": \\\"$stageVariables.a\\\",\\n \\\"statusCode\\\": \\\"'404'\\\"\\n}\" }, \"responseType\": \"MISSING_AUTHENTICATION_TOKEN\", \"statusCode\": \"404\" }

" }, "GatewayResponseType":{ "type":"string", @@ -3679,7 +3687,7 @@ "locationName":"item" } }, - "documentation":"

The collection of the GatewayResponse instances of a RestApi as a responseType-to-GatewayResponse object map of key-value pairs. As such, pagination is not supported for querying this collection.

For more information about valid gateway response types, see Gateway Response Types Supported by API Gateway

Example: Get the collection of gateway responses of an API

Request

This example request shows how to retrieve the GatewayResponses collection from an API.

GET /restapis/o81lxisefl/gatewayresponses HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json X-Amz-Date: 20170503T220604Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=59b42fe54a76a5de8adf2c67baa6d39206f8e9ad49a1d77ccc6a5da3103a398a Cache-Control: no-cache Postman-Token: 5637af27-dc29-fc5c-9dfe-0645d52cb515 

Response

The successful operation returns the 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html\", \"name\": \"gatewayresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses\" }, \"first\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses\" }, \"gatewayresponse:by-type\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"item\": [ { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" } ] }, \"_embedded\": { 
\"item\": [ { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INTEGRATION_FAILURE\", \"statusCode\": \"504\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"RESOURCE_NOT_FOUND\", \"statusCode\": \"404\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"REQUEST_TOO_LARGE\", \"statusCode\": \"413\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" } }, \"defaultResponse\": 
true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"THROTTLED\", \"statusCode\": \"429\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"UNSUPPORTED_MEDIA_TYPE\", \"statusCode\": \"415\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"AUTHORIZER_CONFIGURATION_ERROR\", \"statusCode\": \"500\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"DEFAULT_5XX\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" }, \"gatewayresponse:put\": { \"href\": 
\"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"DEFAULT_4XX\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"BAD_REQUEST_PARAMETERS\", \"statusCode\": \"400\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"BAD_REQUEST_BODY\", \"statusCode\": \"400\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"EXPIRED_TOKEN\", 
\"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"ACCESS_DENIED\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INVALID_API_KEY\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"UNAUTHORIZED\", \"statusCode\": \"401\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" } }, \"defaultResponse\": 
true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"API_CONFIGURATION_ERROR\", \"statusCode\": \"500\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"QUOTA_EXCEEDED\", \"statusCode\": \"429\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INTEGRATION_TIMEOUT\", \"statusCode\": \"504\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"MISSING_AUTHENTICATION_TOKEN\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" }, 
\"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INVALID_SIGNATURE\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"AUTHORIZER_FAILURE\", \"statusCode\": \"500\" } ] } }

" + "documentation":"

The collection of the GatewayResponse instances of a RestApi as a responseType-to-GatewayResponse object map of key-value pairs. As such, pagination is not supported for querying this collection.

For more information about valid gateway response types, see Gateway Response Types Supported by API Gateway

Example: Get the collection of gateway responses of an API

Request

This example request shows how to retrieve the GatewayResponses collection from an API.

GET /restapis/o81lxisefl/gatewayresponses HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json X-Amz-Date: 20170503T220604Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=59b42fe54a76a5de8adf2c67baa6d39206f8e9ad49a1d77ccc6a5da3103a398a Cache-Control: no-cache Postman-Token: 5637af27-dc29-fc5c-9dfe-0645d52cb515 

Response

The successful operation returns the 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html\", \"name\": \"gatewayresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses\" }, \"first\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses\" }, \"gatewayresponse:by-type\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"item\": [ { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" } ] }, \"_embedded\": { 
\"item\": [ { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INTEGRATION_FAILURE\", \"statusCode\": \"504\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"RESOURCE_NOT_FOUND\", \"statusCode\": \"404\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"REQUEST_TOO_LARGE\", \"statusCode\": \"413\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" } }, \"defaultResponse\": 
true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"THROTTLED\", \"statusCode\": \"429\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"UNSUPPORTED_MEDIA_TYPE\", \"statusCode\": \"415\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"AUTHORIZER_CONFIGURATION_ERROR\", \"statusCode\": \"500\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"DEFAULT_5XX\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" }, \"gatewayresponse:put\": { \"href\": 
\"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"DEFAULT_4XX\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"BAD_REQUEST_PARAMETERS\", \"statusCode\": \"400\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"BAD_REQUEST_BODY\", \"statusCode\": \"400\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"EXPIRED_TOKEN\", 
\"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"ACCESS_DENIED\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INVALID_API_KEY\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"UNAUTHORIZED\", \"statusCode\": \"401\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" } }, \"defaultResponse\": 
true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"API_CONFIGURATION_ERROR\", \"statusCode\": \"500\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"QUOTA_EXCEEDED\", \"statusCode\": \"429\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INTEGRATION_TIMEOUT\", \"statusCode\": \"504\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"MISSING_AUTHENTICATION_TOKEN\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" }, 
\"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INVALID_SIGNATURE\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"AUTHORIZER_FAILURE\", \"statusCode\": \"500\" } ] } }

" }, "GenerateClientCertificateRequest":{ "type":"structure", @@ -4101,18 +4109,18 @@ }, "exportType":{ "shape":"String", - "documentation":"

[Required] The type of export. Currently only 'swagger' is supported.

", + "documentation":"

[Required] The type of export. Acceptable values are 'oas30' for OpenAPI 3.0.x and 'swagger' for Swagger/OpenAPI 2.0.

", "location":"uri", "locationName":"export_type" }, "parameters":{ "shape":"MapOfStringToString", - "documentation":"

A key-value map of query string parameters that specify properties of the export, depending on the requested exportType. For exportType swagger, any combination of the following parameters are supported: extensions='integrations' or extensions='apigateway' will export the API with x-amazon-apigateway-integration extensions. extensions='authorizers' will export the API with x-amazon-apigateway-authorizer extensions. postman will export the API with Postman extensions, allowing for import to the Postman tool

", + "documentation":"

A key-value map of query string parameters that specify properties of the export, depending on the requested exportType. For exportType oas30 and swagger, any combination of the following parameters are supported: extensions='integrations' or extensions='apigateway' will export the API with x-amazon-apigateway-integration extensions. extensions='authorizers' will export the API with x-amazon-apigateway-authorizer extensions. postman will export the API with Postman extensions, allowing for import to the Postman tool

", "location":"querystring" }, "accepts":{ "shape":"String", - "documentation":"

The content-type of the export, for example application/json. Currently application/json and application/yaml are supported for exportType of swagger. This should be specified in the Accept header for direct API requests.

", + "documentation":"

The content-type of the export, for example application/json. Currently application/json and application/yaml are supported for exportType of oas30 and swagger. This should be specified in the Accept header for direct API requests.

", "location":"header", "locationName":"Accept" } @@ -4816,7 +4824,7 @@ "members":{ "body":{ "shape":"Blob", - "documentation":"

The payload of the POST request to import API keys. For the payload format, see API Key File Format.

" + "documentation":"

The payload of the POST request to import API keys. For the payload format, see API Key File Format.

" }, "format":{ "shape":"ApiKeysFormat", @@ -4861,10 +4869,10 @@ }, "body":{ "shape":"Blob", - "documentation":"

[Required] Raw byte array representing the to-be-imported documentation parts. To import from a Swagger file, this is a JSON object.

" + "documentation":"

[Required] Raw byte array representing the to-be-imported documentation parts. To import from an OpenAPI file, this is a JSON object.

" } }, - "documentation":"

Import documentation parts from an external (e.g., Swagger) definition file.

", + "documentation":"

Import documentation parts from an external (e.g., OpenAPI) definition file.

", "payload":"body" }, "ImportRestApiRequest":{ @@ -4884,7 +4892,7 @@ }, "body":{ "shape":"Blob", - "documentation":"

[Required] The POST request body containing external API definitions. Currently, only Swagger definition JSON files are supported. The maximum size of the API definition file is 2MB.

" + "documentation":"

[Required] The POST request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 2MB.

" } }, "documentation":"

A POST request to import an API to API Gateway using an input of an API definition file.

", @@ -4904,7 +4912,7 @@ }, "uri":{ "shape":"String", - "documentation":"

Specifies Uniform Resource Identifier (URI) of the integration endpoint.

  • For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification, for either standard integration, where connectionType is not VPC_LINK, or private integration, where connectionType is VPC_LINK. For a private HTTP integration, the URI is not used for routing.

  • For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated AWS service (e.g., s3); and {subdomain} is a designated subdomain supported by certain AWS service for fast host-name lookup. action can be used for an AWS service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an AWS service path-based API. The ensuing service_api refers to the path to an AWS service resource, including the region of the integrated AWS service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

" + "documentation":"

Specifies Uniform Resource Identifier (URI) of the integration endpoint.

  • For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification, for either standard integration, where connectionType is not VPC_LINK, or private integration, where connectionType is VPC_LINK. For a private HTTP integration, the URI is not used for routing.

  • For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated AWS service (e.g., s3); and {subdomain} is a designated subdomain supported by certain AWS service for fast host-name lookup. action can be used for an AWS service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an AWS service path-based API. The ensuing service_api refers to the path to an AWS service resource, including the region of the integrated AWS service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

" }, "connectionType":{ "shape":"ConnectionType", @@ -4912,7 +4920,7 @@ }, "connectionId":{ "shape":"String", - "documentation":"

The (id) of the VpcLink used for the integration when connectionType=VPC_LINK and undefined, otherwise.

" + "documentation":"

The (id) of the VpcLink used for the integration when connectionType=VPC_LINK and undefined, otherwise.

" }, "credentials":{ "shape":"String", @@ -4948,10 +4956,10 @@ }, "integrationResponses":{ "shape":"MapOfIntegrationResponse", - "documentation":"

Specifies the integration's responses.

Example: Get integration responses of a method

Request

GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200 HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160607T191449Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160607/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} 
Response

The successful response returns 200 OK status and a payload as follows:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html\", \"name\": \"integrationresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"title\": \"200\" }, \"integrationresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" }, \"integrationresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" } }, \"responseParameters\": { \"method.response.header.Content-Type\": \"'application/xml'\" }, \"responseTemplates\": { \"application/json\": \"$util.urlDecode(\\\"%3CkinesisStreams%3E#foreach($stream in $input.path('$.StreamNames'))%3Cstream%3E%3Cname%3E$stream%3C/name%3E%3C/stream%3E#end%3C/kinesisStreams%3E\\\")\\n\" }, \"statusCode\": \"200\" }

" + "documentation":"

Specifies the integration's responses.

Example: Get integration responses of a method

Request

GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200 HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160607T191449Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160607/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} 
Response

The successful response returns 200 OK status and a payload as follows:

{ \"_links\": { \"curies\": { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html\", \"name\": \"integrationresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"title\": \"200\" }, \"integrationresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" }, \"integrationresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" } }, \"responseParameters\": { \"method.response.header.Content-Type\": \"'application/xml'\" }, \"responseTemplates\": { \"application/json\": \"$util.urlDecode(\\\"%3CkinesisStreams%3E#foreach($stream in $input.path('$.StreamNames'))%3Cstream%3E%3Cname%3E$stream%3C/name%3E%3C/stream%3E#end%3C/kinesisStreams%3E\\\")\\n\" }, \"statusCode\": \"200\" }

" } }, - "documentation":"

Represents an HTTP, HTTP_PROXY, AWS, AWS_PROXY, or Mock integration.

In the API Gateway console, the built-in Lambda integration is an AWS integration.
" + "documentation":"

Represents an HTTP, HTTP_PROXY, AWS, AWS_PROXY, or Mock integration.

In the API Gateway console, the built-in Lambda integration is an AWS integration.
" }, "IntegrationResponse":{ "type":"structure", @@ -4977,7 +4985,7 @@ "documentation":"

Specifies how to handle response payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following behaviors:

  • CONVERT_TO_BINARY: Converts a response payload from a Base64-encoded string to the corresponding binary blob.

  • CONVERT_TO_TEXT: Converts a response payload from a binary blob to a Base64-encoded string.

If this property is not defined, the response payload will be passed through from the integration response to the method response without modification.

" } }, - "documentation":"

Represents an integration response. The status code must map to an existing MethodResponse, and parameters and templates can be used to transform the back-end response.

" + "documentation":"

Represents an integration response. The status code must map to an existing MethodResponse, and parameters and templates can be used to transform the back-end response.

" }, "IntegrationType":{ "type":"string", @@ -5201,7 +5209,7 @@ }, "operationName":{ "shape":"String", - "documentation":"

A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in PetStore example.

" + "documentation":"

A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in PetStore example.

" }, "requestParameters":{ "shape":"MapOfStringToBoolean", @@ -5213,18 +5221,18 @@ }, "methodResponses":{ "shape":"MapOfMethodResponse", - "documentation":"

Gets a method response associated with a given HTTP status code.

The collection of method responses are encapsulated in a key-value map, where the key is a response's HTTP status code and the value is a MethodResponse resource that specifies the response returned to the caller from the back end through the integration response.

Example: Get a 200 OK response of a GET method

Request

GET /restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200 HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com Content-Length: 117 X-Amz-Date: 20160613T215008Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160613/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response

The successful response returns a 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html\", \"name\": \"methodresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200\", \"title\": \"200\" }, \"methodresponse:delete\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200\" }, \"methodresponse:update\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200\" } }, \"responseModels\": { \"application/json\": \"Empty\" }, \"responseParameters\": { \"method.response.header.operator\": false, \"method.response.header.operand_2\": false, \"method.response.header.operand_1\": false }, \"statusCode\": \"200\" }

" + "documentation":"

Gets a method response associated with a given HTTP status code.

The collection of method responses are encapsulated in a key-value map, where the key is a response's HTTP status code and the value is a MethodResponse resource that specifies the response returned to the caller from the back end through the integration response.

Example: Get a 200 OK response of a GET method

Request

GET /restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200 HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com Content-Length: 117 X-Amz-Date: 20160613T215008Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160613/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response

The successful response returns a 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html\", \"name\": \"methodresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200\", \"title\": \"200\" }, \"methodresponse:delete\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200\" }, \"methodresponse:update\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200\" } }, \"responseModels\": { \"application/json\": \"Empty\" }, \"responseParameters\": { \"method.response.header.operator\": false, \"method.response.header.operand_2\": false, \"method.response.header.operand_1\": false }, \"statusCode\": \"200\" }

" }, "methodIntegration":{ "shape":"Integration", - "documentation":"

Gets the method's integration responsible for passing the client-submitted request to the back end and performing necessary transformations to make the request compliant with the back end.

Example:

Request

GET /restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com Content-Length: 117 X-Amz-Date: 20160613T213210Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160613/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response

The successful response returns a 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": [ { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html\", \"name\": \"integration\", \"templated\": true }, { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html\", \"name\": \"integrationresponse\", \"templated\": true } ], \"self\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration\" }, \"integration:delete\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration\" }, \"integration:responses\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"integration:update\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration\" }, \"integrationresponse:put\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/{status_code}\", \"templated\": true } }, \"cacheKeyParameters\": [], \"cacheNamespace\": \"0cjtch\", \"credentials\": \"arn:aws:iam::123456789012:role/apigAwsProxyRole\", \"httpMethod\": \"POST\", \"passthroughBehavior\": \"WHEN_NO_MATCH\", \"requestTemplates\": { \"application/json\": \"{\\n \\\"a\\\": \\\"$input.params('operand1')\\\",\\n \\\"b\\\": \\\"$input.params('operand2')\\\", \\n \\\"op\\\": \\\"$input.params('operator')\\\" \\n}\" }, \"type\": \"AWS\", \"uri\": \"arn:aws:apigateway:us-west-2:lambda:path//2015-03-31/functions/arn:aws:lambda:us-west-2:123456789012:function:Calc/invocations\", \"_embedded\": { \"integration:responses\": { \"_links\": { \"self\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"integrationresponse:delete\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200\" }, \"integrationresponse:update\": { \"href\": 
\"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200\" } }, \"responseParameters\": { \"method.response.header.operator\": \"integration.response.body.op\", \"method.response.header.operand_2\": \"integration.response.body.b\", \"method.response.header.operand_1\": \"integration.response.body.a\" }, \"responseTemplates\": { \"application/json\": \"#set($res = $input.path('$'))\\n{\\n \\\"result\\\": \\\"$res.a, $res.b, $res.op => $res.c\\\",\\n \\\"a\\\" : \\\"$res.a\\\",\\n \\\"b\\\" : \\\"$res.b\\\",\\n \\\"op\\\" : \\\"$res.op\\\",\\n \\\"c\\\" : \\\"$res.c\\\"\\n}\" }, \"selectionPattern\": \"\", \"statusCode\": \"200\" } } }

" + "documentation":"

Gets the method's integration responsible for passing the client-submitted request to the back end and performing necessary transformations to make the request compliant with the back end.

Example:

Request

GET /restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com Content-Length: 117 X-Amz-Date: 20160613T213210Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160613/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response

The successful response returns a 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": [ { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html\", \"name\": \"integration\", \"templated\": true }, { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html\", \"name\": \"integrationresponse\", \"templated\": true } ], \"self\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration\" }, \"integration:delete\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration\" }, \"integration:responses\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"integration:update\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration\" }, \"integrationresponse:put\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/{status_code}\", \"templated\": true } }, \"cacheKeyParameters\": [], \"cacheNamespace\": \"0cjtch\", \"credentials\": \"arn:aws:iam::123456789012:role/apigAwsProxyRole\", \"httpMethod\": \"POST\", \"passthroughBehavior\": \"WHEN_NO_MATCH\", \"requestTemplates\": { \"application/json\": \"{\\n \\\"a\\\": \\\"$input.params('operand1')\\\",\\n \\\"b\\\": \\\"$input.params('operand2')\\\", \\n \\\"op\\\": \\\"$input.params('operator')\\\" \\n}\" }, \"type\": \"AWS\", \"uri\": \"arn:aws:apigateway:us-west-2:lambda:path//2015-03-31/functions/arn:aws:lambda:us-west-2:123456789012:function:Calc/invocations\", \"_embedded\": { \"integration:responses\": { \"_links\": { \"self\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"integrationresponse:delete\": { \"href\": \"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200\" }, \"integrationresponse:update\": { \"href\": 
\"/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200\" } }, \"responseParameters\": { \"method.response.header.operator\": \"integration.response.body.op\", \"method.response.header.operand_2\": \"integration.response.body.b\", \"method.response.header.operand_1\": \"integration.response.body.a\" }, \"responseTemplates\": { \"application/json\": \"#set($res = $input.path('$'))\\n{\\n \\\"result\\\": \\\"$res.a, $res.b, $res.op => $res.c\\\",\\n \\\"a\\\" : \\\"$res.a\\\",\\n \\\"b\\\" : \\\"$res.b\\\",\\n \\\"op\\\" : \\\"$res.op\\\",\\n \\\"c\\\" : \\\"$res.c\\\"\\n}\" }, \"selectionPattern\": \"\", \"statusCode\": \"200\" } } }

" }, "authorizationScopes":{ "shape":"ListOfString", "documentation":"

A list of authorization scopes configured on the method. The scopes are used with a COGNITO_USER_POOLS authorizer to authorize the method invocation. The authorization works by matching the method scopes against the scopes parsed from the access token in the incoming request. The method invocation is authorized if any method scopes matches a claimed scope in the access token. Otherwise, the invocation is not authorized. When the method scope is configured, the client must provide an access token instead of an identity token for authorization purposes.

" } }, - "documentation":"

Represents a client-facing interface by which the client calls the API to access back-end resources. A Method resource is integrated with an Integration resource. Both consist of a request and one or more responses. The method request takes the client input that is passed to the back end through the integration request. A method response returns the output from the back end to the client through an integration response. A method request is embodied in a Method resource, whereas an integration request is embodied in an Integration resource. On the other hand, a method response is represented by a MethodResponse resource, whereas an integration response is represented by an IntegrationResponse resource.

Example: Retrive the GET method on a specified resource

Request

The following example request retrieves the information about the GET method on an API resource (3kzxbg5sa2) of an API (fugvjdxtri).

GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160603T210259Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160603/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response

The successful response returns a 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": [ { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html\", \"name\": \"integration\", \"templated\": true }, { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html\", \"name\": \"integrationresponse\", \"templated\": true }, { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-{rel}.html\", \"name\": \"method\", \"templated\": true }, { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html\", \"name\": \"methodresponse\", \"templated\": true } ], \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\", \"name\": \"GET\", \"title\": \"GET\" }, \"integration:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"method:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\" }, \"method:integration\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"method:responses\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"method:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\" }, \"methodresponse:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/{status_code}\", \"templated\": true } }, \"apiKeyRequired\": true, \"authorizationType\": \"NONE\", \"httpMethod\": \"GET\", \"_embedded\": { \"method:integration\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integration:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integration:responses\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": \"200\" 
}, \"integration:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integrationresponse:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/{status_code}\", \"templated\": true } }, \"cacheKeyParameters\": [], \"cacheNamespace\": \"3kzxbg5sa2\", \"credentials\": \"arn:aws:iam::123456789012:role/apigAwsProxyRole\", \"httpMethod\": \"POST\", \"passthroughBehavior\": \"WHEN_NO_MATCH\", \"requestParameters\": { \"integration.request.header.Content-Type\": \"'application/x-amz-json-1.1'\" }, \"requestTemplates\": { \"application/json\": \"{\\n}\" }, \"type\": \"AWS\", \"uri\": \"arn:aws:apigateway:us-east-1:kinesis:action/ListStreams\", \"_embedded\": { \"integration:responses\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"integrationresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" }, \"integrationresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" } }, \"responseParameters\": { \"method.response.header.Content-Type\": \"'application/xml'\" }, \"responseTemplates\": { \"application/json\": \"$util.urlDecode(\\\"%3CkinesisStreams%3E%23foreach(%24stream%20in%20%24input.path(%27%24.StreamNames%27))%3Cstream%3E%3Cname%3E%24stream%3C%2Fname%3E%3C%2Fstream%3E%23end%3C%2FkinesisStreams%3E\\\")\" }, \"statusCode\": \"200\" } } }, \"method:responses\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"methodresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" }, \"methodresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" } }, 
\"responseModels\": { \"application/json\": \"Empty\" }, \"responseParameters\": { \"method.response.header.Content-Type\": false }, \"statusCode\": \"200\" } } }

In the example above, the response template for the 200 OK response maps the JSON output from the ListStreams action in the back end to an XML output. The mapping template is URL-encoded as %3CkinesisStreams%3E%23foreach(%24stream%20in%20%24input.path(%27%24.StreamNames%27))%3Cstream%3E%3Cname%3E%24stream%3C%2Fname%3E%3C%2Fstream%3E%23end%3C%2FkinesisStreams%3E and the output is decoded using the $util.urlDecode() helper function.

" + "documentation":"

Represents a client-facing interface by which the client calls the API to access back-end resources. A Method resource is integrated with an Integration resource. Both consist of a request and one or more responses. The method request takes the client input that is passed to the back end through the integration request. A method response returns the output from the back end to the client through an integration response. A method request is embodied in a Method resource, whereas an integration request is embodied in an Integration resource. On the other hand, a method response is represented by a MethodResponse resource, whereas an integration response is represented by an IntegrationResponse resource.

Example: Retrieve the GET method on a specified resource

Request

The following example request retrieves the information about the GET method on an API resource (3kzxbg5sa2) of an API (fugvjdxtri).

GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160603T210259Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160603/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response

The successful response returns a 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": [ { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html\", \"name\": \"integration\", \"templated\": true }, { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html\", \"name\": \"integrationresponse\", \"templated\": true }, { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-{rel}.html\", \"name\": \"method\", \"templated\": true }, { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html\", \"name\": \"methodresponse\", \"templated\": true } ], \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\", \"name\": \"GET\", \"title\": \"GET\" }, \"integration:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"method:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\" }, \"method:integration\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"method:responses\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"method:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\" }, \"methodresponse:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/{status_code}\", \"templated\": true } }, \"apiKeyRequired\": true, \"authorizationType\": \"NONE\", \"httpMethod\": \"GET\", \"_embedded\": { \"method:integration\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integration:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integration:responses\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": 
\"200\" }, \"integration:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integrationresponse:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/{status_code}\", \"templated\": true } }, \"cacheKeyParameters\": [], \"cacheNamespace\": \"3kzxbg5sa2\", \"credentials\": \"arn:aws:iam::123456789012:role/apigAwsProxyRole\", \"httpMethod\": \"POST\", \"passthroughBehavior\": \"WHEN_NO_MATCH\", \"requestParameters\": { \"integration.request.header.Content-Type\": \"'application/x-amz-json-1.1'\" }, \"requestTemplates\": { \"application/json\": \"{\\n}\" }, \"type\": \"AWS\", \"uri\": \"arn:aws:apigateway:us-east-1:kinesis:action/ListStreams\", \"_embedded\": { \"integration:responses\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"integrationresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" }, \"integrationresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" } }, \"responseParameters\": { \"method.response.header.Content-Type\": \"'application/xml'\" }, \"responseTemplates\": { \"application/json\": \"$util.urlDecode(\\\"%3CkinesisStreams%3E%23foreach(%24stream%20in%20%24input.path(%27%24.StreamNames%27))%3Cstream%3E%3Cname%3E%24stream%3C%2Fname%3E%3C%2Fstream%3E%23end%3C%2FkinesisStreams%3E\\\")\" }, \"statusCode\": \"200\" } } }, \"method:responses\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"methodresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" }, \"methodresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" } }, 
\"responseModels\": { \"application/json\": \"Empty\" }, \"responseParameters\": { \"method.response.header.Content-Type\": false }, \"statusCode\": \"200\" } } }

In the example above, the response template for the 200 OK response maps the JSON output from the ListStreams action in the back end to an XML output. The mapping template is URL-encoded as %3CkinesisStreams%3E%23foreach(%24stream%20in%20%24input.path(%27%24.StreamNames%27))%3Cstream%3E%3Cname%3E%24stream%3C%2Fname%3E%3C%2Fstream%3E%23end%3C%2FkinesisStreams%3E and the output is decoded using the $util.urlDecode() helper function.

" }, "MethodResponse":{ "type":"structure", @@ -5242,7 +5250,7 @@ "documentation":"

Specifies the Model resources used for the response's content-type. Response models are represented as a key/value map, with a content-type as the key and a Model name as the value.

" } }, - "documentation":"

Represents a method response of a given HTTP status code returned to the client. The method response is passed from the back end through the associated integration response that can be transformed using a mapping template.

Example: A MethodResponse instance of an API

Request

The example request retrieves a MethodResponse of the 200 status code.

GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200 HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160603T222952Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160603/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response

The successful response returns 200 OK status and a payload as follows:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html\", \"name\": \"methodresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\", \"title\": \"200\" }, \"methodresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" }, \"methodresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" } }, \"responseModels\": { \"application/json\": \"Empty\" }, \"responseParameters\": { \"method.response.header.Content-Type\": false }, \"statusCode\": \"200\" }

" + "documentation":"

Represents a method response of a given HTTP status code returned to the client. The method response is passed from the back end through the associated integration response that can be transformed using a mapping template.

Example: A MethodResponse instance of an API

Request

The example request retrieves a MethodResponse of the 200 status code.

GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200 HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160603T222952Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160603/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response

The successful response returns 200 OK status and a payload as follows:

{ \"_links\": { \"curies\": { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html\", \"name\": \"methodresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\", \"title\": \"200\" }, \"methodresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" }, \"methodresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" } }, \"responseModels\": { \"application/json\": \"Empty\" }, \"responseParameters\": { \"method.response.header.Content-Type\": false }, \"statusCode\": \"200\" }

" }, "MethodSetting":{ "type":"structure", @@ -5253,11 +5261,11 @@ }, "loggingLevel":{ "shape":"String", - "documentation":"

Specifies the logging level for this method, which effects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, and the available levels are OFF, ERROR, and INFO.

" + "documentation":"

Specifies the logging level for this method, which affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, and the available levels are OFF, ERROR, and INFO.

" }, "dataTraceEnabled":{ "shape":"Boolean", - "documentation":"

Specifies whether data trace logging is enabled for this method, which effects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/dataTrace, and the value is a Boolean.

" + "documentation":"

Specifies whether data trace logging is enabled for this method, which affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/dataTrace, and the value is a Boolean.

" }, "throttlingBurstLimit":{ "shape":"Integer", @@ -5328,7 +5336,7 @@ "documentation":"

The content-type for the model.

" } }, - "documentation":"

Represents the data structure of a method's request or response payload.

A request model defines the data structure of the client-supplied request payload. A response model defines the data structure of the response payload returned by the back end. Although not required, models are useful for mapping payloads between the front end and back end.

A model is used for generating an API's SDK, validating the input request body, and creating a skeletal mapping template.

" + "documentation":"

Represents the data structure of a method's request or response payload.

A request model defines the data structure of the client-supplied request payload. A response model defines the data structure of the response payload returned by the back end. Although not required, models are useful for mapping payloads between the front end and back end.

A model is used for generating an API's SDK, validating the input request body, and creating a skeletal mapping template.

" }, "Models":{ "type":"structure", @@ -5340,7 +5348,7 @@ "locationName":"item" } }, - "documentation":"

Represents a collection of Model resources.

" + "documentation":"

Represents a collection of Model resources.

" }, "NotFoundException":{ "type":"structure", @@ -5377,7 +5385,7 @@ }, "value":{ "shape":"String", - "documentation":"

The new target value of the update operation. It is applicable for the add or replace operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{\"a\": ...}'. In a Windows shell, see Using JSON for Parameters.

" + "documentation":"

The new target value of the update operation. It is applicable for the add or replace operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{\"a\": ...}'. In a Windows shell, see Using JSON for Parameters.

" }, "from":{ "shape":"String", @@ -5464,7 +5472,7 @@ }, "uri":{ "shape":"String", - "documentation":"

Specifies Uniform Resource Identifier (URI) of the integration endpoint.

  • For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification, for either standard integration, where connectionType is not VPC_LINK, or private integration, where connectionType is VPC_LINK. For a private HTTP integration, the URI is not used for routing.

  • For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated AWS service (e.g., s3); and {subdomain} is a designated subdomain supported by certain AWS service for fast host-name lookup. action can be used for an AWS service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an AWS service path-based API. The ensuing service_api refers to the path to an AWS service resource, including the region of the integrated AWS service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

" + "documentation":"

Specifies Uniform Resource Identifier (URI) of the integration endpoint.

  • For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification, for either standard integration, where connectionType is not VPC_LINK, or private integration, where connectionType is VPC_LINK. For a private HTTP integration, the URI is not used for routing.

  • For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated AWS service (e.g., s3); and {subdomain} is a designated subdomain supported by certain AWS service for fast host-name lookup. action can be used for an AWS service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an AWS service path-based API. The ensuing service_api refers to the path to an AWS service resource, including the region of the integrated AWS service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

" }, "connectionType":{ "shape":"ConnectionType", @@ -5472,7 +5480,7 @@ }, "connectionId":{ "shape":"String", - "documentation":"

The (id) of the VpcLink used for the integration when connectionType=VPC_LINK and undefined, otherwise.

" + "documentation":"

The (id) of the VpcLink used for the integration when connectionType=VPC_LINK and undefined, otherwise.

" }, "credentials":{ "shape":"String", @@ -5602,7 +5610,7 @@ }, "operationName":{ "shape":"String", - "documentation":"

A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in PetStore example.

" + "documentation":"

A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in PetStore example.

" }, "requestParameters":{ "shape":"MapOfStringToBoolean", @@ -5706,7 +5714,7 @@ }, "body":{ "shape":"Blob", - "documentation":"

[Required] The PUT request body containing external API definitions. Currently, only Swagger definition JSON files are supported. The maximum size of the API definition file is 2MB.

" + "documentation":"

[Required] The PUT request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 2MB.

" } }, "documentation":"

A PUT request to update an existing API, with external API definitions specified as the request body.

", @@ -5758,7 +5766,7 @@ "documentation":"

A Boolean flag to indicate whether to validate request parameters (true) or not (false).

" } }, - "documentation":"

A set of validation rules for incoming Method requests.

In Swagger, a RequestValidator of an API is defined by the x-amazon-apigateway-request-validators.requestValidator object. It the referenced using the x-amazon-apigateway-request-validator property.

" + "documentation":"

A set of validation rules for incoming Method requests.

In OpenAPI, a RequestValidator of an API is defined by the x-amazon-apigateway-request-validators.requestValidator object. It is referenced using the x-amazon-apigateway-request-validator property.

" }, "RequestValidators":{ "type":"structure", @@ -5770,7 +5778,7 @@ "locationName":"item" } }, - "documentation":"

A collection of RequestValidator resources of a given RestApi.

In Swagger, the RequestValidators of an API is defined by the x-amazon-apigateway-request-validators extension.

" + "documentation":"

A collection of RequestValidator resources of a given RestApi.

In OpenAPI, the RequestValidators of an API is defined by the x-amazon-apigateway-request-validators extension.

" }, "Resource":{ "type":"structure", @@ -5793,10 +5801,10 @@ }, "resourceMethods":{ "shape":"MapOfMethod", - "documentation":"

Gets an API resource's method of a given HTTP verb.

The resource methods are a map of methods indexed by methods' HTTP verbs enabled on the resource. This method map is included in the 200 OK response of the GET /restapis/{restapi_id}/resources/{resource_id} or GET /restapis/{restapi_id}/resources/{resource_id}?embed=methods request.

Example: Get the GET method of an API resource

Request
GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20170223T031827Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20170223/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response
{ \"_links\": { \"curies\": [ { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html\", \"name\": \"integration\", \"templated\": true }, { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html\", \"name\": \"integrationresponse\", \"templated\": true }, { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-{rel}.html\", \"name\": \"method\", \"templated\": true }, { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html\", \"name\": \"methodresponse\", \"templated\": true } ], \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\", \"name\": \"GET\", \"title\": \"GET\" }, \"integration:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"method:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\" }, \"method:integration\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"method:responses\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"method:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\" }, \"methodresponse:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/{status_code}\", \"templated\": true } }, \"apiKeyRequired\": false, \"authorizationType\": \"NONE\", \"httpMethod\": \"GET\", \"_embedded\": { \"method:integration\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integration:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integration:responses\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": \"200\" 
}, \"integration:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integrationresponse:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/{status_code}\", \"templated\": true } }, \"cacheKeyParameters\": [], \"cacheNamespace\": \"3kzxbg5sa2\", \"credentials\": \"arn:aws:iam::123456789012:role/apigAwsProxyRole\", \"httpMethod\": \"POST\", \"passthroughBehavior\": \"WHEN_NO_MATCH\", \"requestParameters\": { \"integration.request.header.Content-Type\": \"'application/x-amz-json-1.1'\" }, \"requestTemplates\": { \"application/json\": \"{\\n}\" }, \"type\": \"AWS\", \"uri\": \"arn:aws:apigateway:us-east-1:kinesis:action/ListStreams\", \"_embedded\": { \"integration:responses\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"integrationresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" }, \"integrationresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" } }, \"responseParameters\": { \"method.response.header.Content-Type\": \"'application/xml'\" }, \"responseTemplates\": { \"application/json\": \"$util.urlDecode(\\\"%3CkinesisStreams%3E#foreach($stream in $input.path('$.StreamNames'))%3Cstream%3E%3Cname%3E$stream%3C/name%3E%3C/stream%3E#end%3C/kinesisStreams%3E\\\")\\n\" }, \"statusCode\": \"200\" } } }, \"method:responses\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"methodresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" }, \"methodresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" } }, \"responseModels\": { 
\"application/json\": \"Empty\" }, \"responseParameters\": { \"method.response.header.Content-Type\": false }, \"statusCode\": \"200\" } } }

If the OPTIONS is enabled on the resource, you can follow the example here to get that method. Just replace the GET of the last path segment in the request URL with OPTIONS.

" + "documentation":"

Gets an API resource's method of a given HTTP verb.

The resource methods are a map of methods indexed by methods' HTTP verbs enabled on the resource. This method map is included in the 200 OK response of the GET /restapis/{restapi_id}/resources/{resource_id} or GET /restapis/{restapi_id}/resources/{resource_id}?embed=methods request.

Example: Get the GET method of an API resource

Request
GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20170223T031827Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20170223/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
Response
{ \"_links\": { \"curies\": [ { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html\", \"name\": \"integration\", \"templated\": true }, { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html\", \"name\": \"integrationresponse\", \"templated\": true }, { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-{rel}.html\", \"name\": \"method\", \"templated\": true }, { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html\", \"name\": \"methodresponse\", \"templated\": true } ], \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\", \"name\": \"GET\", \"title\": \"GET\" }, \"integration:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"method:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\" }, \"method:integration\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"method:responses\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"method:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET\" }, \"methodresponse:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/{status_code}\", \"templated\": true } }, \"apiKeyRequired\": false, \"authorizationType\": \"NONE\", \"httpMethod\": \"GET\", \"_embedded\": { \"method:integration\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integration:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integration:responses\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": 
\"200\" }, \"integration:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration\" }, \"integrationresponse:put\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/{status_code}\", \"templated\": true } }, \"cacheKeyParameters\": [], \"cacheNamespace\": \"3kzxbg5sa2\", \"credentials\": \"arn:aws:iam::123456789012:role/apigAwsProxyRole\", \"httpMethod\": \"POST\", \"passthroughBehavior\": \"WHEN_NO_MATCH\", \"requestParameters\": { \"integration.request.header.Content-Type\": \"'application/x-amz-json-1.1'\" }, \"requestTemplates\": { \"application/json\": \"{\\n}\" }, \"type\": \"AWS\", \"uri\": \"arn:aws:apigateway:us-east-1:kinesis:action/ListStreams\", \"_embedded\": { \"integration:responses\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"integrationresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" }, \"integrationresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" } }, \"responseParameters\": { \"method.response.header.Content-Type\": \"'application/xml'\" }, \"responseTemplates\": { \"application/json\": \"$util.urlDecode(\\\"%3CkinesisStreams%3E#foreach($stream in $input.path('$.StreamNames'))%3Cstream%3E%3Cname%3E$stream%3C/name%3E%3C/stream%3E#end%3C/kinesisStreams%3E\\\")\\n\" }, \"statusCode\": \"200\" } } }, \"method:responses\": { \"_links\": { \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\", \"name\": \"200\", \"title\": \"200\" }, \"methodresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" }, \"methodresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200\" } }, \"responseModels\": { 
\"application/json\": \"Empty\" }, \"responseParameters\": { \"method.response.header.Content-Type\": false }, \"statusCode\": \"200\" } } }

If the OPTIONS is enabled on the resource, you can follow the example here to get that method. Just replace the GET of the last path segment in the request URL with OPTIONS.

" } }, - "documentation":"

Represents an API resource.

" + "documentation":"

Represents an API resource.

" }, "Resources":{ "type":"structure", @@ -5808,7 +5816,7 @@ "locationName":"item" } }, - "documentation":"

Represents a collection of Resource resources.

" + "documentation":"

Represents a collection of Resource resources.

" }, "RestApi":{ "type":"structure", @@ -5858,7 +5866,7 @@ "documentation":"A stringified JSON policy document that applies to this RestApi regardless of the caller and Method configuration." } }, - "documentation":"

Represents a REST API.

" + "documentation":"

Represents a REST API.

" }, "RestApis":{ "type":"structure", @@ -5870,7 +5878,7 @@ "locationName":"item" } }, - "documentation":"

Contains references to your APIs and links that guide you in how to interact with your collection. A collection offers a paginated view of your APIs.

" + "documentation":"

Contains references to your APIs and links that guide you in how to interact with your collection. A collection offers a paginated view of your APIs.

" }, "SdkConfigurationProperty":{ "type":"structure", @@ -6021,6 +6029,10 @@ "shape":"CanarySettings", "documentation":"

Settings for the canary deployment in this stage.

" }, + "tracingEnabled":{ + "shape":"Boolean", + "documentation":"

Specifies whether active tracing with X-ray is enabled for the Stage.

" + }, "tags":{ "shape":"MapOfStringToString", "documentation":"

The collection of tags. Each tag element is associated with a given resource.

" @@ -6034,7 +6046,7 @@ "documentation":"

The timestamp when the stage last updated.

" } }, - "documentation":"

Represents a unique identifier for a version of a deployed RestApi that is callable by users.

" + "documentation":"

Represents a unique identifier for a version of a deployed RestApi that is callable by users.

" }, "StageKey":{ "type":"structure", @@ -6058,7 +6070,7 @@ "documentation":"

The current page of elements from this collection.

" } }, - "documentation":"

A list of Stage resources that are associated with the ApiKey resource.

" + "documentation":"

A list of Stage resources that are associated with the ApiKey resource.

" }, "StatusCode":{ "type":"string", @@ -6101,10 +6113,10 @@ "members":{ "value":{ "shape":"String", - "documentation":"

The Apache Velocity Template Language (VTL) template content used for the template resource.

" + "documentation":"

The Apache Velocity Template Language (VTL) template content used for the template resource.

" } }, - "documentation":"

Represents a mapping template used to transform a payload.

" + "documentation":"

Represents a mapping template used to transform a payload.

" }, "TestInvokeAuthorizerRequest":{ "type":"structure", @@ -6174,7 +6186,7 @@ "authorization":{"shape":"MapOfStringToList"}, "claims":{ "shape":"MapOfStringToString", - "documentation":"

The open identity claims, with any supported custom attributes, returned from the Cognito Your User Pool configured for the API.

" + "documentation":"

The open identity claims, with any supported custom attributes, returned from the Cognito Your User Pool configured for the API.

" } }, "documentation":"

Represents the response of the test invoke request for a custom Authorizer

" @@ -6252,7 +6264,7 @@ "documentation":"

The execution latency of the test invoke request.

" } }, - "documentation":"

Represents the response of the test invoke request in the HTTP method.

" + "documentation":"

Represents the response of the test invoke request in the HTTP method.

" }, "ThrottleSettings":{ "type":"structure", @@ -6888,7 +6900,7 @@ "locationName":"values" } }, - "documentation":"

Represents the usage data of a usage plan.

" + "documentation":"

Represents the usage data of a usage plan.

" }, "UsagePlan":{ "type":"structure", @@ -6922,7 +6934,7 @@ "documentation":"

The AWS Markeplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.

" } }, - "documentation":"

Represents a usage plan than can specify who can assess associated API stages with specified request limits and quotas.

In a usage plan, you associate an API by specifying the API's Id and a stage name of the specified API. You add plan customers by adding API keys to the plan.

" + "documentation":"

Represents a usage plan that can specify who can access associated API stages with specified request limits and quotas.

In a usage plan, you associate an API by specifying the API's Id and a stage name of the specified API. You add plan customers by adding API keys to the plan.

" }, "UsagePlanKey":{ "type":"structure", @@ -6944,7 +6956,7 @@ "documentation":"

The name of a usage plan key.

" } }, - "documentation":"

Represents a usage plan key to identify a plan customer.

To associate an API stage with a selected API key in a usage plan, you must create a UsagePlanKey resource to represent the selected ApiKey.

\" " + "documentation":"

Represents a usage plan key to identify a plan customer.

To associate an API stage with a selected API key in a usage plan, you must create a UsagePlanKey resource to represent the selected ApiKey.

\" " }, "UsagePlanKeys":{ "type":"structure", @@ -6956,7 +6968,7 @@ "locationName":"item" } }, - "documentation":"

Represents the collection of usage plan keys added to usage plans for the associated API keys and, possibly, other types of keys.

" + "documentation":"

Represents the collection of usage plan keys added to usage plans for the associated API keys and, possibly, other types of keys.

" }, "UsagePlans":{ "type":"structure", @@ -6968,7 +6980,7 @@ "locationName":"item" } }, - "documentation":"

Represents a collection of usage plans for an AWS account.

" + "documentation":"

Represents a collection of usage plans for an AWS account.

" }, "VpcLink":{ "type":"structure", @@ -7019,7 +7031,7 @@ "locationName":"item" } }, - "documentation":"

The collection of VPC links under the caller's account in a region.

" + "documentation":"

The collection of VPC links under the caller's account in a region.

" } }, "documentation":"Amazon API Gateway

Amazon API Gateway helps developers deliver robust, secure, and scalable mobile and web application back ends. API Gateway allows developers to securely connect mobile and web applications to APIs that run on AWS Lambda, Amazon EC2, or other publicly addressable web services that are hosted outside of AWS.

" diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index 5bc8b0af..062f32e0 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -296,7 +296,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieves a list that describes the permissions for a private image that you own.

" + "documentation":"

Retrieves a list that describes the permissions for shared AWS account IDs on a private image that you own.

" }, "DescribeImages":{ "name":"DescribeImages", @@ -310,7 +310,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieves a list that describes one or more specified images, if the image names are provided. Otherwise, all images in the account are described.

" + "documentation":"

Retrieves a list that describes one or more specified images, if the image names or image ARNs are provided. Otherwise, all images in the account are described.

" }, "DescribeSessions":{ "name":"DescribeSessions", @@ -616,6 +616,39 @@ }, "documentation":"

Describes an application in the application catalog.

" }, + "ApplicationSettings":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Enables or disables persistent application settings for users during their streaming sessions.

" + }, + "SettingsGroup":{ + "shape":"SettingsGroup", + "documentation":"

The path prefix for the S3 bucket where users’ persistent application settings are stored. You can allow the same persistent application settings to be used across multiple stacks by specifying the same settings group for each stack.

" + } + }, + "documentation":"

The persistent application settings for users of a stack.

" + }, + "ApplicationSettingsResponse":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Specifies whether persistent application settings are enabled for users during their streaming sessions.

" + }, + "SettingsGroup":{ + "shape":"SettingsGroup", + "documentation":"

The path prefix for the S3 bucket where users’ persistent application settings are stored.

" + }, + "S3BucketName":{ + "shape":"String", + "documentation":"

The S3 bucket where users’ persistent application settings are stored. When persistent application settings are enabled for the first time for an account in an AWS Region, an S3 bucket is created. The bucket is unique to the AWS account and the Region.

" + } + }, + "documentation":"

Describes the persistent application settings for users of a stack.

" + }, "Applications":{ "type":"list", "member":{"shape":"Application"} @@ -970,6 +1003,10 @@ "UserSettings":{ "shape":"UserSettingList", "documentation":"

The actions that are enabled or disabled for users during their streaming sessions. By default, these actions are enabled.

" + }, + "ApplicationSettings":{ + "shape":"ApplicationSettings", + "documentation":"

The persistent application settings for users of a stack. When these settings are enabled, changes that users make to applications and Windows settings are automatically saved after each session and applied to the next session.

" } } }, @@ -1270,7 +1307,7 @@ "members":{ "Names":{ "shape":"StringList", - "documentation":"

The names of the images to describe.

" + "documentation":"

The names of the public or private images to describe.

" }, "Arns":{ "shape":"ArnList", @@ -2190,6 +2227,10 @@ "EXPIRED" ] }, + "SettingsGroup":{ + "type":"string", + "max":100 + }, "SharedImagePermissions":{ "type":"structure", "required":[ @@ -2255,6 +2296,10 @@ "UserSettings":{ "shape":"UserSettingList", "documentation":"

The actions that are enabled or disabled for users during their streaming sessions. By default these actions are enabled.

" + }, + "ApplicationSettings":{ + "shape":"ApplicationSettingsResponse", + "documentation":"

The persistent application settings for users of the stack.

" } }, "documentation":"

Describes a stack.

" @@ -2666,6 +2711,10 @@ "UserSettings":{ "shape":"UserSettingList", "documentation":"

The actions that are enabled or disabled for users during their streaming sessions. By default, these actions are enabled.

" + }, + "ApplicationSettings":{ + "shape":"ApplicationSettings", + "documentation":"

The persistent application settings for users of a stack. When these settings are enabled, changes that users make to applications and Windows settings are automatically saved after each session and applied to the next session.

" } } }, diff --git a/botocore/data/cloudfront/2014-05-31/service-2.json b/botocore/data/cloudfront/2014-05-31/service-2.json index 975a6496..9410f8f9 100644 --- a/botocore/data/cloudfront/2014-05-31/service-2.json +++ b/botocore/data/cloudfront/2014-05-31/service-2.json @@ -5,6 +5,7 @@ "globalEndpoint":"cloudfront.amazonaws.com", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4", "protocol":"rest-xml" }, diff --git a/botocore/data/cloudfront/2014-10-21/service-2.json b/botocore/data/cloudfront/2014-10-21/service-2.json index 8e751657..7a7a2799 100644 --- a/botocore/data/cloudfront/2014-10-21/service-2.json +++ b/botocore/data/cloudfront/2014-10-21/service-2.json @@ -5,6 +5,7 @@ "globalEndpoint":"cloudfront.amazonaws.com", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4", "protocol":"rest-xml" }, diff --git a/botocore/data/cloudfront/2014-11-06/service-2.json b/botocore/data/cloudfront/2014-11-06/service-2.json index c58646ef..d1afd52a 100644 --- a/botocore/data/cloudfront/2014-11-06/service-2.json +++ b/botocore/data/cloudfront/2014-11-06/service-2.json @@ -6,6 +6,7 @@ "globalEndpoint":"cloudfront.amazonaws.com", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4", "protocol":"rest-xml" }, diff --git a/botocore/data/cloudfront/2015-04-17/service-2.json b/botocore/data/cloudfront/2015-04-17/service-2.json index 53756041..b900b163 100644 --- a/botocore/data/cloudfront/2015-04-17/service-2.json +++ b/botocore/data/cloudfront/2015-04-17/service-2.json @@ -6,6 +6,7 @@ "globalEndpoint":"cloudfront.amazonaws.com", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4", "protocol":"rest-xml" }, diff --git a/botocore/data/cloudfront/2015-07-27/service-2.json 
b/botocore/data/cloudfront/2015-07-27/service-2.json index f909a548..5ebd2f4c 100644 --- a/botocore/data/cloudfront/2015-07-27/service-2.json +++ b/botocore/data/cloudfront/2015-07-27/service-2.json @@ -6,6 +6,7 @@ "globalEndpoint":"cloudfront.amazonaws.com", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4", "protocol":"rest-xml" }, diff --git a/botocore/data/cloudfront/2015-09-17/service-2.json b/botocore/data/cloudfront/2015-09-17/service-2.json index 6e2ac454..277883dd 100644 --- a/botocore/data/cloudfront/2015-09-17/service-2.json +++ b/botocore/data/cloudfront/2015-09-17/service-2.json @@ -7,6 +7,7 @@ "protocol":"rest-xml", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4" }, "operations":{ diff --git a/botocore/data/cloudfront/2016-01-13/service-2.json b/botocore/data/cloudfront/2016-01-13/service-2.json index d66c803b..22f148bf 100644 --- a/botocore/data/cloudfront/2016-01-13/service-2.json +++ b/botocore/data/cloudfront/2016-01-13/service-2.json @@ -7,6 +7,7 @@ "protocol":"rest-xml", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4" }, "operations":{ diff --git a/botocore/data/cloudfront/2016-01-28/service-2.json b/botocore/data/cloudfront/2016-01-28/service-2.json index 18046819..59385f72 100644 --- a/botocore/data/cloudfront/2016-01-28/service-2.json +++ b/botocore/data/cloudfront/2016-01-28/service-2.json @@ -7,6 +7,7 @@ "protocol":"rest-xml", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4" }, "operations":{ diff --git a/botocore/data/cloudfront/2016-08-01/service-2.json b/botocore/data/cloudfront/2016-08-01/service-2.json index 3e5291e7..8b830bd0 100644 --- a/botocore/data/cloudfront/2016-08-01/service-2.json +++ 
b/botocore/data/cloudfront/2016-08-01/service-2.json @@ -7,6 +7,7 @@ "protocol":"rest-xml", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4" }, "operations":{ diff --git a/botocore/data/cloudfront/2016-08-20/service-2.json b/botocore/data/cloudfront/2016-08-20/service-2.json index d0681d35..09fa631b 100644 --- a/botocore/data/cloudfront/2016-08-20/service-2.json +++ b/botocore/data/cloudfront/2016-08-20/service-2.json @@ -7,6 +7,7 @@ "protocol":"rest-xml", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4" }, "operations":{ diff --git a/botocore/data/cloudfront/2016-09-07/service-2.json b/botocore/data/cloudfront/2016-09-07/service-2.json index 15b6d9bd..64747f92 100644 --- a/botocore/data/cloudfront/2016-09-07/service-2.json +++ b/botocore/data/cloudfront/2016-09-07/service-2.json @@ -7,6 +7,7 @@ "protocol":"rest-xml", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4" }, "operations":{ diff --git a/botocore/data/cloudfront/2016-09-29/service-2.json b/botocore/data/cloudfront/2016-09-29/service-2.json index c920205e..e1cbcb41 100644 --- a/botocore/data/cloudfront/2016-09-29/service-2.json +++ b/botocore/data/cloudfront/2016-09-29/service-2.json @@ -7,6 +7,7 @@ "protocol":"rest-xml", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4" }, "operations":{ diff --git a/botocore/data/cloudfront/2016-11-25/service-2.json b/botocore/data/cloudfront/2016-11-25/service-2.json index dd90e013..304d0ad8 100644 --- a/botocore/data/cloudfront/2016-11-25/service-2.json +++ b/botocore/data/cloudfront/2016-11-25/service-2.json @@ -7,6 +7,7 @@ "protocol":"rest-xml", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", 
"signatureVersion":"v4", "uid":"cloudfront-2016-11-25" }, diff --git a/botocore/data/cloudfront/2017-03-25/service-2.json b/botocore/data/cloudfront/2017-03-25/service-2.json index 092482e1..a424317e 100644 --- a/botocore/data/cloudfront/2017-03-25/service-2.json +++ b/botocore/data/cloudfront/2017-03-25/service-2.json @@ -7,6 +7,7 @@ "protocol":"rest-xml", "serviceAbbreviation":"CloudFront", "serviceFullName":"Amazon CloudFront", + "serviceId":"CloudFront", "signatureVersion":"v4", "uid":"cloudfront-2017-03-25" }, diff --git a/botocore/data/cloudfront/2018-06-18/service-2.json b/botocore/data/cloudfront/2018-06-18/service-2.json index 6bd2b439..74027b2e 100644 --- a/botocore/data/cloudfront/2018-06-18/service-2.json +++ b/botocore/data/cloudfront/2018-06-18/service-2.json @@ -84,7 +84,7 @@ {"shape":"IllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior"}, {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"} ], - "documentation":"

Creates a new web distribution. Send a POST request to the /CloudFront API version/distribution/distribution ID resource.

" + "documentation":"

Creates a new web distribution. You create a CloudFront distribution to tell CloudFront where you want content to be delivered from, and the details about how to track and manage content delivery. Send a POST request to the /CloudFront API version/distribution/distribution ID resource.

When you update a distribution, there are more required fields than when you create a distribution. When you update your distribution by using UpdateDistribution, follow the steps included in the documentation to get the current configuration and then make your updates. This helps to make sure that you include all of the required fields. To view a summary, see Required Fields for Create Distribution and Update Distribution in the Amazon CloudFront Developer Guide.

If you are using Adobe Flash Media Server's RTMP protocol, you set up a different kind of CloudFront distribution. For more information, see CreateStreamingDistribution.

" }, "CreateDistributionWithTags":{ "name":"CreateDistributionWithTags2018_06_18", @@ -785,7 +785,7 @@ {"shape":"IllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior"}, {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"} ], - "documentation":"

Updates the configuration for a web distribution. Perform the following steps.

For information about updating a distribution using the CloudFront console, see Creating or Updating a Web Distribution Using the CloudFront Console in the Amazon CloudFront Developer Guide.

To update a web distribution using the CloudFront API

  1. Submit a GetDistributionConfig request to get the current configuration and an Etag header for the distribution.

    If you update the distribution again, you need to get a new Etag header.

  2. Update the XML document that was returned in the response to your GetDistributionConfig request to include the desired changes. You can't change the value of CallerReference. If you try to change this value, CloudFront returns an IllegalUpdate error. Note that you must strip out the ETag parameter that is returned.

    The new configuration replaces the existing configuration; the values that you specify in an UpdateDistribution request are not merged into the existing configuration. When you add, delete, or replace values in an element that allows multiple values (for example, CNAME), you must specify all of the values that you want to appear in the updated distribution. In addition, you must update the corresponding Quantity element.

  3. Submit an UpdateDistribution request to update the configuration for your distribution:

    • In the request body, include the XML document that you updated in Step 2. The request body must include an XML document with a DistributionConfig element.

    • Set the value of the HTTP If-Match header to the value of the ETag header that CloudFront returned when you submitted the GetDistributionConfig request in Step 1.

  4. Review the response to the UpdateDistribution request to confirm that the configuration was successfully updated.

  5. Optional: Submit a GetDistribution request to confirm that your changes have propagated. When propagation is complete, the value of Status is Deployed.

    Beginning with the 2012-05-05 version of the CloudFront API, we made substantial changes to the format of the XML document that you include in the request body when you create or update a distribution. With previous versions of the API, we discovered that it was too easy to accidentally delete one or more values for an element that accepts multiple values, for example, CNAMEs and trusted signers. Our changes for the 2012-05-05 release are intended to prevent these accidental deletions and to notify you when there's a mismatch between the number of values you say you're specifying in the Quantity element and the number of values you're actually specifying.

" + "documentation":"

Updates the configuration for a web distribution.

When you update a distribution, there are more required fields than when you create a distribution. When you update your distribution by using this API action, follow the steps here to get the current configuration and then make your updates, to make sure that you include all of the required fields. To view a summary, see Required Fields for Create Distribution and Update Distribution in the Amazon CloudFront Developer Guide.

The update process includes getting the current distribution configuration, updating the XML document that is returned to make your changes, and then submitting an UpdateDistribution request to make the updates.

For information about updating a distribution using the CloudFront console instead, see Creating a Distribution in the Amazon CloudFront Developer Guide.

To update a web distribution using the CloudFront API

  1. Submit a GetDistributionConfig request to get the current configuration and an Etag header for the distribution.

    If you update the distribution again, you must get a new Etag header.

  2. Update the XML document that was returned in the response to your GetDistributionConfig request to include your changes.

    When you edit the XML file, be aware of the following:

    • You must strip out the ETag parameter that is returned.

    • Additional fields are required when you update a distribution. There may be fields included in the XML file for features that you haven't configured for your distribution. This is expected and required to successfully update the distribution.

    • You can't change the value of CallerReference. If you try to change this value, CloudFront returns an IllegalUpdate error.

    • The new configuration replaces the existing configuration; the values that you specify in an UpdateDistribution request are not merged into your existing configuration. When you add, delete, or replace values in an element that allows multiple values (for example, CNAME), you must specify all of the values that you want to appear in the updated distribution. In addition, you must update the corresponding Quantity element.

  3. Submit an UpdateDistribution request to update the configuration for your distribution:

    • In the request body, include the XML document that you updated in Step 2. The request body must include an XML document with a DistributionConfig element.

    • Set the value of the HTTP If-Match header to the value of the ETag header that CloudFront returned when you submitted the GetDistributionConfig request in Step 1.

  4. Review the response to the UpdateDistribution request to confirm that the configuration was successfully updated.

  5. Optional: Submit a GetDistribution request to confirm that your changes have propagated. When propagation is complete, the value of Status is Deployed.

" }, "UpdateFieldLevelEncryptionConfig":{ "name":"UpdateFieldLevelEncryptionConfig2018_06_18", @@ -3383,7 +3383,7 @@ }, "IncludeBody":{ "shape":"boolean", - "documentation":"

A flag that allows a Lambda function to have read access to the body content. For more information, see Accessing Body Content in the Amazon CloudFront Developer Guide.

" + "documentation":"

A flag that allows a Lambda function to have read access to the body content. For more information, see Accessing the Request Body by Choosing the Include Body Option in the Amazon CloudFront Developer Guide.

" } }, "documentation":"

A complex type that contains a Lambda function association.

" diff --git a/botocore/data/cloudhsmv2/2017-04-28/service-2.json b/botocore/data/cloudhsmv2/2017-04-28/service-2.json index a73737c2..31c7a261 100644 --- a/botocore/data/cloudhsmv2/2017-04-28/service-2.json +++ b/botocore/data/cloudhsmv2/2017-04-28/service-2.json @@ -28,7 +28,8 @@ {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmInvalidRequestException"}, {"shape":"CloudHsmAccessDeniedException"} - ] + ], + "documentation":"

Copy an AWS CloudHSM cluster backup to a different region.

" }, "CreateCluster":{ "name":"CreateCluster", @@ -64,6 +65,23 @@ ], "documentation":"

Creates a new hardware security module (HSM) in the specified AWS CloudHSM cluster.

" }, + "DeleteBackup":{ + "name":"DeleteBackup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBackupRequest"}, + "output":{"shape":"DeleteBackupResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the DeleteBackup request. For more information on restoring a backup, see RestoreBackup

" + }, "DeleteCluster":{ "name":"DeleteCluster", "http":{ @@ -165,6 +183,23 @@ ], "documentation":"

Gets a list of tags for the specified AWS CloudHSM cluster.

This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken (or an empty or null value), that means there are no more tags to get.

" }, + "RestoreBackup":{ + "name":"RestoreBackup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreBackupRequest"}, + "output":{"shape":"RestoreBackupResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state. For more information on deleting a backup, see DeleteBackup.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -224,7 +259,11 @@ "CopyTimestamp":{"shape":"Timestamp"}, "SourceRegion":{"shape":"Region"}, "SourceBackup":{"shape":"BackupId"}, - "SourceCluster":{"shape":"ClusterId"} + "SourceCluster":{"shape":"ClusterId"}, + "DeleteTimestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time when the backup will be permanently deleted.

" + } }, "documentation":"

Contains information about a backup of an AWS CloudHSM cluster.

" }, @@ -241,7 +280,8 @@ "enum":[ "CREATE_IN_PROGRESS", "READY", - "DELETED" + "DELETED", + "PENDING_DELETION" ] }, "Backups":{ @@ -408,14 +448,23 @@ "BackupId" ], "members":{ - "DestinationRegion":{"shape":"Region"}, - "BackupId":{"shape":"BackupId"} + "DestinationRegion":{ + "shape":"Region", + "documentation":"

The AWS region that will contain your copied CloudHSM cluster backup.

" + }, + "BackupId":{ + "shape":"BackupId", + "documentation":"

The ID of the backup that will be copied to the destination region.

" + } } }, "CopyBackupToRegionResponse":{ "type":"structure", "members":{ - "DestinationBackup":{"shape":"DestinationBackup"} + "DestinationBackup":{ + "shape":"DestinationBackup", + "documentation":"

Information on the backup that will be copied to the destination region, including CreateTimestamp, SourceBackup, SourceCluster, and Source Region. CreateTimestamp of the destination backup will be the same as that of the source backup.

You will need to use the sourceBackupID returned in this operation to use the DescribeBackups operation on the backup that will be copied to the destination region.

" + } } }, "CreateClusterRequest":{ @@ -478,6 +527,25 @@ } } }, + "DeleteBackupRequest":{ + "type":"structure", + "required":["BackupId"], + "members":{ + "BackupId":{ + "shape":"BackupId", + "documentation":"

The ID of the backup to be deleted. To find the ID of a backup, use the DescribeBackups operation.

" + } + } + }, + "DeleteBackupResponse":{ + "type":"structure", + "members":{ + "Backup":{ + "shape":"Backup", + "documentation":"

Information on the Backup object deleted.

" + } + } + }, "DeleteClusterRequest":{ "type":"structure", "required":["ClusterId"], @@ -541,7 +609,7 @@ }, "Filters":{ "shape":"Filters", - "documentation":"

One or more filters to limit the items returned in the response.

Use the backupIds filter to return only the specified backups. Specify backups by their backup identifier (ID).

Use the clusterIds filter to return only the backups for the specified clusters. Specify clusters by their cluster identifier (ID).

Use the states filter to return only backups that match the specified state.

" + "documentation":"

One or more filters to limit the items returned in the response.

Use the backupIds filter to return only the specified backups. Specify backups by their backup identifier (ID).

Use the sourceBackupIds filter to return only the backups created from a source backup. The sourceBackupID of a source backup is returned by the CopyBackupToRegion operation.

Use the clusterIds filter to return only the backups for the specified clusters. Specify clusters by their cluster identifier (ID).

Use the states filter to return only backups that match the specified state.

" }, "SortAscending":{"shape":"Boolean"} } @@ -771,6 +839,25 @@ "type":"string", "pattern":"[a-z]{2}(-(gov))?-(east|west|north|south|central){1,2}-\\d" }, + "RestoreBackupRequest":{ + "type":"structure", + "required":["BackupId"], + "members":{ + "BackupId":{ + "shape":"BackupId", + "documentation":"

The ID of the backup to be restored. To find the ID of a backup, use the DescribeBackups operation.

" + } + } + }, + "RestoreBackupResponse":{ + "type":"structure", + "members":{ + "Backup":{ + "shape":"Backup", + "documentation":"

Information on the Backup object created.

" + } + } + }, "SecurityGroup":{ "type":"string", "pattern":"sg-[0-9a-fA-F]" diff --git a/botocore/data/cloudsearch/2011-02-01/service-2.json b/botocore/data/cloudsearch/2011-02-01/service-2.json index 98364bdb..37c87252 100644 --- a/botocore/data/cloudsearch/2011-02-01/service-2.json +++ b/botocore/data/cloudsearch/2011-02-01/service-2.json @@ -4,6 +4,7 @@ "apiVersion":"2011-02-01", "endpointPrefix":"cloudsearch", "serviceFullName":"Amazon CloudSearch", + "serviceId":"CloudSearch", "signatureVersion":"v4", "xmlNamespace":"http://cloudsearch.amazonaws.com/doc/2011-02-01/", "protocol":"query" diff --git a/botocore/data/cloudwatch/2010-08-01/service-2.json b/botocore/data/cloudwatch/2010-08-01/service-2.json index a572ec7b..212f4aa4 100644 --- a/botocore/data/cloudwatch/2010-08-01/service-2.json +++ b/botocore/data/cloudwatch/2010-08-01/service-2.json @@ -137,7 +137,7 @@ "errors":[ {"shape":"InvalidNextToken"} ], - "documentation":"

You can use the GetMetricData API to retrieve as many as 100 different metrics in a single request, with a total of as many as 100,800 datapoints. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

" + "documentation":"

You can use the GetMetricData API to retrieve as many as 100 different metrics in a single request, with a total of as many as 100,800 datapoints. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

  • Data points with a period of less than 60 seconds are available for 3 hours. These data points are high-resolution metrics and are available only for custom metrics that have been defined with a StorageResolution of 1.

  • Data points with a period of 60 seconds (1-minute) are available for 15 days.

  • Data points with a period of 300 seconds (5-minute) are available for 63 days.

  • Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months).

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

" }, "GetMetricStatistics":{ "name":"GetMetricStatistics", @@ -156,7 +156,20 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Gets statistics for the specified metric.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

  • The SampleCount value of the statistic set is 1.

  • The Min and the Max values of the statistic set are equal.

Amazon CloudWatch retains metric data as follows:

  • Data points with a period of less than 60 seconds are available for 3 hours. These data points are high-resolution metrics and are available only for custom metrics that have been defined with a StorageResolution of 1.

  • Data points with a period of 60 seconds (1-minute) are available for 15 days.

  • Data points with a period of 300 seconds (5-minute) are available for 63 days.

  • Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months).

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

For information about metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

" + "documentation":"

Gets statistics for the specified metric.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

  • The SampleCount value of the statistic set is 1.

  • The Min and the Max values of the statistic set are equal.

Percentile statistics are not available for metrics when any of the metric values are negative numbers.

Amazon CloudWatch retains metric data as follows:

  • Data points with a period of less than 60 seconds are available for 3 hours. These data points are high-resolution metrics and are available only for custom metrics that have been defined with a StorageResolution of 1.

  • Data points with a period of 60 seconds (1-minute) are available for 15 days.

  • Data points with a period of 300 seconds (5-minute) are available for 63 days.

  • Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months).

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

For information about metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

" + }, + "GetMetricWidgetImage":{ + "name":"GetMetricWidgetImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMetricWidgetImageInput"}, + "output":{ + "shape":"GetMetricWidgetImageOutput", + "resultWrapper":"GetMetricWidgetImageResult" + }, + "documentation":"

You can use the GetMetricWidgetImage API to retrieve a snapshot graph of one or more Amazon CloudWatch metrics as a bitmap image. You can then embed this image into your services and products, such as wiki pages, reports, and documents. You could also retrieve images regularly, such as every minute, and create your own custom live dashboard.

The graph you retrieve can include all CloudWatch metric graph features, including metric math and horizontal and vertical annotations.

There is a limit of 20 transactions per second for this API. Each GetMetricWidgetImage action has the following limits:

  • As many as 100 metrics in the graph.

  • Up to 100 KB uncompressed payload.

" }, "ListDashboards":{ "name":"ListDashboards", @@ -173,7 +186,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Returns a list of the dashboards for your account. If you include DashboardNamePrefix, only those dashboards with names starting with the prefix are listed. Otherwise, all dashboards in your account are listed.

" + "documentation":"

Returns a list of the dashboards for your account. If you include DashboardNamePrefix, only those dashboards with names starting with the prefix are listed. Otherwise, all dashboards in your account are listed.

ListDashboards returns up to 1000 results on one page. If there are more than 1000 dashboards, you can call ListDashboards again and include the value you received for NextToken in the first call, to receive the next 1000 results.

" }, "ListMetrics":{ "name":"ListMetrics", @@ -190,7 +203,7 @@ {"shape":"InternalServiceFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

List the specified metrics. You can use the returned metrics with GetMetricStatistics to obtain statistical data.

Up to 500 results are returned for any one call. To retrieve additional results, use the returned token with subsequent calls.

After you create a metric, allow up to fifteen minutes before the metric appears. Statistics about the metric, however, are available sooner using GetMetricStatistics.

" + "documentation":"

List the specified metrics. You can use the returned metrics with GetMetricData or GetMetricStatistics to obtain statistical data.

Up to 500 results are returned for any one call. To retrieve additional results, use the returned token with subsequent calls.

After you create a metric, allow up to fifteen minutes before the metric appears. Statistics about the metric, however, are available sooner using GetMetricData or GetMetricStatistics.

" }, "PutDashboard":{ "name":"PutDashboard", @@ -207,7 +220,7 @@ {"shape":"DashboardInvalidInputError"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

You can have up to 500 dashboards per account. All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.

" + "documentation":"

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

There is no limit to the number of dashboards in your account. All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.

" }, "PutMetricAlarm":{ "name":"PutMetricAlarm", @@ -219,7 +232,7 @@ "errors":[ {"shape":"LimitExceededFault"} ], - "documentation":"

Creates or updates an alarm and associates it with the specified metric. Optionally, this operation can associate one or more Amazon SNS resources with the alarm.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is evaluated and its state is set appropriately. Any actions associated with the state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some operations:

  • iam:CreateServiceLinkedRole for all alarms with EC2 actions

  • ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms on EC2 instance status metrics

  • ec2:StopInstances for alarms with stop actions

  • ec2:TerminateInstances for alarms with terminate actions

  • ec2:DescribeInstanceRecoveryAttribute and ec2:RecoverInstances for alarms with recover actions

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.

If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.

You must create at least one stop, terminate, or reboot alarm using either the Amazon EC2 or CloudWatch consoles to create the EC2ActionsAccess IAM role. After this IAM role is created, you can create stop, terminate, or reboot alarms using a command-line interface or API.

" + "documentation":"

Creates or updates an alarm and associates it with the specified metric. Optionally, this operation can associate one or more Amazon SNS resources with the alarm.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is evaluated and its state is set appropriately. Any actions associated with the state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some operations:

  • iam:CreateServiceLinkedRole for all alarms with EC2 actions

  • ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms on EC2 instance status metrics

  • ec2:StopInstances for alarms with stop actions

  • ec2:TerminateInstances for alarms with terminate actions

  • ec2:DescribeInstanceRecoveryAttribute and ec2:RecoverInstances for alarms with recover actions

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.

If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.

The first time you create an alarm in the AWS Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked role is called AWSServiceRoleForCloudWatchEvents. For more information about service-linked roles, see AWS service-linked role.

" }, "PutMetricData":{ "name":"PutMetricData", @@ -234,7 +247,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

Each PutMetricData request is limited to 40 KB in size for HTTP POST requests.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 10 dimensions per metric to further clarify what data the metric collects. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricStatistics from the time they are submitted.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

  • The SampleCount value of the statistic set is 1

  • The Min and the Max values of the statistic set are equal

" + "documentation":"

Publishes metric data to Amazon CloudWatch. CloudWatch associates the data with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

Each PutMetricData request is limited to 40 KB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 20 different metrics.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 10 dimensions per metric to further clarify what data the metric collects. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted.

CloudWatch needs raw data points to calculate percentile statistics. These raw data points could be published individually or as part of Values and Counts arrays. If you publish data using statistic sets in the StatisticValues field instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

  • The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal.

  • The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.

" }, "SetAlarmState":{ "name":"SetAlarmState", @@ -322,6 +335,10 @@ "LessThanOrEqualToThreshold" ] }, + "Counts":{ + "type":"list", + "member":{"shape":"DatapointValue"} + }, "DashboardArn":{"type":"string"}, "DashboardBody":{"type":"string"}, "DashboardEntries":{ @@ -767,11 +784,11 @@ }, "StartTime":{ "shape":"Timestamp", - "documentation":"

The time stamp indicating the earliest data to be returned.

" + "documentation":"

The time stamp indicating the earliest data to be returned.

For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get a faster response from CloudWatch than setting 12:07 or 12:29 as the StartTime.

" }, "EndTime":{ "shape":"Timestamp", - "documentation":"

The time stamp indicating the latest data to be returned.

" + "documentation":"

The time stamp indicating the latest data to be returned.

For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as EndTime can get a faster response from CloudWatch than setting 12:07 or 12:29 as the EndTime.

" }, "NextToken":{ "shape":"NextToken", @@ -841,7 +858,7 @@ }, "ExtendedStatistics":{ "shape":"ExtendedStatistics", - "documentation":"

The percentile statistics. Specify values between p0.0 and p100. When calling GetMetricStatistics, you must specify either Statistics or ExtendedStatistics, but not both.

" + "documentation":"

The percentile statistics. Specify values between p0.0 and p100. When calling GetMetricStatistics, you must specify either Statistics or ExtendedStatistics, but not both. Percentile statistics are not available for metrics when any of the metric values are negative numbers.

" }, "Unit":{ "shape":"StandardUnit", @@ -862,6 +879,29 @@ } } }, + "GetMetricWidgetImageInput":{ + "type":"structure", + "required":["MetricWidget"], + "members":{ + "MetricWidget":{ + "shape":"MetricWidget", + "documentation":"

A JSON string that defines the bitmap graph to be retrieved. The string includes the metrics to include in the graph, statistics, annotations, title, axis limits, and so on. You can include only one MetricWidget parameter in each GetMetricWidgetImage call.

For more information about the syntax of MetricWidget see CloudWatch-Metric-Widget-Structure.

If any metric on the graph could not load all the requested data points, an orange triangle with an exclamation point appears next to the graph legend.

" + }, + "OutputFormat":{ + "shape":"OutputFormat", + "documentation":"

The format of the resulting image. Only PNG images are supported.

The default is png. If you specify png, the API returns an HTTP response with the content-type set to text/xml. The image data is in a MetricWidgetImage field. For example:

<GetMetricWidgetImageResponse xmlns=\"http://monitoring.amazonaws.com/doc/2010-08-01/\">

<GetMetricWidgetImageResult>

<MetricWidgetImage>

iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip...

</MetricWidgetImage>

</GetMetricWidgetImageResult>

<ResponseMetadata>

<RequestId>6f0d4192-4d42-11e8-82c1-f539a07e0e3b</RequestId>

</ResponseMetadata>

</GetMetricWidgetImageResponse>

The image/png setting is intended only for custom HTTP requests. For most use cases, and all actions using an AWS SDK, you should use png. If you specify image/png, the HTTP response has a content-type set to image/png, and the body of the response is a PNG image.

" + } + } + }, + "GetMetricWidgetImageOutput":{ + "type":"structure", + "members":{ + "MetricWidgetImage":{ + "shape":"MetricWidgetImage", + "documentation":"

The image of the graph, in the output format specified.

" + } + } + }, "HistoryData":{ "type":"string", "max":4095, @@ -942,7 +982,8 @@ "httpStatusCode":400, "senderFault":true }, - "exception":true + "exception":true, + "synthetic":true }, "InvalidParameterValueException":{ "type":"structure", @@ -958,7 +999,8 @@ "httpStatusCode":400, "senderFault":true }, - "exception":true + "exception":true, + "synthetic":true }, "LastModified":{"type":"timestamp"}, "LimitExceededFault":{ @@ -1320,6 +1362,14 @@ "shape":"StatisticSet", "documentation":"

The statistical values for the metric.

" }, + "Values":{ + "shape":"Values", + "documentation":"

Array of numbers representing the values for the metric during the period. Each unique value is listed just once in this array, and the corresponding number in the Counts array specifies the number of times that value occurred during the period. You can include up to 150 unique values in each PutMetricData action that specifies a Values array.

Although the Values array accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

" + }, + "Counts":{ + "shape":"Counts", + "documentation":"

Array of numbers that is used along with the Values array. Each number in the Count array is the number of times the corresponding value in the Values array occurred during the period.

If you omit the Counts array, the default of 1 is used as the value for each count. If you include a Counts array, it must include the same amount of values as the Values array.

" + }, "Unit":{ "shape":"StandardUnit", "documentation":"

The unit of the metric.

" @@ -1374,6 +1424,8 @@ }, "documentation":"

This structure defines the metric to be returned, along with the statistics, period, and units.

" }, + "MetricWidget":{"type":"string"}, + "MetricWidgetImage":{"type":"blob"}, "Metrics":{ "type":"list", "member":{"shape":"Metric"} @@ -1392,7 +1444,8 @@ "httpStatusCode":400, "senderFault":true }, - "exception":true + "exception":true, + "synthetic":true }, "Namespace":{ "type":"string", @@ -1405,6 +1458,7 @@ "max":1024, "min":0 }, + "OutputFormat":{"type":"string"}, "Period":{ "type":"integer", "min":1 @@ -1461,15 +1515,15 @@ }, "OKActions":{ "shape":"ResourceList", - "documentation":"

The actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:{account-id}:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:{account-id}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:{account-id}:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" + "documentation":"

The actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" }, "AlarmActions":{ "shape":"ResourceList", - "documentation":"

The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:{account-id}:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:{account-id}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:{account-id}:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" + "documentation":"

The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" }, "InsufficientDataActions":{ "shape":"ResourceList", - "documentation":"

The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:{account-id}:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:{account-id}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:{account-id}:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" + "documentation":"

The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN).

Valid Values: arn:aws:automate:region:ec2:stop | arn:aws:automate:region:ec2:terminate | arn:aws:automate:region:ec2:recover | arn:aws:sns:region:account-id:sns-topic-name | arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

Valid Values (for use with IAM roles): arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

" }, "MetricName":{ "shape":"MetricName", @@ -1538,7 +1592,7 @@ }, "MetricData":{ "shape":"MetricData", - "documentation":"

The data for the metric.

" + "documentation":"

The data for the metric. The array can include no more than 20 metrics per call.

" } } }, @@ -1720,6 +1774,10 @@ "type":"string", "max":255, "min":1 + }, + "Values":{ + "type":"list", + "member":{"shape":"DatapointValue"} } }, "documentation":"

Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the applications you run on AWS in real time. You can use CloudWatch to collect and track metrics, which are the variables you want to measure for your resources and applications.

CloudWatch alarms send notifications or automatically change the resources you are monitoring based on rules that you define. For example, you can monitor the CPU usage and disk reads and writes of your Amazon EC2 instances. Then, use this data to determine whether you should launch additional instances to handle increased load. You can also use this data to stop under-used instances to save money.

In addition to monitoring the built-in metrics that come with AWS, you can monitor your own custom metrics. With CloudWatch, you gain system-wide visibility into resource utilization, application performance, and operational health.

" diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index a85376fb..ae6b29aa 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -376,10 +376,22 @@ "shape":"ProjectSource", "documentation":"

Information about the source code to be built.

" }, + "secondarySources":{ + "shape":"ProjectSources", + "documentation":"

An array of ProjectSource objects.

" + }, + "secondarySourceVersions":{ + "shape":"ProjectSecondarySourceVersions", + "documentation":"

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

  • For AWS CodeCommit: the commit ID to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID will be used. If not specified, the default branch's HEAD commit ID will be used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID will be used. If not specified, the default branch's HEAD commit ID will be used.

  • For Amazon Simple Storage Service (Amazon S3): the version ID of the object representing the build input ZIP file to use.

" + }, "artifacts":{ "shape":"BuildArtifacts", "documentation":"

Information about the output artifacts for the build.

" }, + "secondaryArtifacts":{ + "shape":"BuildArtifactsList", + "documentation":"

An array of ProjectArtifacts objects.

" + }, "cache":{ "shape":"ProjectCache", "documentation":"

Information about the cache for the build.

" @@ -445,10 +457,20 @@ "encryptionDisabled":{ "shape":"WrapperBoolean", "documentation":"

Information that tells you if encryption for build artifacts is disabled.

" + }, + "artifactIdentifier":{ + "shape":"String", + "documentation":"

An identifier for this artifact definition.

" } }, "documentation":"

Information about build output artifacts.

" }, + "BuildArtifactsList":{ + "type":"list", + "member":{"shape":"BuildArtifacts"}, + "max":12, + "min":0 + }, "BuildIds":{ "type":"list", "member":{"shape":"NonEmptyString"}, @@ -533,6 +555,25 @@ "S3" ] }, + "CloudWatchLogsConfig":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"LogsConfigStatusType", + "documentation":"

The current status of the Amazon CloudWatch Logs for a build project. Valid values are:

  • ENABLED: Amazon CloudWatch Logs are enabled for this build project.

  • DISABLED: Amazon CloudWatch Logs are not enabled for this build project.

" + }, + "groupName":{ + "shape":"String", + "documentation":"

The group name of the Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams

" + }, + "streamName":{ + "shape":"String", + "documentation":"

The prefix of the stream name of the Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams

" + } + }, + "documentation":"

Information about Amazon CloudWatch Logs for a build project.

" + }, "ComputeType":{ "type":"string", "enum":[ @@ -563,10 +604,18 @@ "shape":"ProjectSource", "documentation":"

Information about the build input source code for the build project.

" }, + "secondarySources":{ + "shape":"ProjectSources", + "documentation":"

An array of ProjectSource objects.

" + }, "artifacts":{ "shape":"ProjectArtifacts", "documentation":"

Information about the build output artifacts for the build project.

" }, + "secondaryArtifacts":{ + "shape":"ProjectArtifactsList", + "documentation":"

An array of ProjectArtifacts objects.

" + }, "cache":{ "shape":"ProjectCache", "documentation":"

Stores recently used information so that it can be quickly accessed at a later time.

" @@ -598,6 +647,10 @@ "badgeEnabled":{ "shape":"WrapperBoolean", "documentation":"

Set this to true to generate a publicly-accessible URL for your project's build badge.

" + }, + "logsConfig":{ + "shape":"LogsConfig", + "documentation":"

Information about logs for the build project. Logs can be Amazon CloudWatch Logs, uploaded to a specified S3 bucket, or both.

" } } }, @@ -912,6 +965,27 @@ } } }, + "LogsConfig":{ + "type":"structure", + "members":{ + "cloudWatchLogs":{ + "shape":"CloudWatchLogsConfig", + "documentation":"

Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch Logs are enabled by default.

" + }, + "s3Logs":{ + "shape":"S3LogsConfig", + "documentation":"

Information about logs built to an S3 bucket for a build project. S3 logs are not enabled by default.

" + } + }, + "documentation":"

Information about logs for a build project. Logs can be Amazon CloudWatch Logs, built in a specified S3 bucket, or both.

" + }, + "LogsConfigStatusType":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "LogsLocation":{ "type":"structure", "members":{ @@ -926,6 +1000,18 @@ "deepLink":{ "shape":"String", "documentation":"

The URL to an individual build log in Amazon CloudWatch Logs.

" + }, + "s3DeepLink":{ + "shape":"String", + "documentation":"

The URL to an individual build log in an S3 bucket.

" + }, + "cloudWatchLogs":{ + "shape":"CloudWatchLogsConfig", + "documentation":"

Information about Amazon CloudWatch Logs for a build project.

" + }, + "s3Logs":{ + "shape":"S3LogsConfig", + "documentation":"

Information about S3 logs for a build project.

" } }, "documentation":"

Information about build logs in Amazon CloudWatch Logs.

" @@ -1001,10 +1087,18 @@ "shape":"ProjectSource", "documentation":"

Information about the build input source code for this build project.

" }, + "secondarySources":{ + "shape":"ProjectSources", + "documentation":"

An array of ProjectSource objects.

" + }, "artifacts":{ "shape":"ProjectArtifacts", "documentation":"

Information about the build output artifacts for the build project.

" }, + "secondaryArtifacts":{ + "shape":"ProjectArtifactsList", + "documentation":"

An array of ProjectArtifacts objects.

" + }, "cache":{ "shape":"ProjectCache", "documentation":"

Information about the cache for the build project.

" @@ -1048,6 +1142,10 @@ "badge":{ "shape":"ProjectBadge", "documentation":"

Information about the build badge for the build project.

" + }, + "logsConfig":{ + "shape":"LogsConfig", + "documentation":"

Information about logs for the build project. A project can create Amazon CloudWatch Logs, logs in an S3 bucket, or both.

" } }, "documentation":"

Information about a build project.

" @@ -1087,10 +1185,20 @@ "encryptionDisabled":{ "shape":"WrapperBoolean", "documentation":"

Set to true if you do not want your output artifacts encrypted. This option is only valid if your artifacts type is Amazon S3. If this is set with another artifacts type, an invalidInputException will be thrown.

" + }, + "artifactIdentifier":{ + "shape":"String", + "documentation":"

An identifier for this artifact definition.

" } }, "documentation":"

Information about the build output artifacts for the build project.

" }, + "ProjectArtifactsList":{ + "type":"list", + "member":{"shape":"ProjectArtifacts"}, + "max":12, + "min":0 + }, "ProjectBadge":{ "type":"structure", "members":{ @@ -1172,6 +1280,12 @@ "max":100, "min":1 }, + "ProjectSecondarySourceVersions":{ + "type":"list", + "member":{"shape":"ProjectSourceVersion"}, + "max":12, + "min":0 + }, "ProjectSortByType":{ "type":"string", "enum":[ @@ -1186,7 +1300,7 @@ "members":{ "type":{ "shape":"SourceType", - "documentation":"

The type of repository that contains the source code to be built. Valid values include:

  • BITBUCKET: The source code is in a Bitbucket repository.

  • CODECOMMIT: The source code is in an AWS CodeCommit repository.

  • CODEPIPELINE: The source code settings are specified in the source action of a pipeline in AWS CodePipeline.

  • GITHUB: The source code is in a GitHub repository.

  • S3: The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.

" + "documentation":"

The type of repository that contains the source code to be built. Valid values include:

  • BITBUCKET: The source code is in a Bitbucket repository.

  • CODECOMMIT: The source code is in an AWS CodeCommit repository.

  • CODEPIPELINE: The source code settings are specified in the source action of a pipeline in AWS CodePipeline.

  • GITHUB: The source code is in a GitHub repository.

  • NO_SOURCE: The project does not have input source code.

  • S3: The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.

" }, "location":{ "shape":"String", @@ -1211,10 +1325,38 @@ "insecureSsl":{ "shape":"WrapperBoolean", "documentation":"

Enable this flag to ignore SSL warnings while connecting to the project source code.

" + }, + "sourceIdentifier":{ + "shape":"String", + "documentation":"

An identifier for this project source.

" } }, "documentation":"

Information about the build input source code for the build project.

" }, + "ProjectSourceVersion":{ + "type":"structure", + "required":[ + "sourceIdentifier", + "sourceVersion" + ], + "members":{ + "sourceIdentifier":{ + "shape":"String", + "documentation":"

An identifier for a source in the build project.

" + }, + "sourceVersion":{ + "shape":"String", + "documentation":"

The source version for the corresponding source identifier. If specified, must be one of:

  • For AWS CodeCommit: the commit ID to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID will be used. If not specified, the default branch's HEAD commit ID will be used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID will be used. If not specified, the default branch's HEAD commit ID will be used.

  • For Amazon Simple Storage Service (Amazon S3): the version ID of the object representing the build input ZIP file to use.

" + } + }, + "documentation":"

A source identifier and its corresponding version.

" + }, + "ProjectSources":{ + "type":"list", + "member":{"shape":"ProjectSource"}, + "max":12, + "min":0 + }, "Projects":{ "type":"list", "member":{"shape":"Project"} @@ -1233,6 +1375,21 @@ "documentation":"

The specified AWS resource cannot be found.

", "exception":true }, + "S3LogsConfig":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"LogsConfigStatusType", + "documentation":"

The current status of the S3 build logs. Valid values are:

  • ENABLED: S3 build logs are enabled for this build project.

  • DISABLED: S3 build logs are not enabled for this build project.

" + }, + "location":{ + "shape":"String", + "documentation":"

The ARN of an S3 bucket and the path prefix for S3 logs. If your Amazon S3 bucket name is my-bucket, and your path prefix is build-log, then acceptable formats are my-bucket/build-log or aws:s3:::my-bucket/build-log.

" + } + }, + "documentation":"

Information about S3 logs for a build project.

" + }, "SecurityGroupIds":{ "type":"list", "member":{"shape":"NonEmptyString"}, @@ -1272,7 +1429,8 @@ "GITHUB", "S3", "BITBUCKET", - "GITHUB_ENTERPRISE" + "GITHUB_ENTERPRISE", + "NO_SOURCE" ] }, "StartBuildInput":{ @@ -1283,6 +1441,14 @@ "shape":"NonEmptyString", "documentation":"

The name of the AWS CodeBuild build project to start running a build.

" }, + "secondarySourcesOverride":{ + "shape":"ProjectSources", + "documentation":"

An array of ProjectSource objects.

" + }, + "secondarySourcesVersionOverride":{ + "shape":"ProjectSecondarySourceVersions", + "documentation":"

An array of ProjectSourceVersion objects that specify one or more versions of the project's secondary sources to be used for this build only.

" + }, "sourceVersion":{ "shape":"String", "documentation":"

A version of the build input to be built, for this build only. If not specified, the latest version will be used. If specified, must be one of:

  • For AWS CodeCommit: the commit ID to use.

  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID will be used. If not specified, the default branch's HEAD commit ID will be used.

  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID will be used. If not specified, the default branch's HEAD commit ID will be used.

  • For Amazon Simple Storage Service (Amazon S3): the version ID of the object representing the build input ZIP file to use.

" @@ -1291,13 +1457,17 @@ "shape":"ProjectArtifacts", "documentation":"

Build output artifact settings that override, for this build only, the latest ones already defined in the build project.

" }, + "secondaryArtifactsOverride":{ + "shape":"ProjectArtifactsList", + "documentation":"

An array of ProjectArtifacts objects.

" + }, "environmentVariablesOverride":{ "shape":"EnvironmentVariables", "documentation":"

A set of environment variables that overrides, for this build only, the latest ones already defined in the build project.

" }, "sourceTypeOverride":{ "shape":"SourceType", - "documentation":"

A source input type for this build that overrides the source input defined in the build project

" + "documentation":"

A source input type for this build that overrides the source input defined in the build project.

" }, "sourceLocationOverride":{ "shape":"String", @@ -1358,6 +1528,10 @@ "idempotencyToken":{ "shape":"String", "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 12 hours. If you repeat the StartBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

" + }, + "logsConfigOverride":{ + "shape":"LogsConfig", + "documentation":"

Log settings for this build that override the log settings defined in the build project.

" } } }, @@ -1448,10 +1622,18 @@ "shape":"ProjectSource", "documentation":"

Information to be changed about the build input source code for the build project.

" }, + "secondarySources":{ + "shape":"ProjectSources", + "documentation":"

An array of ProjectSource objects.

" + }, "artifacts":{ "shape":"ProjectArtifacts", "documentation":"

Information to be changed about the build output artifacts for the build project.

" }, + "secondaryArtifacts":{ + "shape":"ProjectArtifactsList", + "documentation":"

An array of ProjectArtifacts objects.

" + }, "cache":{ "shape":"ProjectCache", "documentation":"

Stores recently used information so that it can be quickly accessed at a later time.

" @@ -1483,6 +1665,10 @@ "badgeEnabled":{ "shape":"WrapperBoolean", "documentation":"

Set this to true to generate a publicly-accessible URL for your project's build badge.

" + }, + "logsConfig":{ + "shape":"LogsConfig", + "documentation":"

Information about logs for the build project. A project can create Amazon CloudWatch Logs, logs in an S3 bucket, or both.

" } } }, diff --git a/botocore/data/codecommit/2015-04-13/service-2.json b/botocore/data/codecommit/2015-04-13/service-2.json index d3c23312..c6968c3f 100644 --- a/botocore/data/codecommit/2015-04-13/service-2.json +++ b/botocore/data/codecommit/2015-04-13/service-2.json @@ -156,6 +156,40 @@ ], "documentation":"

Deletes the content of a comment made on a change, file, or commit in a repository.

" }, + "DeleteFile":{ + "name":"DeleteFile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFileInput"}, + "output":{"shape":"DeleteFileOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"ParentCommitIdRequiredException"}, + {"shape":"InvalidParentCommitIdException"}, + {"shape":"ParentCommitDoesNotExistException"}, + {"shape":"ParentCommitIdOutdatedException"}, + {"shape":"PathRequiredException"}, + {"shape":"InvalidPathException"}, + {"shape":"FileDoesNotExistException"}, + {"shape":"BranchNameRequiredException"}, + {"shape":"InvalidBranchNameException"}, + {"shape":"BranchDoesNotExistException"}, + {"shape":"BranchNameIsTagNameException"}, + {"shape":"NameLengthExceededException"}, + {"shape":"InvalidEmailException"}, + {"shape":"CommitMessageLengthExceededException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ], + "documentation":"

Deletes a specified file from a specified branch. A commit is created on the branch that contains the revision. The file will still exist in the commits prior to the commit that contains the deletion.

" + }, "DeleteRepository":{ "name":"DeleteRepository", "http":{ @@ -368,6 +402,57 @@ ], "documentation":"

Returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference). Results can be limited to a specified path.

" }, + "GetFile":{ + "name":"GetFile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetFileInput"}, + "output":{"shape":"GetFileOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidCommitException"}, + {"shape":"CommitDoesNotExistException"}, + {"shape":"PathRequiredException"}, + {"shape":"InvalidPathException"}, + {"shape":"FileDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"}, + {"shape":"FileTooLargeException"} + ], + "documentation":"

Returns the base-64 encoded contents of a specified file and its metadata.

" + }, + "GetFolder":{ + "name":"GetFolder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetFolderInput"}, + "output":{"shape":"GetFolderOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidCommitException"}, + {"shape":"CommitDoesNotExistException"}, + {"shape":"PathRequiredException"}, + {"shape":"InvalidPathException"}, + {"shape":"FolderDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ], + "documentation":"

Returns the contents of a specified folder in a repository.

" + }, "GetMergeConflicts":{ "name":"GetMergeConflicts", "http":{ @@ -671,6 +756,7 @@ {"shape":"NameLengthExceededException"}, {"shape":"InvalidEmailException"}, {"shape":"CommitMessageLengthExceededException"}, + {"shape":"InvalidDeletionParameterException"}, {"shape":"EncryptionIntegrityChecksFailedException"}, {"shape":"EncryptionKeyAccessDeniedException"}, {"shape":"EncryptionKeyDisabledException"}, @@ -680,7 +766,7 @@ {"shape":"FileNameConflictsWithDirectoryNameException"}, {"shape":"DirectoryNameConflictsWithFileNameException"} ], - "documentation":"

Adds or updates a file in an AWS CodeCommit repository.

" + "documentation":"

Adds or updates a file in a branch in an AWS CodeCommit repository, and generates a commit for the addition in the specified branch.

" }, "PutRepositoryTriggers":{ "name":"PutRepositoryTriggers", @@ -1411,6 +1497,76 @@ } } }, + "DeleteFileInput":{ + "type":"structure", + "required":[ + "repositoryName", + "branchName", + "filePath", + "parentCommitId" + ], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the file to delete.

" + }, + "branchName":{ + "shape":"BranchName", + "documentation":"

The name of the branch where the commit will be made deleting the file.

" + }, + "filePath":{ + "shape":"Path", + "documentation":"

The fully-qualified path to the file that will be deleted, including the full name and extension of that file. For example, /examples/file.md is a fully qualified path to a file named file.md in a folder named examples.

" + }, + "parentCommitId":{ + "shape":"CommitId", + "documentation":"

The ID of the commit that is the tip of the branch where you want to create the commit that will delete the file. This must be the HEAD commit for the branch. The commit that deletes the file will be created from this commit ID.

" + }, + "keepEmptyFolders":{ + "shape":"KeepEmptyFolders", + "documentation":"

Specifies whether to delete the folder or directory that contains the file you want to delete if that file is the only object in the folder or directory. By default, empty folders will be deleted. This includes empty folders that are part of the directory structure. For example, if the path to a file is dir1/dir2/dir3/dir4, and dir2 and dir3 are empty, deleting the last file in dir4 will also delete the empty folders dir4, dir3, and dir2.

" + }, + "commitMessage":{ + "shape":"Message", + "documentation":"

The commit message you want to include as part of deleting the file. Commit messages are limited to 256 KB. If no message is specified, a default message will be used.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the author of the commit that deletes the file. If no name is specified, the user's ARN will be used as the author name and committer name.

" + }, + "email":{ + "shape":"Email", + "documentation":"

The email address for the commit that deletes the file. If no email address is specified, the email address will be left blank.

" + } + } + }, + "DeleteFileOutput":{ + "type":"structure", + "required":[ + "commitId", + "blobId", + "treeId", + "filePath" + ], + "members":{ + "commitId":{ + "shape":"ObjectId", + "documentation":"

The full commit ID of the commit that contains the change that deletes the file.

" + }, + "blobId":{ + "shape":"ObjectId", + "documentation":"

The blob ID removed from the tree as part of deleting the file.

" + }, + "treeId":{ + "shape":"ObjectId", + "documentation":"

The full SHA-1 pointer of the tree information for the commit that contains the delete file change.

" + }, + "filePath":{ + "shape":"Path", + "documentation":"

The fully-qualified path to the file that will be deleted, including the full name and extension of that file.

" + } + } + }, "DeleteRepositoryInput":{ "type":"structure", "required":["repositoryName"], @@ -1543,6 +1699,28 @@ "exception":true }, "EventDate":{"type":"timestamp"}, + "File":{ + "type":"structure", + "members":{ + "blobId":{ + "shape":"ObjectId", + "documentation":"

The blob ID that contains the file information.

" + }, + "absolutePath":{ + "shape":"Path", + "documentation":"

The fully-qualified path to the file in the repository.

" + }, + "relativePath":{ + "shape":"Path", + "documentation":"

The relative path of the file from the folder where the query originated.

" + }, + "fileMode":{ + "shape":"FileModeTypeEnum", + "documentation":"

The extrapolated file mode permissions for the file. Valid values include EXECUTABLE and NORMAL.

" + } + }, + "documentation":"

Returns information about a file in a repository.

" + }, "FileContent":{ "type":"blob", "max":6291456 @@ -1561,6 +1739,17 @@ "documentation":"

The file cannot be added because it is too large. The maximum file size that can be added using PutFile is 6 MB. For files larger than 6 MB but smaller than 2 GB, add them using a Git client.

", "exception":true }, + "FileDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified file does not exist. Verify that you have provided the correct name of the file, including its full path and extension.

", + "exception":true + }, + "FileList":{ + "type":"list", + "member":{"shape":"File"} + }, "FileModeTypeEnum":{ "type":"string", "enum":[ @@ -1583,6 +1772,35 @@ "documentation":"

The specified file exceeds the file size limit for AWS CodeCommit. For more information about limits in AWS CodeCommit, see AWS CodeCommit User Guide.

", "exception":true }, + "Folder":{ + "type":"structure", + "members":{ + "treeId":{ + "shape":"ObjectId", + "documentation":"

The full SHA-1 pointer of the tree information for the commit that contains the folder.

" + }, + "absolutePath":{ + "shape":"Path", + "documentation":"

The fully-qualified path of the folder in the repository.

" + }, + "relativePath":{ + "shape":"Path", + "documentation":"

The relative path of the specified folder from the folder where the query originated.

" + } + }, + "documentation":"

Returns information about a folder in a repository.

" + }, + "FolderDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified folder does not exist. Either the folder name is not correct, or you did not provide the full path to the folder.

", + "exception":true + }, + "FolderList":{ + "type":"list", + "member":{"shape":"Folder"} + }, "GetBlobInput":{ "type":"structure", "required":[ @@ -1819,6 +2037,122 @@ } } }, + "GetFileInput":{ + "type":"structure", + "required":[ + "repositoryName", + "filePath" + ], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the file.

" + }, + "commitSpecifier":{ + "shape":"CommitName", + "documentation":"

The fully-qualified reference that identifies the commit that contains the file. For example, you could specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/master. If none is provided, then the head commit will be used.

" + }, + "filePath":{ + "shape":"Path", + "documentation":"

The fully-qualified path to the file, including the full name and extension of the file. For example, /examples/file.md is the fully-qualified path to a file named file.md in a folder named examples.

" + } + } + }, + "GetFileOutput":{ + "type":"structure", + "required":[ + "commitId", + "blobId", + "filePath", + "fileMode", + "fileSize", + "fileContent" + ], + "members":{ + "commitId":{ + "shape":"ObjectId", + "documentation":"

The full commit ID of the commit that contains the content returned by GetFile.

" + }, + "blobId":{ + "shape":"ObjectId", + "documentation":"

The blob ID of the object that represents the file content.

" + }, + "filePath":{ + "shape":"Path", + "documentation":"

The fully qualified path to the specified file. This returns the name and extension of the file.

" + }, + "fileMode":{ + "shape":"FileModeTypeEnum", + "documentation":"

The extrapolated file mode permissions of the blob. Valid values include strings such as EXECUTABLE and not numeric values.

The file mode permissions returned by this API are not the standard file mode permission values, such as 100644, but rather extrapolated values. See below for a full list of supported return values.

" + }, + "fileSize":{ + "shape":"ObjectSize", + "documentation":"

The size of the contents of the file, in bytes.

" + }, + "fileContent":{ + "shape":"FileContent", + "documentation":"

The base-64 encoded binary data object that represents the content of the file.

" + } + } + }, + "GetFolderInput":{ + "type":"structure", + "required":[ + "repositoryName", + "folderPath" + ], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository.

" + }, + "commitSpecifier":{ + "shape":"CommitName", + "documentation":"

A fully-qualified reference used to identify a commit that contains the version of the folder's content to return. A fully-qualified reference can be a commit ID, branch name, tag, or reference such as HEAD. If no specifier is provided, the folder content will be returned as it exists in the HEAD commit.

" + }, + "folderPath":{ + "shape":"Path", + "documentation":"

The fully-qualified path to the folder whose contents will be returned, including the folder name. For example, /examples is a fully-qualified path to a folder named examples that was created off of the root directory (/) of a repository.

" + } + } + }, + "GetFolderOutput":{ + "type":"structure", + "required":[ + "commitId", + "folderPath" + ], + "members":{ + "commitId":{ + "shape":"ObjectId", + "documentation":"

The full commit ID used as a reference for which version of the folder content is returned.

" + }, + "folderPath":{ + "shape":"Path", + "documentation":"

The fully-qualified path of the folder whose contents are returned.

" + }, + "treeId":{ + "shape":"ObjectId", + "documentation":"

The full SHA-1 pointer of the tree information for the commit that contains the folder.

" + }, + "subFolders":{ + "shape":"FolderList", + "documentation":"

The list of folders that exist beneath the specified folder, if any.

" + }, + "files":{ + "shape":"FileList", + "documentation":"

The list of files that exist in the specified folder, if any.

" + }, + "symbolicLinks":{ + "shape":"SymbolicLinkList", + "documentation":"

The list of symbolic links to other files and folders that exist in the specified folder, if any.

" + }, + "subModules":{ + "shape":"SubModuleList", + "documentation":"

The list of submodules that exist in the specified folder, if any.

" + } + } + }, "GetMergeConflictsInput":{ "type":"structure", "required":[ @@ -2004,6 +2338,13 @@ "documentation":"

The specified continuation token is not valid.

", "exception":true }, + "InvalidDeletionParameterException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified deletion parameter is not valid.

", + "exception":true + }, "InvalidDescriptionException":{ "type":"structure", "members":{ @@ -2217,6 +2558,7 @@ "IsCommentDeleted":{"type":"boolean"}, "IsMergeable":{"type":"boolean"}, "IsMerged":{"type":"boolean"}, + "KeepEmptyFolders":{"type":"boolean"}, "LastModifiedDate":{"type":"timestamp"}, "Limit":{ "type":"integer", @@ -2446,11 +2788,12 @@ "type":"structure", "members":{ }, - "documentation":"

The file name is not valid because it has exceeded the character limit for file names. File names, including the path to the file, cannot exceed the character limit.

", + "documentation":"

The user name is not valid because it has exceeded the character limit for file names. File names, including the path to the file, cannot exceed the character limit.

", "exception":true }, "NextToken":{"type":"string"}, "ObjectId":{"type":"string"}, + "ObjectSize":{"type":"long"}, "OrderEnum":{ "type":"string", "enum":[ @@ -2462,7 +2805,7 @@ "type":"structure", "members":{ }, - "documentation":"

The parent commit ID is not valid. The specified parent commit ID does not exist in the specified branch of the repository.

", + "documentation":"

The parent commit ID is not valid because it does not exist. The specified parent commit ID does not exist in the specified branch of the repository.

", "exception":true }, "ParentCommitIdOutdatedException":{ @@ -2495,7 +2838,7 @@ "type":"structure", "members":{ }, - "documentation":"

The filePath for a location cannot be empty or null.

", + "documentation":"

The folderPath for a location cannot be null.

", "exception":true }, "Position":{"type":"long"}, @@ -2725,6 +3068,28 @@ "documentation":"

The pull request status cannot be updated because it is already closed.

", "exception":true }, + "PullRequestCreatedEventMetadata":{ + "type":"structure", + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository where the pull request was created.

" + }, + "sourceCommitId":{ + "shape":"CommitId", + "documentation":"

The commit ID on the source branch used when the pull request was created.

" + }, + "destinationCommitId":{ + "shape":"CommitId", + "documentation":"

The commit ID of the tip of the branch specified as the destination branch when the pull request was created.

" + }, + "mergeBase":{ + "shape":"CommitId", + "documentation":"

The commit ID of the most recent commit that the source branch and the destination branch have in common.

" + } + }, + "documentation":"

Metadata about the pull request that is used when comparing the pull request source with its destination.

" + }, "PullRequestDoesNotExistException":{ "type":"structure", "members":{ @@ -2751,6 +3116,10 @@ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with additional commits or changing the status of a pull request.

" }, + "pullRequestCreatedEventMetadata":{ + "shape":"PullRequestCreatedEventMetadata", + "documentation":"

Information about the source and destination branches for the pull request.

" + }, "pullRequestStatusChangedEventMetadata":{ "shape":"PullRequestStatusChangedEventMetadata", "documentation":"

Information about the change in status for the pull request event.

" @@ -2823,6 +3192,10 @@ "afterCommitId":{ "shape":"CommitId", "documentation":"

The full commit ID of the commit in the source branch that was the tip of the branch at the time the pull request was updated.

" + }, + "mergeBase":{ + "shape":"CommitId", + "documentation":"

The commit ID of the most recent commit that the source branch and the destination branch have in common.

" } }, "documentation":"

Information about an update to the source branch of a pull request.

" @@ -2874,6 +3247,10 @@ "shape":"CommitId", "documentation":"

The full commit ID of the tip of the source branch used to create the pull request. If the pull request branch is updated by a push while the pull request is open, the commit ID will change to reflect the new tip of the branch.

" }, + "mergeBase":{ + "shape":"CommitId", + "documentation":"

The commit ID of the most recent commit that the source branch and the destination branch have in common.

" + }, "mergeMetadata":{ "shape":"MergeMetadata", "documentation":"

Returns metadata about the state of the merge, including whether the merge has been made.

" @@ -2900,7 +3277,7 @@ }, "branchName":{ "shape":"BranchName", - "documentation":"

The name of the branch where you want to add or update the file.

" + "documentation":"

The name of the branch where you want to add or update the file. If this is an empty repository, this branch will be created.

" }, "fileContent":{ "shape":"FileContent", @@ -2916,7 +3293,7 @@ }, "parentCommitId":{ "shape":"CommitId", - "documentation":"

The full commit ID of the head commit in the branch where you want to add or update the file. If the commit ID does not match the ID of the head commit at the time of the operation, an error will occur, and the file will not be added or updated.

" + "documentation":"

The full commit ID of the head commit in the branch where you want to add or update the file. If this is an empty repository, no commit ID is required. If this is not an empty repository, a commit ID is required.

The commit ID must match the ID of the head commit at the time of the operation, or an error will occur, and the file will not be added or updated.

" }, "commitMessage":{ "shape":"Message", @@ -2950,7 +3327,7 @@ }, "treeId":{ "shape":"ObjectId", - "documentation":"

Tree information for the commit that contains this file change.

" + "documentation":"

The full SHA-1 pointer of the tree information for the commit that contains this file change.

" } } }, @@ -3270,6 +3647,54 @@ "documentation":"

The source branch and the destination branch for the pull request are the same. You must specify different branches for the source and destination.

", "exception":true }, + "SubModule":{ + "type":"structure", + "members":{ + "commitId":{ + "shape":"ObjectId", + "documentation":"

The commit ID that contains the reference to the submodule.

" + }, + "absolutePath":{ + "shape":"Path", + "documentation":"

The fully qualified path to the folder that contains the reference to the submodule.

" + }, + "relativePath":{ + "shape":"Path", + "documentation":"

The relative path of the submodule from the folder where the query originated.

" + } + }, + "documentation":"

Returns information about a submodule reference in a repository folder.

" + }, + "SubModuleList":{ + "type":"list", + "member":{"shape":"SubModule"} + }, + "SymbolicLink":{ + "type":"structure", + "members":{ + "blobId":{ + "shape":"ObjectId", + "documentation":"

The blob ID that contains the information about the symbolic link.

" + }, + "absolutePath":{ + "shape":"Path", + "documentation":"

The fully-qualified path to the folder that contains the symbolic link.

" + }, + "relativePath":{ + "shape":"Path", + "documentation":"

The relative path of the symbolic link from the folder where the query originated.

" + }, + "fileMode":{ + "shape":"FileModeTypeEnum", + "documentation":"

The file mode permissions of the blob that contains information about the symbolic link.

" + } + }, + "documentation":"

Returns information about a symbolic link in a repository folder.

" + }, + "SymbolicLinkList":{ + "type":"list", + "member":{"shape":"SymbolicLink"} + }, "Target":{ "type":"structure", "required":[ @@ -3545,5 +3970,5 @@ }, "blob":{"type":"blob"} }, - "documentation":"AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

  • BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.

  • CreateRepository, which creates an AWS CodeCommit repository.

  • DeleteRepository, which deletes an AWS CodeCommit repository.

  • GetRepository, which returns information about a specified repository.

  • ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.

  • UpdateRepositoryDescription, which sets or updates the description of the repository.

  • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository will be able to access it until you send them the new HTTPS or SSH URL to use.

Branches, by calling the following:

  • CreateBranch, which creates a new branch in a specified repository.

  • DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.

  • GetBranch, which returns information about a specified branch.

  • ListBranches, which lists all branches for a specified repository.

  • UpdateDefaultBranch, which changes the default branch for a repository.

Files, by calling the following:

  • PutFile, which adds or modifies a file in a specified repository and branch.

Information about committed code in a repository, by calling the following:

  • GetBlob, which returns the base-64 encoded content of an individual Git blob object within a repository.

  • GetCommit, which returns information about a commit, including commit messages and author and committer information.

  • GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference).

Pull requests, by calling the following:

Information about comments in a repository, by calling the following:

Triggers, by calling the following:

  • GetRepositoryTriggers, which returns information about triggers configured for a repository.

  • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.

  • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

" + "documentation":"AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

  • BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.

  • CreateRepository, which creates an AWS CodeCommit repository.

  • DeleteRepository, which deletes an AWS CodeCommit repository.

  • GetRepository, which returns information about a specified repository.

  • ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.

  • UpdateRepositoryDescription, which sets or updates the description of the repository.

  • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository will be able to access it until you send them the new HTTPS or SSH URL to use.

Branches, by calling the following:

  • CreateBranch, which creates a new branch in a specified repository.

  • DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.

  • GetBranch, which returns information about a specified branch.

  • ListBranches, which lists all branches for a specified repository.

  • UpdateDefaultBranch, which changes the default branch for a repository.

Files, by calling the following:

  • DeleteFile, which deletes the content of a specified file from a specified branch.

  • GetFile, which returns the base-64 encoded content of a specified file.

  • GetFolder, which returns the contents of a specified folder or directory.

  • PutFile, which adds or modifies a file in a specified repository and branch.

Information about committed code in a repository, by calling the following:

  • GetBlob, which returns the base-64 encoded content of an individual Git blob object within a repository.

  • GetCommit, which returns information about a commit, including commit messages and author and committer information.

  • GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference).

Pull requests, by calling the following:

Information about comments in a repository, by calling the following:

Triggers, by calling the following:

  • GetRepositoryTriggers, which returns information about triggers configured for a repository.

  • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.

  • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

" } diff --git a/botocore/data/codestar/2017-04-19/service-2.json b/botocore/data/codestar/2017-04-19/service-2.json index 2c53e815..fe44af17 100644 --- a/botocore/data/codestar/2017-04-19/service-2.json +++ b/botocore/data/codestar/2017-04-19/service-2.json @@ -369,6 +369,10 @@ "clientRequestToken":{ "shape":"ClientRequestToken", "documentation":"

Reserved for future use.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags created for the project.

" } } }, @@ -550,6 +554,10 @@ "projectTemplateId":{ "shape":"ProjectTemplateId", "documentation":"

The ID for the AWS CodeStar project template used to create the project.

" + }, + "status":{ + "shape":"ProjectStatus", + "documentation":"

The project creation or deletion status.

" } } }, @@ -863,6 +871,21 @@ "documentation":"

The specified AWS CodeStar project was not found.

", "exception":true }, + "ProjectStatus":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{ + "shape":"State", + "documentation":"

The phase of completion for a project creation or deletion.

" + }, + "reason":{ + "shape":"Reason", + "documentation":"

In the case of a project creation or deletion failure, a reason for the failure.

" + } + }, + "documentation":"

An indication of whether a project creation or deletion has failed or succeeded.

" + }, "ProjectSummary":{ "type":"structure", "members":{ @@ -880,12 +903,17 @@ "ProjectTemplateId":{ "type":"string", "min":1, - "pattern":"^arn:aws[^:\\s]{0,5}:codestar:[^:\\s]+::project-template\\/[a-z0-9-]+$" + "pattern":"^arn:aws[^:\\s]{0,5}:codestar:[^:\\s]+::project-template(\\/(github|codecommit))?\\/[a-z0-9-]+$" }, "ProjectsList":{ "type":"list", "member":{"shape":"ProjectSummary"} }, + "Reason":{ + "type":"string", + "max":1024, + "pattern":"^$|^\\S(.*\\S)?$" + }, "RemoteAccessAllowed":{"type":"boolean"}, "Resource":{ "type":"structure", @@ -920,6 +948,10 @@ "type":"string", "pattern":"^arn:aws[^:\\s]*:cloudformation:[^:\\s]+:[0-9]{12}:stack\\/[^:\\s]+\\/[^:\\s]+$" }, + "State":{ + "type":"string", + "pattern":"^(CreateInProgress|CreateComplete|CreateFailed|DeleteComplete|DeleteFailed|DeleteInProgress|UpdateComplete|UpdateInProgress|UpdateFailed|Unknown)$" + }, "TagKey":{ "type":"string", "max":128, @@ -1165,7 +1197,8 @@ "type":"string", "max":64, "min":1, - "pattern":"^\\S(.*\\S)?$" + "pattern":"^\\S(.*\\S)?$", + "sensitive":true }, "UserProfileNotFoundException":{ "type":"structure", diff --git a/botocore/data/cognito-idp/2016-04-18/service-2.json b/botocore/data/cognito-idp/2016-04-18/service-2.json index f861dd24..39e01171 100644 --- a/botocore/data/cognito-idp/2016-04-18/service-2.json +++ b/botocore/data/cognito-idp/2016-04-18/service-2.json @@ -778,6 +778,7 @@ {"shape":"InvalidParameterException"}, {"shape":"NotAuthorizedException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], "documentation":"

Creates a new domain for a user pool.

" @@ -1023,7 +1024,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Client method for returning the configuration information and metadata of the specified user pool client.

" + "documentation":"

Client method for returning the configuration information and metadata of the specified user pool app client.

" }, "DescribeUserPoolDomain":{ "name":"DescribeUserPoolDomain", @@ -1819,7 +1820,7 @@ {"shape":"UserPoolTaggingException"}, {"shape":"InvalidEmailRoleAccessPolicyException"} ], - "documentation":"

Updates the specified user pool with the specified attributes.

" + "documentation":"

Updates the specified user pool with the specified attributes. If you don't provide a value for an attribute, it will be set to the default value. You can get a list of the current user pool settings with .

" }, "UpdateUserPoolClient":{ "name":"UpdateUserPoolClient", @@ -1839,7 +1840,7 @@ {"shape":"InvalidOAuthFlowException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Allows the developer to update the specified user pool client and password policy.

" + "documentation":"

Updates the specified user pool app client with the specified attributes. If you don't provide a value for an attribute, it will be set to the default value. You can get a list of the current user pool app client settings with .

" }, "VerifySoftwareToken":{ "name":"VerifySoftwareToken", @@ -3649,7 +3650,7 @@ }, "CallbackURLs":{ "shape":"CallbackURLsListType", - "documentation":"

A list of allowed redirect (callback) URLs for the identity providers.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not use HTTP without TLS (i.e. use HTTPS instead of HTTP).

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

" + "documentation":"

A list of allowed redirect (callback) URLs for the identity providers.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.

App callback URLs such as myapp://example are also supported.

" }, "LogoutURLs":{ "shape":"LogoutURLsListType", @@ -3657,7 +3658,7 @@ }, "DefaultRedirectURI":{ "shape":"RedirectUrlType", - "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not use HTTP without TLS (i.e. use HTTPS instead of HTTP).

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

" + "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.

App callback URLs such as myapp://example are also supported.

" }, "AllowedOAuthFlows":{ "shape":"OAuthFlowsType", @@ -3702,12 +3703,20 @@ "UserPoolId":{ "shape":"UserPoolIdType", "documentation":"

The user pool ID.

" + }, + "CustomDomainConfig":{ + "shape":"CustomDomainConfigType", + "documentation":"

The configuration for a custom domain that hosts the sign-up and sign-in webpages for your application.

Provide this parameter only if you want to use own custom domain for your user pool. Otherwise, you can exclude this parameter and use the Amazon Cognito hosted domain instead.

For more information about the hosted domain and custom domains, see Configuring a User Pool Domain.

" } } }, "CreateUserPoolDomainResponse":{ "type":"structure", "members":{ + "CloudFrontDomain":{ + "shape":"DomainType", + "documentation":"

The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider.

" + } } }, "CreateUserPoolRequest":{ @@ -3815,6 +3824,17 @@ "max":25, "min":1 }, + "CustomDomainConfigType":{ + "type":"structure", + "required":["CertificateArn"], + "members":{ + "CertificateArn":{ + "shape":"ArnType", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Certificate Manager SSL certificate. You use this certificate for the subdomain of your custom domain.

" + } + }, + "documentation":"

The configuration for a custom domain that hosts the sign-up and sign-in webpages for your application.

" + }, "DateType":{"type":"timestamp"}, "DefaultEmailOptionType":{ "type":"string", @@ -4245,7 +4265,7 @@ "documentation":"

The S3 bucket where the static files for this domain are stored.

" }, "CloudFrontDistribution":{ - "shape":"ArnType", + "shape":"StringType", "documentation":"

The ARN of the CloudFront distribution.

" }, "Version":{ @@ -4255,7 +4275,8 @@ "Status":{ "shape":"DomainStatusType", "documentation":"

The domain status.

" - } + }, + "CustomDomainConfig":{"shape":"CustomDomainConfigType"} }, "documentation":"

A container for information about a domain.

" }, @@ -5441,7 +5462,7 @@ }, "Filter":{ "shape":"UserFilterType", - "documentation":"

A filter string of the form \"AttributeName Filter-Type \"AttributeValue\"\". Quotation marks within the filter string must be escaped using the backslash (\\) character. For example, \"family_name = \\\"Reddy\\\"\".

  • AttributeName: The name of the attribute to search for. You can only search for one attribute at a time.

  • Filter-Type: For an exact match, use =, for example, \"given_name = \\\"Jon\\\"\". For a prefix (\"starts with\") match, use ^=, for example, \"given_name ^= \\\"Jon\\\"\".

  • AttributeValue: The attribute value that must be matched for each user.

If the filter string is empty, ListUsers returns all users in the user pool.

You can only search for the following standard attributes:

  • username (case-sensitive)

  • email

  • phone_number

  • name

  • given_name

  • family_name

  • preferred_username

  • cognito:user_status (called Enabled in the Console) (case-sensitive)

  • status (case-insensitive)

  • sub

Custom attributes are not searchable.

For more information, see Searching for Users Using the ListUsers API and Examples of Using the ListUsers API in the Amazon Cognito Developer Guide.

" + "documentation":"

A filter string of the form \"AttributeName Filter-Type \"AttributeValue\"\". Quotation marks within the filter string must be escaped using the backslash (\\) character. For example, \"family_name = \\\"Reddy\\\"\".

  • AttributeName: The name of the attribute to search for. You can only search for one attribute at a time.

  • Filter-Type: For an exact match, use =, for example, \"given_name = \\\"Jon\\\"\". For a prefix (\"starts with\") match, use ^=, for example, \"given_name ^= \\\"Jon\\\"\".

  • AttributeValue: The attribute value that must be matched for each user.

If the filter string is empty, ListUsers returns all users in the user pool.

You can only search for the following standard attributes:

  • username (case-sensitive)

  • email

  • phone_number

  • name

  • given_name

  • family_name

  • preferred_username

  • cognito:user_status (called Status in the Console) (case-insensitive)

  • status (called Enabled in the Console) (case-sensitive)

  • sub

Custom attributes are not searchable.

For more information, see Searching for Users Using the ListUsers API and Examples of Using the ListUsers API in the Amazon Cognito Developer Guide.

" } }, "documentation":"

Represents the request to list users.

" @@ -6858,7 +6879,7 @@ }, "CallbackURLs":{ "shape":"CallbackURLsListType", - "documentation":"

A list of allowed redirect (callback) URLs for the identity providers.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not use HTTP without TLS (i.e. use HTTPS instead of HTTP).

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

" + "documentation":"

A list of allowed redirect (callback) URLs for the identity providers.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.

App callback URLs such as myapp://example are also supported.

" }, "LogoutURLs":{ "shape":"LogoutURLsListType", @@ -6866,7 +6887,7 @@ }, "DefaultRedirectURI":{ "shape":"RedirectUrlType", - "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not use HTTP without TLS (i.e. use HTTPS instead of HTTP).

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

" + "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.

App callback URLs such as myapp://example are also supported.

" }, "AllowedOAuthFlows":{ "shape":"OAuthFlowsType", @@ -7215,7 +7236,7 @@ }, "CallbackURLs":{ "shape":"CallbackURLsListType", - "documentation":"

A list of allowed redirect (callback) URLs for the identity providers.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not use HTTP without TLS (i.e. use HTTPS instead of HTTP).

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

" + "documentation":"

A list of allowed redirect (callback) URLs for the identity providers.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.

App callback URLs such as myapp://example are also supported.

" }, "LogoutURLs":{ "shape":"LogoutURLsListType", @@ -7223,7 +7244,7 @@ }, "DefaultRedirectURI":{ "shape":"RedirectUrlType", - "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not use HTTP without TLS (i.e. use HTTPS instead of HTTP).

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

" + "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

A redirect URI must:

  • Be an absolute URI.

  • Be registered with the authorization server.

  • Not include a fragment component.

See OAuth 2.0 - Redirection Endpoint.

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.

App callback URLs such as myapp://example are also supported.

" }, "AllowedOAuthFlows":{ "shape":"OAuthFlowsType", @@ -7425,6 +7446,7 @@ "shape":"DomainType", "documentation":"

Holds the domain prefix if the user pool has a domain associated with it.

" }, + "CustomDomain":{"shape":"DomainType"}, "AdminCreateUserConfig":{ "shape":"AdminCreateUserConfigType", "documentation":"

The configuration for AdminCreateUser requests.

" diff --git a/botocore/data/config/2014-11-12/service-2.json b/botocore/data/config/2014-11-12/service-2.json index c8e4a5b4..75759988 100644 --- a/botocore/data/config/2014-11-12/service-2.json +++ b/botocore/data/config/2014-11-12/service-2.json @@ -1143,6 +1143,10 @@ "ConfigRuleState":{ "shape":"ConfigRuleState", "documentation":"

Indicates whether the AWS Config rule is active or is currently being deleted by AWS Config. It can also indicate the evaluation status for the AWS Config rule.

AWS Config sets the state of the rule to EVALUATING temporarily after you use the StartConfigRulesEvaluation request to evaluate your resources against the AWS Config rule.

AWS Config sets the state of the rule to DELETING_RESULTS temporarily after you use the DeleteEvaluationResults request to delete the current evaluation results for the AWS Config rule.

AWS Config temporarily sets the state of a rule to DELETING after you use the DeleteConfigRule request to delete the rule. After AWS Config deletes the rule, the rule and all of its evaluations are erased and are no longer available.

" + }, + "CreatedBy":{ + "shape":"StringWithCharLimit256", + "documentation":"

Service principal name of the service that created the rule.

The field is populated only if the service linked rule is created by a service. The field is empty if you create your own rule.

" } }, "documentation":"

An AWS Config rule represents an AWS Lambda function that you create for a custom rule or a predefined function for an AWS managed rule. The function evaluates configuration items to assess whether your AWS resources comply with your desired configurations. This function can run when AWS Config detects a configuration change to an AWS resource and at a periodic frequency that you choose (for example, every 24 hours).

You can use the AWS CLI and AWS SDKs if you want to create a rule that triggers evaluations for your resources when AWS Config delivers the configuration snapshot. For more information, see ConfigSnapshotDeliveryProperties.

For more information about developing and using AWS Config rules, see Evaluating AWS Resource Configurations with AWS Config in the AWS Config Developer Guide.

" @@ -2620,7 +2624,7 @@ "type":"structure", "members":{ }, - "documentation":"

This exception is thrown if an evaluation is in progress or if you call the StartConfigRulesEvaluation API more than once per minute.

", + "documentation":"

For StartConfigRulesEvaluation API, this exception is thrown if an evaluation is in progress or if you call the StartConfigRulesEvaluation API more than once per minute.

For PutConfigurationAggregator API, this exception is thrown if the number of accounts and aggregators exceeds the limit.

", "exception":true }, "ListDiscoveredResourcesRequest":{ diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index 3834d251..6b1dc60d 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -99,6 +99,23 @@ ], "documentation":"

Returns a HierarchyGroupStructure object, which contains data about the levels in the agent hierarchy.

" }, + "GetCurrentMetricData":{ + "name":"GetCurrentMetricData", + "http":{ + "method":"POST", + "requestUri":"/metrics/current/{InstanceId}" + }, + "input":{"shape":"GetCurrentMetricDataRequest"}, + "output":{"shape":"GetCurrentMetricDataResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

The GetCurrentMetricData operation retrieves current metric data from your Amazon Connect instance.

If you are using an IAM account, it must have permission to the connect:GetCurrentMetricData action.

" + }, "GetFederationToken":{ "name":"GetFederationToken", "http":{ @@ -117,6 +134,23 @@ ], "documentation":"

Retrieves a token for federation.

" }, + "GetMetricData":{ + "name":"GetMetricData", + "http":{ + "method":"POST", + "requestUri":"/metrics/historical/{InstanceId}" + }, + "input":{"shape":"GetMetricDataRequest"}, + "output":{"shape":"GetMetricDataResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

The GetMetricData operation retrieves historical metrics data from your Amazon Connect instance.

If you are using an IAM account, it must have permission to the connect:GetMetricData action.

" + }, "ListRoutingProfiles":{ "name":"ListRoutingProfiles", "http":{ @@ -202,7 +236,7 @@ {"shape":"DestinationNotAllowedException"}, {"shape":"OutboundContactNotPermittedException"} ], - "documentation":"

The StartOutboundVoiceContact operation initiates a contact flow to place an outbound call to a customer.

There is a throttling limit placed on usage of the API that includes a RateLimit of 2 per second, and a BurstLimit of 5 per second.

If you are using an IAM account, it must have permission to the connect:StartOutboundVoiceContact action.

" + "documentation":"

The StartOutboundVoiceContact operation initiates a contact flow to place an outbound call to a customer.

If you are using an IAM account, it must have permission to the connect:StartOutboundVoiceContact action.

" }, "StopContact":{ "name":"StopContact", @@ -221,6 +255,22 @@ ], "documentation":"

Ends the contact initiated by the StartOutboundVoiceContact operation.

If you are using an IAM account, it must have permission to the connect:StopContact action.

" }, + "UpdateContactAttributes":{ + "name":"UpdateContactAttributes", + "http":{ + "method":"POST", + "requestUri":"/contact/attributes" + }, + "input":{"shape":"UpdateContactAttributesRequest"}, + "output":{"shape":"UpdateContactAttributesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

The UpdateContactAttributes operation lets you programmatically create new, or update existing, contact attributes associated with a contact. You can use the operation to add or update attributes for both ongoing and completed contacts. For example, you can update the customer's name or the reason the customer called while the call is active, or add notes about steps that the agent took during the call that are displayed to the next agent that takes the call. You can also use the UpdateContactAttributes operation to update attributes for a contact using data from your CRM application and save the data with the contact in Amazon Connect. You could also flag calls for additional analysis, such as legal review or identifying abusive callers.

Contact attributes are available in Amazon Connect for 24 months, and are then deleted.

Important:

You cannot use the operation to update attributes for contacts that occurred prior to the release of the API, September 12, 2018. You can update attributes only for contacts that started after the release of the API. If you attempt to update attributes for a contact that occurred prior to the release of the API, a 400 error is returned. This applies also to queued callbacks that were initiated prior to the release of the API but are still active in your instance.

" + }, "UpdateUserHierarchy":{ "name":"UpdateUserHierarchy", "http":{ @@ -299,7 +349,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Update the security profiles assigned to the user.

" + "documentation":"

Updates the security profiles assigned to the user.

" } }, "shapes":{ @@ -326,36 +376,43 @@ }, "AttributeName":{ "type":"string", - "documentation":"Key for the key value pair to be used for additional attributes.", "max":32767, "min":1 }, "AttributeValue":{ "type":"string", - "documentation":"Value for the key value pair to be used for additional attributes.", "max":32767, "min":0 }, "Attributes":{ "type":"map", "key":{"shape":"AttributeName"}, - "value":{"shape":"AttributeValue"}, - "documentation":"Additional attributes can be provided in the request using this field. This will be passed to the contact flow execution. Client can make use of this additional info in their contact flow." + "value":{"shape":"AttributeValue"} }, "AutoAccept":{"type":"boolean"}, + "Channel":{ + "type":"string", + "enum":["VOICE"] + }, + "Channels":{ + "type":"list", + "member":{"shape":"Channel"}, + "max":1 + }, "ClientToken":{ "type":"string", - "documentation":"Dedupe token to be provided by the client. This token is used to avoid duplicate calls to the customer.", "max":500 }, + "Comparison":{ + "type":"string", + "enum":["LT"] + }, "ContactFlowId":{ "type":"string", - "documentation":"Amazon resource name for the contact flow to be executed to handle the current call.", "max":500 }, "ContactId":{ "type":"string", - "documentation":"Amazon Connect contact identifier. An unique ContactId will be generated for each contact request.", "max":256, "min":1 }, @@ -383,7 +440,7 @@ "members":{ "Username":{ "shape":"AgentUsername", - "documentation":"

The user name in Amazon Connect for the user to create.

" + "documentation":"

The user name in Amazon Connect for the account to create.

" }, "Password":{ "shape":"Password", @@ -456,6 +513,77 @@ }, "documentation":"

The credentials to use for federation.

" }, + "CurrentMetric":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"CurrentMetricName", + "documentation":"

The name of the metric.

" + }, + "Unit":{ + "shape":"Unit", + "documentation":"

The unit for the metric.

" + } + }, + "documentation":"

A CurrentMetric object that contains the Name and Unit for the metric.

" + }, + "CurrentMetricData":{ + "type":"structure", + "members":{ + "Metric":{ + "shape":"CurrentMetric", + "documentation":"

The metric in a CurrentMetricData object.

" + }, + "Value":{ + "shape":"Value", + "documentation":"

The value of the metric in the CurrentMetricData object.

", + "box":true + } + }, + "documentation":"

A CurrentMetricData object.

" + }, + "CurrentMetricDataCollections":{ + "type":"list", + "member":{"shape":"CurrentMetricData"} + }, + "CurrentMetricName":{ + "type":"string", + "documentation":"

A list of current metric names.

", + "enum":[ + "AGENTS_ONLINE", + "AGENTS_AVAILABLE", + "AGENTS_ON_CALL", + "AGENTS_NON_PRODUCTIVE", + "AGENTS_AFTER_CONTACT_WORK", + "AGENTS_ERROR", + "AGENTS_STAFFED", + "CONTACTS_IN_QUEUE", + "OLDEST_CONTACT_AGE", + "CONTACTS_SCHEDULED" + ] + }, + "CurrentMetricResult":{ + "type":"structure", + "members":{ + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

The Dimensions for the CurrentMetricResult object.

" + }, + "Collections":{ + "shape":"CurrentMetricDataCollections", + "documentation":"

The Collections for the CurrentMetricResult object.

" + } + }, + "documentation":"

A CurrentMetricResult object.

" + }, + "CurrentMetricResults":{ + "type":"list", + "member":{"shape":"CurrentMetricResult"} + }, + "CurrentMetrics":{ + "type":"list", + "member":{"shape":"CurrentMetric"} + }, "DeleteUserRequest":{ "type":"structure", "required":[ @@ -570,17 +698,99 @@ "error":{"httpStatusCode":403}, "exception":true }, + "Dimensions":{ + "type":"structure", + "members":{ + "Queue":{ + "shape":"QueueReference", + "documentation":"

A QueueReference object used as one part of dimension for the metrics results.

" + }, + "Channel":{ + "shape":"Channel", + "documentation":"

The channel used for grouping and filters. Only VOICE is supported.

" + } + }, + "documentation":"

A Dimensions object that includes the Channel and Queue for the metric.

" + }, "DirectoryUserId":{"type":"string"}, "DuplicateResourceException":{ "type":"structure", "members":{ "Message":{"shape":"Message"} }, - "documentation":"

A resource with that name already exisits.

", + "documentation":"

A resource with that name already exists.

", "error":{"httpStatusCode":409}, "exception":true }, "Email":{"type":"string"}, + "Filters":{ + "type":"structure", + "members":{ + "Queues":{ + "shape":"Queues", + "documentation":"

A list of up to 100 queue IDs or queue ARNs to use to filter the metrics retrieved. You can include both IDs and ARNs in a request.

" + }, + "Channels":{ + "shape":"Channels", + "documentation":"

The Channel to use as a filter for the metrics returned. Only VOICE is supported.

" + } + }, + "documentation":"

The filter, either channel or queues, to apply to the metric results retrieved.

" + }, + "GetCurrentMetricDataRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Filters", + "CurrentMetrics" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"Filters", + "documentation":"

A Filters object that contains a list of queue IDs or queue ARNs, up to 100, or list of Channels to use to filter the metrics returned in the response. Metric data is retrieved only for the resources associated with the queue IDs, ARNs, or Channels included in the filter. You can include both IDs and ARNs in the same request. To retrieve metrics for all queues, add the queue ID or ARN for each queue in your instance. Only VOICE is supported for Channels.

To find the ARN for a queue, open the queue you want to use in the Amazon Connect Queue editor. The ARN for the queue is displayed in the address bar as part of the URL. For example, the queue ARN is the set of characters at the end of the URL, after 'id=' such as arn:aws:connect:us-east-1:270923740243:instance/78fb859d-1b7d-44b1-8aa3-12f0835c5855/queue/1d1a4575-9618-40ab-bbeb-81e45795fe61. The queue ID is also included in the URL, and is the string after 'queue/'.

" + }, + "Groupings":{ + "shape":"Groupings", + "documentation":"

The grouping applied to the metrics returned. For example, when grouped by QUEUE, the metrics returned apply to each queue rather than aggregated for all queues. If you group by CHANNEL, you should include a Channels filter. The only supported channel is VOICE.

If no Grouping is included in the request, a summary of CurrentMetrics is returned.

" + }, + "CurrentMetrics":{ + "shape":"CurrentMetrics", + "documentation":"

A list of CurrentMetric objects for the metrics to retrieve. Each CurrentMetric includes a name of a metric to retrieve and the unit to use for it.

The following metrics are available:

AGENTS_AVAILABLE

Unit: COUNT

AGENTS_ONLINE

Unit: COUNT

AGENTS_ON_CALL

Unit: COUNT

AGENTS_STAFFED

Unit: COUNT

AGENTS_AFTER_CONTACT_WORK

Unit: COUNT

AGENTS_NON_PRODUCTIVE

Unit: COUNT

AGENTS_ERROR

Unit: COUNT

CONTACTS_IN_QUEUE

Unit: COUNT

OLDEST_CONTACT_AGE

Unit: SECONDS

CONTACTS_SCHEDULED

Unit: COUNT

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

The token expires after 5 minutes from the time it is created. Subsequent requests that use the NextToken must use the same request parameters as the request that generated the token.

" + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

MaxResults indicates the maximum number of results to return per page in the response, between 1 and 100.

", + "box":true + } + } + }, + "GetCurrentMetricDataResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string returned in the response. Use the value returned in the response as the value of the NextToken in a subsequent request to retrieve the next set of results.

The token expires after 5 minutes from the time it is created. Subsequent requests that use the NextToken must use the same request parameters as the request that generated the token.

" + }, + "MetricResults":{ + "shape":"CurrentMetricResults", + "documentation":"

A list of CurrentMetricResult objects organized by Dimensions combining with CurrentMetricDataCollections.

Dimensions is the resourceId specified in the Filters of the request.

Collections is a list of CurrentMetricData objects with corresponding values to the CurrentMetrics specified in the request.

If no Grouping is specified in the request, Collections is a summary for the CurrentMetric returned.

" + }, + "DataSnapshotTime":{ + "shape":"timestamp", + "documentation":"

The time at which CurrentMetricData was retrieved and cached for pagination.

" + } + } + }, "GetFederationTokenRequest":{ "type":"structure", "required":["InstanceId"], @@ -602,6 +812,78 @@ } } }, + "GetMetricDataRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "StartTime", + "EndTime", + "Filters", + "HistoricalMetrics" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

", + "location":"uri", + "locationName":"InstanceId" + }, + "StartTime":{ + "shape":"timestamp", + "documentation":"

The timestamp, in UNIX Epoch time format, at which to start the reporting interval for the retrieval of historical metrics data. The time must be specified using a multiple of 5 minutes, such as 10:05, 10:10, 10:15.

StartTime cannot be earlier than 24 hours before the time of the request. Historical metrics are available in Amazon Connect only for 24 hours.

" + }, + "EndTime":{ + "shape":"timestamp", + "documentation":"

The timestamp, in UNIX Epoch time format, at which to end the reporting interval for the retrieval of historical metrics data. The time must be specified using an interval of 5 minutes, such as 11:00, 11:05, 11:10, and must be later than the StartTime timestamp.

The time range between StartTime and EndTime must be less than 24 hours.

" + }, + "Filters":{ + "shape":"Filters", + "documentation":"

A Filters object that contains a list of queue IDs or queue ARNs, up to 100, or a list of Channels to use to filter the metrics returned in the response. Metric data is retrieved only for the resources associated with the IDs, ARNs, or Channels included in the filter. You can use both IDs and ARNs together in a request. Only VOICE is supported for Channel.

To find the ARN for a queue, open the queue you want to use in the Amazon Connect Queue editor. The ARN for the queue is displayed in the address bar as part of the URL. For example, the queue ARN is the set of characters at the end of the URL, after 'id=' such as arn:aws:connect:us-east-1:270923740243:instance/78fb859d-1b7d-44b1-8aa3-12f0835c5855/queue/1d1a4575-9618-40ab-bbeb-81e45795fe61. The queue ID is also included in the URL, and is the string after 'queue/'.

" + }, + "Groupings":{ + "shape":"Groupings", + "documentation":"

The grouping applied to the metrics returned. For example, when results are grouped by queueId, the metrics returned are grouped by queue. The values returned apply to the metrics for each queue rather than aggregated for all queues.

The current version supports grouping by Queue

If no Grouping is included in the request, a summary of HistoricalMetrics for all queues is returned.

" + }, + "HistoricalMetrics":{ + "shape":"HistoricalMetrics", + "documentation":"

A list of HistoricalMetric objects that contain the metrics to retrieve with the request.

A HistoricalMetric object contains: HistoricalMetricName, Statistic, Threshold, and Unit.

For each historical metric you include in the request, you must include a Unit and a Statistic.

The following historical metrics are available:

CONTACTS_QUEUED

Unit: COUNT

Statistic: SUM

CONTACTS_HANDLED

Unit: COUNT

Statistics: SUM

CONTACTS_ABANDONED

Unit: COUNT

Statistics: SUM

CONTACTS_CONSULTED

Unit: COUNT

Statistics: SUM

CONTACTS_AGENT_HUNG_UP_FIRST

Unit: COUNT

Statistics: SUM

CONTACTS_HANDLED_INCOMING

Unit: COUNT

Statistics: SUM

CONTACTS_HANDLED_OUTBOUND

Unit: COUNT

Statistics: SUM

CONTACTS_HOLD_ABANDONS

Unit: COUNT

Statistics: SUM

CONTACTS_TRANSFERRED_IN

Unit: COUNT

Statistics: SUM

CONTACTS_TRANSFERRED_OUT

Unit: COUNT

Statistics: SUM

CONTACTS_TRANSFERRED_IN_FROM_QUEUE

Unit: COUNT

Statistics: SUM

CONTACTS_TRANSFERRED_OUT_FROM_QUEUE

Unit: COUNT

Statistics: SUM

CALLBACK_CONTACTS_HANDLED

Unit: COUNT

Statistics: SUM

CALLBACK_CONTACTS_HANDLED

Unit: COUNT

Statistics: SUM

API_CONTACTS_HANDLED

Unit: COUNT

Statistics: SUM

CONTACTS_MISSED

Unit: COUNT

Statistics: SUM

OCCUPANCY

Unit: PERCENT

Statistics: AVG

HANDLE_TIME

Unit: SECONDS

Statistics: AVG

AFTER_CONTACT_WORK_TIME

Unit: SECONDS

Statistics: AVG

QUEUED_TIME

Unit: SECONDS

Statistics: MAX

ABANDON_TIME

Unit: COUNT

Statistics: SUM

QUEUE_ANSWER_TIME

Unit: SECONDS

Statistics: AVG

HOLD_TIME

Unit: SECONDS

Statistics: AVG

INTERACTION_TIME

Unit: SECONDS

Statistics: AVG

INTERACTION_AND_HOLD_TIME

Unit: SECONDS

Statistics: AVG

SERVICE_LEVEL

Unit: PERCENT

Statistics: AVG

Threshold: Only \"Less than\" comparisons are supported, with the following service level thresholds: 15, 20, 25, 30, 45, 60, 90, 120, 180, 240, 300, 600

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

Indicates the maximum number of results to return per page in the response, between 1-100.

", + "box":true + } + } + }, + "GetMetricDataResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string returned in the response. Use the value returned in the response as the value of the NextToken in a subsequent request to retrieve the next set of results.

The token expires after 5 minutes from the time it is created. Subsequent requests that use the NextToken must use the same request parameters as the request that generated the token.

" + }, + "MetricResults":{ + "shape":"HistoricalMetricResults", + "documentation":"

A list of HistoricalMetricResult objects, organized by Dimensions, which is the ID of the resource specified in the Filters used for the request. The metrics are combined with the metrics included in Collections, which is a list of HistoricalMetricData objects.

If no Grouping is specified in the request, Collections includes summary data for the HistoricalMetrics.

" + } + } + }, + "Grouping":{ + "type":"string", + "enum":[ + "QUEUE", + "CHANNEL" + ] + }, + "Groupings":{ + "type":"list", + "member":{"shape":"Grouping"}, + "max":2 + }, "HierarchyGroup":{ "type":"structure", "members":{ @@ -724,9 +1006,103 @@ }, "documentation":"

A HierarchyStructure object that contains information about the hierarchy group structure.

" }, + "HistoricalMetric":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"HistoricalMetricName", + "documentation":"

The name of the historical metric.

" + }, + "Threshold":{ + "shape":"Threshold", + "documentation":"

The threshold for the metric, used with service level metrics.

", + "box":true + }, + "Statistic":{ + "shape":"Statistic", + "documentation":"

The statistic for the metric: SUM, MAX, or AVG.

" + }, + "Unit":{ + "shape":"Unit", + "documentation":"

The unit for the metric: COUNT, PERCENT, or SECONDS.

" + } + }, + "documentation":"

A HistoricalMetric object that contains the Name, Unit, Statistic, and Threshold for the metric.

" + }, + "HistoricalMetricData":{ + "type":"structure", + "members":{ + "Metric":{ + "shape":"HistoricalMetric", + "documentation":"

A HistoricalMetric object.

" + }, + "Value":{ + "shape":"Value", + "documentation":"

The Value of the metric.

", + "box":true + } + }, + "documentation":"

A HistoricalMetricData object that contains a Metric and a Value.

" + }, + "HistoricalMetricDataCollections":{ + "type":"list", + "member":{"shape":"HistoricalMetricData"} + }, + "HistoricalMetricName":{ + "type":"string", + "documentation":"

A list of historical metric names.

", + "enum":[ + "CONTACTS_QUEUED", + "CONTACTS_HANDLED", + "CONTACTS_ABANDONED", + "CONTACTS_CONSULTED", + "CONTACTS_AGENT_HUNG_UP_FIRST", + "CONTACTS_HANDLED_INCOMING", + "CONTACTS_HANDLED_OUTBOUND", + "CONTACTS_HOLD_ABANDONS", + "CONTACTS_TRANSFERRED_IN", + "CONTACTS_TRANSFERRED_OUT", + "CONTACTS_TRANSFERRED_IN_FROM_QUEUE", + "CONTACTS_TRANSFERRED_OUT_FROM_QUEUE", + "CONTACTS_MISSED", + "CALLBACK_CONTACTS_HANDLED", + "API_CONTACTS_HANDLED", + "OCCUPANCY", + "HANDLE_TIME", + "AFTER_CONTACT_WORK_TIME", + "QUEUED_TIME", + "ABANDON_TIME", + "QUEUE_ANSWER_TIME", + "HOLD_TIME", + "INTERACTION_TIME", + "INTERACTION_AND_HOLD_TIME", + "SERVICE_LEVEL" + ] + }, + "HistoricalMetricResult":{ + "type":"structure", + "members":{ + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

The Dimensions for the metrics.

" + }, + "Collections":{ + "shape":"HistoricalMetricDataCollections", + "documentation":"

A list of HistoricalMetricData objects.

" + } + }, + "documentation":"

The metrics data returned from a GetMetricData operation.

" + }, + "HistoricalMetricResults":{ + "type":"list", + "member":{"shape":"HistoricalMetricResult"} + }, + "HistoricalMetrics":{ + "type":"list", + "member":{"shape":"HistoricalMetric"} + }, "InstanceId":{ "type":"string", - "documentation":"Amazon Connect Organization ARN. A client must provide its organization ARN in order to place a call. This defines the call from organization.", "max":100, "min":1 }, @@ -774,7 +1150,7 @@ "documentation":"

The message.

" } }, - "documentation":"

The limit exceeded the maximum allowed active calls in a queue.

", + "documentation":"

The allowed limit for the resource has been reached.

", "error":{"httpStatusCode":429}, "exception":true }, @@ -930,6 +1306,11 @@ } } }, + "MaxResult100":{ + "type":"integer", + "max":100, + "min":1 + }, "MaxResult1000":{ "type":"integer", "max":1000, @@ -951,12 +1332,9 @@ }, "Password":{ "type":"string", - "pattern":"/^(?=.*[a-z])(?=.*[A-Z])(?=.*\\d)[a-zA-Z\\d\\S]{8,}$/" - }, - "PhoneNumber":{ - "type":"string", - "documentation":"End customer's phone number to call." + "pattern":"/^(?=.*[a-z])(?=.*[A-Z])(?=.*\\d)[a-zA-Z\\d\\S]{8,64}$/" }, + "PhoneNumber":{"type":"string"}, "PhoneType":{ "type":"string", "enum":[ @@ -964,9 +1342,26 @@ "DESK_PHONE" ] }, - "QueueId":{ - "type":"string", - "documentation":"Identifier of the queue to be used for the contact routing." + "QueueId":{"type":"string"}, + "QueueReference":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"QueueId", + "documentation":"

The ID of the queue associated with the metrics returned.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the queue.

" + } + }, + "documentation":"

A QueueReference object that contains the QueueId and ARN for the queue resource for which metrics are returned.

" + }, + "Queues":{ + "type":"list", + "member":{"shape":"QueueId"}, + "max":100, + "min":1 }, "ResourceNotFoundException":{ "type":"structure", @@ -1090,6 +1485,14 @@ } } }, + "Statistic":{ + "type":"string", + "enum":[ + "SUM", + "MAX", + "AVG" + ] + }, "StopContactRequest":{ "type":"structure", "required":[ @@ -1112,6 +1515,22 @@ "members":{ } }, + "Threshold":{ + "type":"structure", + "members":{ + "Comparison":{ + "shape":"Comparison", + "documentation":"

The Threshold to use to compare service level metrics to. Only \"Less than\" (LT) comparisons are supported.

" + }, + "ThresholdValue":{ + "shape":"ThresholdValue", + "documentation":"

The value of the threshold to compare the metric to. Only \"Less than\" (LT) comparisons are supported.

", + "box":true + } + }, + "documentation":"

A Threshold object that includes a comparison and ThresholdValue to compare to. Used with service level metrics.

" + }, + "ThresholdValue":{"type":"double"}, "ThrottlingException":{ "type":"structure", "members":{ @@ -1121,6 +1540,41 @@ "error":{"httpStatusCode":429}, "exception":true }, + "Unit":{ + "type":"string", + "enum":[ + "SECONDS", + "COUNT", + "PERCENT" + ] + }, + "UpdateContactAttributesRequest":{ + "type":"structure", + "required":[ + "InitialContactId", + "InstanceId", + "Attributes" + ], + "members":{ + "InitialContactId":{ + "shape":"ContactId", + "documentation":"

The unique identifier of the contact for which to update attributes. This is the identifier for the contact associated with the first interaction with the contact center.

" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier for your Amazon Connect instance. To find the ID of your instance, open the AWS console and select Amazon Connect. Select the alias of the instance in the Instance alias column. The instance ID is displayed in the Overview section of your instance settings. For example, the instance ID is the set of characters at the end of the instance ARN, after instance/, such as 10a4c4eb-f57e-4d4c-b602-bf39176ced07.

" + }, + "Attributes":{ + "shape":"Attributes", + "documentation":"

The key-value pairs for the attribute to update.

" + } + } + }, + "UpdateContactAttributesResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateUserHierarchyRequest":{ "type":"structure", "required":[ @@ -1365,7 +1819,8 @@ "type":"list", "member":{"shape":"UserSummary"} }, + "Value":{"type":"double"}, "timestamp":{"type":"timestamp"} }, - "documentation":"

The Amazon Connect API Reference provides descriptions, syntax, and usage examples for each of the Amazon Connect actions, data types, parameters, and errors. Amazon Connect is a cloud-based contact center solution that makes it easy to set up and manage a customer contact center and provide reliable customer engagement at any scale.

" + "documentation":"

The Amazon Connect API Reference provides descriptions, syntax, and usage examples for each of the Amazon Connect actions, data types, parameters, and errors. Amazon Connect is a cloud-based contact center solution that makes it easy to set up and manage a customer contact center and provide reliable customer engagement at any scale.

There is a throttling limit placed on usage of the Amazon Connect operations that includes a RateLimit of 2 per second, and a BurstLimit of 5 per second.

" } diff --git a/botocore/data/dax/2017-04-19/service-2.json b/botocore/data/dax/2017-04-19/service-2.json index b24a4576..588e9f46 100644 --- a/botocore/data/dax/2017-04-19/service-2.json +++ b/botocore/data/dax/2017-04-19/service-2.json @@ -503,7 +503,8 @@ "required":[ "ClusterName", "NodeType", - "ReplicationFactor" + "ReplicationFactor", + "IamRoleArn" ], "members":{ "ClusterName":{ diff --git a/botocore/data/discovery/2015-11-01/service-2.json b/botocore/data/discovery/2015-11-01/service-2.json index 326ddf19..3cc37b2d 100644 --- a/botocore/data/discovery/2015-11-01/service-2.json +++ b/botocore/data/discovery/2015-11-01/service-2.json @@ -108,7 +108,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"} ], - "documentation":"

Lists agents or the Connector by ID or lists all agents/Connectors associated with your user account if you did not specify an ID.

" + "documentation":"

Lists agents or connectors as specified by ID or other filters. All agents/connectors associated with your user account can be listed if you call DescribeAgents as is without passing any parameters.

" }, "DescribeConfigurations":{ "name":"DescribeConfigurations", @@ -124,7 +124,25 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"} ], - "documentation":"

Retrieves attributes for a list of configuration item IDs. All of the supplied IDs must be for the same asset type (server, application, process, or connection). Output fields are specific to the asset type selected. For example, the output for a server configuration item includes a list of attributes about the server, such as host name, operating system, and number of network cards.

For a complete list of outputs for each asset type, see Using the DescribeConfigurations Action.

" + "documentation":"

Retrieves attributes for a list of configuration item IDs.

All of the supplied IDs must be for the same asset type from one of the following:

  • server

  • application

  • process

  • connection

Output fields are specific to the asset type specified. For example, the output for a server configuration item includes a list of attributes about the server, such as host name, operating system, number of network cards, etc.

For a complete list of outputs for each asset type, see Using the DescribeConfigurations Action.

" + }, + "DescribeContinuousExports":{ + "name":"DescribeContinuousExports", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeContinuousExportsRequest"}, + "output":{"shape":"DescribeContinuousExportsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists exports as specified by ID. All continuous exports associated with your user account can be listed if you call DescribeContinuousExports as is without passing any parameters.

" }, "DescribeExportConfigurations":{ "name":"DescribeExportConfigurations", @@ -141,7 +159,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"} ], - "documentation":"

Deprecated. Use DescribeExportTasks instead.

Retrieves the status of a given export process. You can retrieve status from a maximum of 100 processes.

", + "documentation":"

DescribeExportConfigurations is deprecated.

Use instead DescribeExportTasks .

", "deprecated":true }, "DescribeExportTasks":{ @@ -175,7 +193,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"} ], - "documentation":"

Retrieves a list of configuration items that are tagged with a specific tag. Or retrieves a list of all tags assigned to a specific configuration item.

" + "documentation":"

Retrieves a list of configuration items that have tags as specified by the key-value pairs, name and value, passed to the optional parameter filters.

There are three valid tag filter names:

  • tagKey

  • tagValue

  • configurationId

Also, all configuration items associated with your user account that have tags can be listed if you call DescribeTags as is without passing any parameters.

" }, "DisassociateConfigurationItemsFromApplication":{ "name":"DisassociateConfigurationItemsFromApplication", @@ -224,7 +242,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"} ], - "documentation":"

Retrieves a short summary of discovered assets.

" + "documentation":"

Retrieves a short summary of discovered assets.

This API operation takes no request parameters and is called as is at the command prompt as shown in the example.

" }, "ListConfigurations":{ "name":"ListConfigurations", @@ -241,7 +259,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"} ], - "documentation":"

Retrieves a list of configuration items according to criteria that you specify in a filter. The filter criteria identifies the relationship requirements.

" + "documentation":"

Retrieves a list of configuration items as specified by the value passed to the required parameter configurationType. Optional filtering may be applied to refine search results.

" }, "ListServerNeighbors":{ "name":"ListServerNeighbors", @@ -259,6 +277,25 @@ ], "documentation":"

Retrieves a list of servers that are one network hop away from a specified server.

" }, + "StartContinuousExport":{ + "name":"StartContinuousExport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartContinuousExportRequest"}, + "output":{"shape":"StartContinuousExportResponse"}, + "errors":[ + {"shape":"ConflictErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Start the continuous flow of agent's discovered data into Amazon Athena.

" + }, "StartDataCollectionByAgentIds":{ "name":"StartDataCollectionByAgentIds", "http":{ @@ -292,6 +329,25 @@ ], "documentation":"

Begins the export of discovered data to an S3 bucket.

If you specify agentIds in a filter, the task exports up to 72 hours of detailed data collected by the identified Application Discovery Agent, including network, process, and performance details. A time range for exported agent data may be set by using startTime and endTime. Export of detailed agent data is limited to five concurrently running exports.

If you do not include an agentIds filter, summary data is exported that includes both AWS Agentless Discovery Connector data and summary data from AWS Discovery Agents. Export of summary data is limited to two exports per day.

" }, + "StopContinuousExport":{ + "name":"StopContinuousExport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopContinuousExportRequest"}, + "output":{"shape":"StopContinuousExportResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Stop the continuous flow of agent's discovered data into Amazon Athena.

" + }, "StopDataCollectionByAgentIds":{ "name":"StopDataCollectionByAgentIds", "http":{ @@ -528,6 +584,72 @@ }, "ConfigurationsDownloadUrl":{"type":"string"}, "ConfigurationsExportId":{"type":"string"}, + "ConflictErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

", + "exception":true + }, + "ContinuousExportDescription":{ + "type":"structure", + "members":{ + "exportId":{ + "shape":"ConfigurationsExportId", + "documentation":"

The unique ID assigned to this export.

" + }, + "status":{ + "shape":"ContinuousExportStatus", + "documentation":"

Describes the status of the export. Can be one of the following values:

  • START_IN_PROGRESS - setting up resources to start continuous export.

  • START_FAILED - an error occurred setting up continuous export. To recover, call start-continuous-export again.

  • ACTIVE - data is being exported to the customer bucket.

  • ERROR - an error occurred during export. To fix the issue, call stop-continuous-export and start-continuous-export.

  • STOP_IN_PROGRESS - stopping the export.

  • STOP_FAILED - an error occurred stopping the export. To recover, call stop-continuous-export again.

  • INACTIVE - the continuous export has been stopped. Data is no longer being exported to the customer bucket.

" + }, + "statusDetail":{ + "shape":"StringMax255", + "documentation":"

Contains information about any errors that may have occurred.

" + }, + "s3Bucket":{ + "shape":"S3Bucket", + "documentation":"

The name of the s3 bucket where the export data parquet files are stored.

" + }, + "startTime":{ + "shape":"TimeStamp", + "documentation":"

The timestamp representing when the continuous export was started.

" + }, + "stopTime":{ + "shape":"TimeStamp", + "documentation":"

The timestamp that represents when this continuous export was stopped.

" + }, + "dataSource":{ + "shape":"DataSource", + "documentation":"

The type of data collector used to gather this data (currently only offered for AGENT).

" + }, + "schemaStorageConfig":{ + "shape":"SchemaStorageConfig", + "documentation":"

An object which describes how the data is stored.

  • databaseName - the name of the Glue database used to store the schema.

" + } + }, + "documentation":"

A list of continuous export descriptions.

" + }, + "ContinuousExportDescriptions":{ + "type":"list", + "member":{"shape":"ContinuousExportDescription"} + }, + "ContinuousExportIds":{ + "type":"list", + "member":{"shape":"ConfigurationsExportId"} + }, + "ContinuousExportStatus":{ + "type":"string", + "enum":[ + "START_IN_PROGRESS", + "START_FAILED", + "ACTIVE", + "ERROR", + "STOP_IN_PROGRESS", + "STOP_FAILED", + "INACTIVE" + ] + }, "CreateApplicationRequest":{ "type":"structure", "required":["name"], @@ -659,6 +781,15 @@ }, "documentation":"

Inventory data for installed discovery connectors.

" }, + "DataSource":{ + "type":"string", + "enum":["AGENT"] + }, + "DatabaseName":{ + "type":"string", + "max":252, + "min":1 + }, "DeleteApplicationsRequest":{ "type":"structure", "required":["configurationIds"], @@ -755,20 +886,56 @@ } } }, + "DescribeContinuousExportsMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "DescribeContinuousExportsRequest":{ + "type":"structure", + "members":{ + "exportIds":{ + "shape":"ContinuousExportIds", + "documentation":"

The unique IDs assigned to the exports.

" + }, + "maxResults":{ + "shape":"DescribeContinuousExportsMaxResults", + "documentation":"

A number between 1 and 100 specifying the maximum number of continuous export descriptions returned.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token from the previous call to DescribeExportTasks.

" + } + } + }, + "DescribeContinuousExportsResponse":{ + "type":"structure", + "members":{ + "descriptions":{ + "shape":"ContinuousExportDescriptions", + "documentation":"

A list of continuous export descriptions.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token from the previous call to DescribeExportTasks.

" + } + } + }, "DescribeExportConfigurationsRequest":{ "type":"structure", "members":{ "exportIds":{ "shape":"ExportIds", - "documentation":"

A unique identifier that you can use to query the export status.

" + "documentation":"

A list of continuous export ids to search for.

" }, "maxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results that you want to display as a part of the query.

" + "documentation":"

A number between 1 and 100 specifying the maximum number of continuous export descriptions returned.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

A token to get the next set of results. For example, if you specify 100 IDs for DescribeExportConfigurationsRequest$exportIds but set DescribeExportConfigurationsRequest$maxResults to 10, you get results in a set of 10. Use the token in the query to get the next set of 10.

" + "documentation":"

The token from the previous call to describe-export-tasks.

" } } }, @@ -777,11 +944,11 @@ "members":{ "exportsInfo":{ "shape":"ExportsInfo", - "documentation":"

Returns export details. When the status is complete, the response includes a URL for an Amazon S3 bucket where you can view the data in a CSV file.

" + "documentation":"

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

A token to get the next set of results. For example, if you specify 100 IDs for DescribeExportConfigurationsRequest$exportIds but set DescribeExportConfigurationsRequest$maxResults to 10, you get results in a set of 10. Use the token in the query to get the next set of 10.

" + "documentation":"

The token from the previous call to describe-export-tasks.

" } } }, @@ -1212,6 +1379,14 @@ "type":"list", "member":{"shape":"OrderByElement"} }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

", + "exception":true + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -1220,6 +1395,12 @@ "documentation":"

The specified configuration ID was not located. Verify the configuration ID and try again.

", "exception":true }, + "S3Bucket":{"type":"string"}, + "SchemaStorageConfig":{ + "type":"map", + "key":{"shape":"DatabaseName"}, + "value":{"shape":"String"} + }, "ServerInternalErrorException":{ "type":"structure", "members":{ @@ -1229,6 +1410,36 @@ "exception":true, "fault":true }, + "StartContinuousExportRequest":{ + "type":"structure", + "members":{ + } + }, + "StartContinuousExportResponse":{ + "type":"structure", + "members":{ + "exportId":{ + "shape":"ConfigurationsExportId", + "documentation":"

The unique ID assigned to this export.

" + }, + "s3Bucket":{ + "shape":"S3Bucket", + "documentation":"

The name of the s3 bucket where the export data parquet files are stored.

" + }, + "startTime":{ + "shape":"TimeStamp", + "documentation":"

The timestamp representing when the continuous export was started.

" + }, + "dataSource":{ + "shape":"DataSource", + "documentation":"

The type of data collector used to gather this data (currently only offered for AGENT).

" + }, + "schemaStorageConfig":{ + "shape":"SchemaStorageConfig", + "documentation":"

A dictionary which describes how the data is stored.

  • databaseName - the name of the Glue database used to store the schema.

" + } + } + }, "StartDataCollectionByAgentIdsRequest":{ "type":"structure", "required":["agentIds"], @@ -1278,6 +1489,29 @@ } } }, + "StopContinuousExportRequest":{ + "type":"structure", + "required":["exportId"], + "members":{ + "exportId":{ + "shape":"ConfigurationsExportId", + "documentation":"

The unique ID assigned to this export.

" + } + } + }, + "StopContinuousExportResponse":{ + "type":"structure", + "members":{ + "startTime":{ + "shape":"TimeStamp", + "documentation":"

Timestamp that represents when this continuous export started collecting data.

" + }, + "stopTime":{ + "shape":"TimeStamp", + "documentation":"

Timestamp that represents when this continuous export was stopped.

" + } + } + }, "StopDataCollectionByAgentIdsRequest":{ "type":"structure", "required":["agentIds"], @@ -1298,6 +1532,11 @@ } }, "String":{"type":"string"}, + "StringMax255":{ + "type":"string", + "max":255, + "min":1 + }, "Tag":{ "type":"structure", "required":[ diff --git a/botocore/data/dlm/2018-01-12/service-2.json b/botocore/data/dlm/2018-01-12/service-2.json index 658e5448..91c290e5 100644 --- a/botocore/data/dlm/2018-01-12/service-2.json +++ b/botocore/data/dlm/2018-01-12/service-2.json @@ -120,7 +120,7 @@ }, "PolicyDetails":{ "shape":"PolicyDetails", - "documentation":"

The configuration of the lifecycle policy.

Target tags cannot be re-used across lifecycle policies.

" + "documentation":"

The configuration details of the lifecycle policy.

Target tags cannot be re-used across lifecycle policies.

" } } }, @@ -198,13 +198,13 @@ }, "TargetTags":{ "shape":"TargetTagsFilterList", - "documentation":"

The target tags.

Tags are strings in the format key:value.

", + "documentation":"

The target tag for a policy.

Tags are strings in the format key=value.

", "location":"querystring", "locationName":"targetTags" }, "TagsToAdd":{ "shape":"TagsToAddFilterList", - "documentation":"

The tags to add to the resources.

Tags are strings in the format key:value.

These tags are added in addition to the AWS-added lifecycle tags.

", + "documentation":"

The tags to add to objects created by the policy.

Tags are strings in the format key=value.

These user-defined tags are added in addition to the AWS-added lifecycle tags.

", "location":"querystring", "locationName":"tagsToAdd" } @@ -373,11 +373,11 @@ }, "TargetTags":{ "shape":"TargetTagList", - "documentation":"

The target tags.

" + "documentation":"

The single tag that identifies targeted resources for this policy.

" }, "Schedules":{ "shape":"ScheduleList", - "documentation":"

The schedule.

" + "documentation":"

The schedule of policy-defined actions.

" } }, "documentation":"

Specifies the configuration of a lifecycle policy.

" @@ -435,7 +435,7 @@ }, "TagsToAdd":{ "shape":"TagsToAddList", - "documentation":"

The tags to add to policy-created resources. These tags are added in addition to the default lifecycle tags.

" + "documentation":"

The tags to apply to policy-created resources. These user-defined tags are in addition to the AWS-added lifecycle tags.

" }, "CreateRule":{ "shape":"CreateRule", diff --git a/botocore/data/ds/2015-04-16/service-2.json b/botocore/data/ds/2015-04-16/service-2.json index dd153440..ff603ae2 100644 --- a/botocore/data/ds/2015-04-16/service-2.json +++ b/botocore/data/ds/2015-04-16/service-2.json @@ -13,6 +13,23 @@ "uid":"ds-2015-04-16" }, "operations":{ + "AcceptSharedDirectory":{ + "name":"AcceptSharedDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptSharedDirectoryRequest"}, + "output":{"shape":"AcceptSharedDirectoryResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"DirectoryAlreadySharedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Accepts a directory sharing request that was sent from the directory owner account.

" + }, "AddIpRoutes":{ "name":"AddIpRoutes", "http":{ @@ -78,7 +95,7 @@ {"shape":"ClientException"}, {"shape":"ServiceException"} ], - "documentation":"

Creates an AD Connector to connect to an on-premises directory.

Before you call ConnectDirectory, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the ConnectDirectory operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.

" + "documentation":"

Creates an AD Connector to connect to an on-premises directory.

Before you call ConnectDirectory, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the ConnectDirectory operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.

" }, "CreateAlias":{ "name":"CreateAlias", @@ -150,7 +167,25 @@ {"shape":"ClientException"}, {"shape":"ServiceException"} ], - "documentation":"

Creates a Simple AD directory.

Before you call CreateDirectory, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the CreateDirectory operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.

" + "documentation":"

Creates a Simple AD directory.

Before you call CreateDirectory, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the CreateDirectory operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.

" + }, + "CreateLogSubscription":{ + "name":"CreateLogSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLogSubscriptionRequest"}, + "output":{"shape":"CreateLogSubscriptionResult"}, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InsufficientPermissionsException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Creates a subscription to forward real time Directory Service domain controller security logs to the specified CloudWatch log group in your AWS account.

" }, "CreateMicrosoftAD":{ "name":"CreateMicrosoftAD", @@ -235,7 +270,23 @@ {"shape":"ClientException"}, {"shape":"ServiceException"} ], - "documentation":"

Deletes an AWS Directory Service directory.

Before you call DeleteDirectory, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the DeleteDirectory operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.

" + "documentation":"

Deletes an AWS Directory Service directory.

Before you call DeleteDirectory, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the DeleteDirectory operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.

" + }, + "DeleteLogSubscription":{ + "name":"DeleteLogSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLogSubscriptionRequest"}, + "output":{"shape":"DeleteLogSubscriptionResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Deletes the specified log subscription.

" }, "DeleteSnapshot":{ "name":"DeleteSnapshot", @@ -319,7 +370,7 @@ {"shape":"ClientException"}, {"shape":"ServiceException"} ], - "documentation":"

Obtains information about the directories that belong to this account.

You can retrieve information about specific directories by passing the directory identifiers in the DirectoryIds parameter. Otherwise, all directories that belong to the current account are returned.

This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeDirectoriesResult.NextToken member contains a token that you pass in the next call to DescribeDirectories to retrieve the next set of items.

You can also specify a maximum number of return results with the Limit parameter.

" + "documentation":"

Obtains information about the directories that belong to this account.

You can retrieve information about specific directories by passing the directory identifiers in the DirectoryIds parameter. Otherwise, all directories that belong to the current account are returned.

This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeDirectoriesResult.NextToken member contains a token that you pass in the next call to DescribeDirectories to retrieve the next set of items.

You can also specify a maximum number of return results with the Limit parameter.

" }, "DescribeDomainControllers":{ "name":"DescribeDomainControllers", @@ -355,6 +406,24 @@ ], "documentation":"

Obtains information about which SNS topics receive status messages from the specified directory.

If no input parameters are provided, such as DirectoryId or TopicName, this request describes all of the associations in the account.

" }, + "DescribeSharedDirectories":{ + "name":"DescribeSharedDirectories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSharedDirectoriesRequest"}, + "output":{"shape":"DescribeSharedDirectoriesResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Returns the shared directories in your account.

" + }, "DescribeSnapshots":{ "name":"DescribeSnapshots", "http":{ @@ -403,7 +472,7 @@ {"shape":"ClientException"}, {"shape":"ServiceException"} ], - "documentation":"

Disables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector directory.

" + "documentation":"

Disables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector or Microsoft AD directory.

" }, "DisableSso":{ "name":"DisableSso", @@ -437,7 +506,7 @@ {"shape":"ClientException"}, {"shape":"ServiceException"} ], - "documentation":"

Enables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector directory.

" + "documentation":"

Enables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector or Microsoft AD directory.

" }, "EnableSso":{ "name":"EnableSso", @@ -503,6 +572,22 @@ ], "documentation":"

Lists the address blocks that you have added to a directory.

" }, + "ListLogSubscriptions":{ + "name":"ListLogSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLogSubscriptionsRequest"}, + "output":{"shape":"ListLogSubscriptionsResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Lists the active log subscriptions for the AWS account.

" + }, "ListSchemaExtensions":{ "name":"ListSchemaExtensions", "http":{ @@ -552,6 +637,23 @@ ], "documentation":"

Associates a directory with an SNS topic. This establishes the directory as a publisher to the specified SNS topic. You can then receive email or text (SMS) messages when the status of your directory changes. You get notified if your directory goes from an Active status to an Impaired or Inoperable status. You also receive a notification when the directory returns to an Active status.

" }, + "RejectSharedDirectory":{ + "name":"RejectSharedDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectSharedDirectoryRequest"}, + "output":{"shape":"RejectSharedDirectoryResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"DirectoryAlreadySharedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Rejects a directory sharing request that was sent from the directory owner account.

" + }, "RemoveIpRoutes":{ "name":"RemoveIpRoutes", "http":{ @@ -620,6 +722,28 @@ ], "documentation":"

Restores a directory using an existing directory snapshot.

When you restore a directory from a snapshot, any changes made to the directory after the snapshot date are overwritten.

This action returns as soon as the restore operation is initiated. You can monitor the progress of the restore operation by calling the DescribeDirectories operation with the directory identifier. When the DirectoryDescription.Stage value changes to Active, the restore operation is complete.

" }, + "ShareDirectory":{ + "name":"ShareDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ShareDirectoryRequest"}, + "output":{"shape":"ShareDirectoryResult"}, + "errors":[ + {"shape":"DirectoryAlreadySharedException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidTargetException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ShareLimitExceededException"}, + {"shape":"OrganizationsException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Shares a specified directory (DirectoryId) in your AWS account (directory owner) with another AWS account (directory consumer). With this operation you can use your directory from any AWS account and from any Amazon VPC within an AWS Region.

When you share your AWS Managed Microsoft AD directory, AWS Directory Service creates a shared directory in the directory consumer account. This shared directory contains the metadata to provide access to the directory within the directory owner account. The shared directory is visible in all VPCs in the directory consumer account.

The ShareMethod parameter determines whether the specified directory can be shared between AWS accounts inside the same AWS organization (ORGANIZATIONS). It also determines whether you can share the directory with any other AWS account either inside or outside of the organization (HANDSHAKE).

The ShareNotes parameter is only used when HANDSHAKE is called, which sends a directory sharing request to the directory consumer.

" + }, "StartSchemaExtension":{ "name":"StartSchemaExtension", "http":{ @@ -638,6 +762,23 @@ ], "documentation":"

Applies a schema extension to a Microsoft AD directory.

" }, + "UnshareDirectory":{ + "name":"UnshareDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnshareDirectoryRequest"}, + "output":{"shape":"UnshareDirectoryResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidTargetException"}, + {"shape":"DirectoryNotSharedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Stops the directory sharing between the directory owner and consumer accounts.

" + }, "UpdateConditionalForwarder":{ "name":"UpdateConditionalForwarder", "http":{ @@ -689,7 +830,7 @@ {"shape":"ClientException"}, {"shape":"ServiceException"} ], - "documentation":"

Updates the Remote Authentication Dial In User Service (RADIUS) server information for an AD Connector directory.

" + "documentation":"

Updates the Remote Authentication Dial In User Service (RADIUS) server information for an AD Connector or Microsoft AD directory.

" }, "VerifyTrust":{ "name":"VerifyTrust", @@ -710,6 +851,34 @@ } }, "shapes":{ + "AcceptSharedDirectoryRequest":{ + "type":"structure", + "required":["SharedDirectoryId"], + "members":{ + "SharedDirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the shared directory in the directory consumer account. This identifier is different for each directory owner account.

" + } + } + }, + "AcceptSharedDirectoryResult":{ + "type":"structure", + "members":{ + "SharedDirectory":{ + "shape":"SharedDirectory", + "documentation":"

The shared directory in the directory consumer account.

" + } + } + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "exception":true + }, "AccessUrl":{ "type":"string", "max":128, @@ -916,7 +1085,7 @@ "members":{ "Name":{ "shape":"DirectoryName", - "documentation":"

The fully-qualified name of the on-premises directory, such as corp.example.com.

" + "documentation":"

The fully qualified name of the on-premises directory, such as corp.example.com.

" }, "ShortName":{ "shape":"DirectoryShortName", @@ -1078,7 +1247,7 @@ }, "Password":{ "shape":"Password", - "documentation":"

The password for the directory administrator. The directory creation process creates a directory administrator account with the username Administrator and this password.

" + "documentation":"

The password for the directory administrator. The directory creation process creates a directory administrator account with the user name Administrator and this password.

" }, "Description":{ "shape":"Description", @@ -1105,6 +1274,28 @@ }, "documentation":"

Contains the results of the CreateDirectory operation.

" }, + "CreateLogSubscriptionRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "LogGroupName" + ], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier (ID) of the directory to which you want to subscribe and receive real-time logs to your specified CloudWatch log group.

" + }, + "LogGroupName":{ + "shape":"LogGroupName", + "documentation":"

The name of the CloudWatch log group where the real-time domain controller logs are forwarded.

" + } + } + }, + "CreateLogSubscriptionResult":{ + "type":"structure", + "members":{ + } + }, "CreateMicrosoftADRequest":{ "type":"structure", "required":[ @@ -1223,6 +1414,10 @@ "documentation":"

The result of a CreateTrust request.

" }, "CreatedDateTime":{"type":"timestamp"}, + "CustomerId":{ + "type":"string", + "pattern":"^(\\d{12})$" + }, "CustomerUserName":{ "type":"string", "max":64, @@ -1275,6 +1470,21 @@ }, "documentation":"

Contains the results of the DeleteDirectory operation.

" }, + "DeleteLogSubscriptionRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier (ID) of the directory whose log subscription you want to delete.

" + } + } + }, + "DeleteLogSubscriptionResult":{ + "type":"structure", + "members":{ + } + }, "DeleteSnapshotRequest":{ "type":"structure", "required":["SnapshotId"], @@ -1379,7 +1589,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The DescribeDirectoriesResult.NextToken value from a previous call to DescribeDirectories. Pass null if this is the first call.

" + "documentation":"

The DescribeDirectoriesResult.NextToken value from a previous call to DescribeDirectories. Pass null if this is the first call.

" }, "Limit":{ "shape":"Limit", @@ -1393,11 +1603,11 @@ "members":{ "DirectoryDescriptions":{ "shape":"DirectoryDescriptions", - "documentation":"

The list of DirectoryDescription objects that were retrieved.

It is possible that this list contains less than the number of items specified in the Limit member of the request. This occurs if there are less than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded.

" + "documentation":"

The list of DirectoryDescription objects that were retrieved.

It is possible that this list contains less than the number of items specified in the Limit member of the request. This occurs if there are less than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeDirectories to retrieve the next set of items.

" + "documentation":"

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeDirectories to retrieve the next set of items.

" } }, "documentation":"

Contains the results of the DescribeDirectories operation.

" @@ -1461,6 +1671,41 @@ }, "documentation":"

The result of a DescribeEventTopic request.

" }, + "DescribeSharedDirectoriesRequest":{ + "type":"structure", + "required":["OwnerDirectoryId"], + "members":{ + "OwnerDirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Returns the identifier of the directory in the directory owner account.

" + }, + "SharedDirectoryIds":{ + "shape":"DirectoryIds", + "documentation":"

A list of identifiers of all shared directories in your account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The DescribeSharedDirectoriesResult.NextToken value from a previous call to DescribeSharedDirectories. Pass null if this is the first call.

" + }, + "Limit":{ + "shape":"Limit", + "documentation":"

The number of shared directories to return in the response object.

" + } + } + }, + "DescribeSharedDirectoriesResult":{ + "type":"structure", + "members":{ + "SharedDirectories":{ + "shape":"SharedDirectories", + "documentation":"

A list of all shared directories in your account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If not null, token that indicates that more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeSharedDirectories to retrieve the next set of items.

" + } + } + }, "DescribeSnapshotsRequest":{ "type":"structure", "members":{ @@ -1543,6 +1788,15 @@ "type":"integer", "min":2 }, + "DirectoryAlreadySharedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

The specified directory has already been shared with this AWS account.

", + "exception":true + }, "DirectoryConnectSettings":{ "type":"structure", "required":[ @@ -1566,7 +1820,7 @@ }, "CustomerUserName":{ "shape":"UserName", - "documentation":"

The username of an account in the on-premises directory that is used to connect to the directory. This account must have the following privileges:

  • Read users and groups

  • Create computer objects

  • Join computers to the domain

" + "documentation":"

The user name of an account in the on-premises directory that is used to connect to the directory. This account must have the following permissions:

  • Read users and groups

  • Create computer objects

  • Join computers to the domain

" } }, "documentation":"

Contains information for the ConnectDirectory operation when an AD Connector directory is being created.

" @@ -1584,7 +1838,7 @@ }, "CustomerUserName":{ "shape":"UserName", - "documentation":"

The username of the service account in the on-premises directory.

" + "documentation":"

The user name of the service account in the on-premises directory.

" }, "SecurityGroupId":{ "shape":"SecurityGroupId", @@ -1610,7 +1864,7 @@ }, "Name":{ "shape":"DirectoryName", - "documentation":"

The fully-qualified name of the directory.

" + "documentation":"

The fully qualified name of the directory.

" }, "ShortName":{ "shape":"DirectoryShortName", @@ -1644,6 +1898,18 @@ "shape":"DirectoryStage", "documentation":"

The current stage of the directory.

" }, + "ShareStatus":{ + "shape":"ShareStatus", + "documentation":"

Current directory status of the shared AWS Managed Microsoft AD directory.

" + }, + "ShareMethod":{ + "shape":"ShareMethod", + "documentation":"

The method used when sharing a directory to determine whether the directory should be shared within your AWS organization (ORGANIZATIONS) or with any AWS account by sending a shared directory request (HANDSHAKE).

" + }, + "ShareNotes":{ + "shape":"Notes", + "documentation":"

A directory share request that is sent by the directory owner to the directory consumer. The request includes a typed message to help the directory consumer administrator determine whether to approve or reject the share invitation.

" + }, "LaunchTime":{ "shape":"LaunchTime", "documentation":"

Specifies when the directory was created.

" @@ -1678,11 +1944,15 @@ }, "SsoEnabled":{ "shape":"SsoEnabled", - "documentation":"

Indicates if single-sign on is enabled for the directory. For more information, see EnableSso and DisableSso.

" + "documentation":"

Indicates if single sign-on is enabled for the directory. For more information, see EnableSso and DisableSso.

" }, "DesiredNumberOfDomainControllers":{ "shape":"DesiredNumberOfDomainControllers", "documentation":"

The desired number of domain controllers in the directory if the directory is Microsoft AD.

" + }, + "OwnerDirectoryDescription":{ + "shape":"OwnerDirectoryDescription", + "documentation":"

Describes the AWS Managed Microsoft AD directory in the directory owner account.

" } }, "documentation":"

Contains information about an AWS Directory Service directory.

" @@ -1763,6 +2033,15 @@ "type":"string", "pattern":"^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+$" }, + "DirectoryNotSharedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

The specified directory has not been shared with this AWS account.

", + "exception":true + }, "DirectoryShortName":{ "type":"string", "pattern":"^[^\\\\/:*?\\\"\\<\\>|.]+[^\\\\/:*?\\\"<>|]*$" @@ -1795,7 +2074,8 @@ "enum":[ "SimpleAD", "ADConnector", - "MicrosoftAD" + "MicrosoftAD", + "SharedMicrosoftAD" ] }, "DirectoryUnavailableException":{ @@ -2127,7 +2407,7 @@ "Message":{"shape":"ExceptionMessage"}, "RequestId":{"shape":"RequestId"} }, - "documentation":"

The NextToken value is not valid.

", + "documentation":"

The NextToken value is not valid.

", "exception":true }, "InvalidParameterException":{ @@ -2148,6 +2428,15 @@ "documentation":"

The new password provided by the user does not meet the password complexity requirements defined in your directory.

", "exception":true }, + "InvalidTargetException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

The specified shared target is not valid.

", + "exception":true + }, "IpAddr":{ "type":"string", "pattern":"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" @@ -2271,6 +2560,36 @@ } } }, + "ListLogSubscriptionsRequest":{ + "type":"structure", + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

If a DirectoryID is provided, lists only the log subscription associated with that directory. If no DirectoryId is provided, lists all log subscriptions associated with your AWS account. If there are no log subscriptions for the AWS account or the directory, an empty list will be returned.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return.

" + }, + "Limit":{ + "shape":"Limit", + "documentation":"

The maximum number of items returned.

" + } + } + }, + "ListLogSubscriptionsResult":{ + "type":"structure", + "members":{ + "LogSubscriptions":{ + "shape":"LogSubscriptions", + "documentation":"

A list of active LogSubscription objects for calling the AWS account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return.

" + } + } + }, "ListSchemaExtensionsRequest":{ "type":"structure", "required":["DirectoryId"], @@ -2333,13 +2652,85 @@ } } }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[-._/#A-Za-z0-9]+" + }, + "LogSubscription":{ + "type":"structure", + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier (ID) of the directory that you want to associate with the log subscription.

" + }, + "LogGroupName":{ + "shape":"LogGroupName", + "documentation":"

The name of the log group.

" + }, + "SubscriptionCreatedDateTime":{ + "shape":"SubscriptionCreatedDateTime", + "documentation":"

The date and time that the log subscription was created.

" + } + }, + "documentation":"

Represents a log subscription, which tracks real-time data from a chosen log group to a specified destination.

" + }, + "LogSubscriptions":{ + "type":"list", + "member":{"shape":"LogSubscription"} + }, "ManualSnapshotsLimitReached":{"type":"boolean"}, "NextToken":{"type":"string"}, + "Notes":{ + "type":"string", + "max":1024, + "sensitive":true + }, "OrganizationalUnitDN":{ "type":"string", "max":2000, "min":1 }, + "OrganizationsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

Exception encountered while trying to access your AWS organization.

", + "exception":true + }, + "OwnerDirectoryDescription":{ + "type":"structure", + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the AWS Managed Microsoft AD directory in the directory owner account.

" + }, + "AccountId":{ + "shape":"CustomerId", + "documentation":"

Identifier of the directory owner account.

" + }, + "DnsIpAddrs":{ + "shape":"DnsIpAddrs", + "documentation":"

IP addresses of the directory's domain controllers.

" + }, + "VpcSettings":{ + "shape":"DirectoryVpcSettingsDescription", + "documentation":"

Information about the VPC settings for the directory.

" + }, + "RadiusSettings":{ + "shape":"RadiusSettings", + "documentation":"

A RadiusSettings object that contains information about the RADIUS server.

" + }, + "RadiusStatus":{ + "shape":"RadiusStatus", + "documentation":"

Information about the status of the RADIUS server.

" + } + }, + "documentation":"

Describes the directory owner account details that have been shared to the directory consumer account.

" + }, "Password":{ "type":"string", "pattern":"(?=^.{8,64}$)((?=.*\\d)(?=.*[A-Z])(?=.*[a-z])|(?=.*\\d)(?=.*[^A-Za-z0-9\\s])(?=.*[a-z])|(?=.*[^A-Za-z0-9\\s])(?=.*[A-Z])(?=.*[a-z])|(?=.*\\d)(?=.*[A-Z])(?=.*[^A-Za-z0-9\\s]))^.*", @@ -2390,7 +2781,7 @@ }, "SharedSecret":{ "shape":"RadiusSharedSecret", - "documentation":"

Not currently used.

" + "documentation":"

Required for enabling RADIUS on the directory.

" }, "AuthenticationProtocol":{ "shape":"RadiusAuthenticationProtocol", @@ -2450,6 +2841,25 @@ }, "documentation":"

The result of a RegisterEventTopic request.

" }, + "RejectSharedDirectoryRequest":{ + "type":"structure", + "required":["SharedDirectoryId"], + "members":{ + "SharedDirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the shared directory in the directory consumer account. This identifier is different for each directory owner account.

" + } + } + }, + "RejectSharedDirectoryResult":{ + "type":"structure", + "members":{ + "SharedDirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the shared directory in the directory consumer account.

" + } + } + }, "RemoteDomainName":{ "type":"string", "pattern":"^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$" @@ -2525,7 +2935,7 @@ }, "UserName":{ "shape":"CustomerUserName", - "documentation":"

The username of the user whose password will be reset.

" + "documentation":"

The user name of the user whose password will be reset.

" }, "NewPassword":{ "shape":"UserPassword", @@ -2645,6 +3055,135 @@ "exception":true, "fault":true }, + "ShareDirectoryRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "ShareTarget", + "ShareMethod" + ], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the AWS Managed Microsoft AD directory that you want to share with other AWS accounts.

" + }, + "ShareNotes":{ + "shape":"Notes", + "documentation":"

A directory share request that is sent by the directory owner to the directory consumer. The request includes a typed message to help the directory consumer administrator determine whether to approve or reject the share invitation.

" + }, + "ShareTarget":{ + "shape":"ShareTarget", + "documentation":"

Identifier for the directory consumer account with whom the directory is to be shared.

" + }, + "ShareMethod":{ + "shape":"ShareMethod", + "documentation":"

The method used when sharing a directory to determine whether the directory should be shared within your AWS organization (ORGANIZATIONS) or with any AWS account by sending a directory sharing request (HANDSHAKE).

" + } + } + }, + "ShareDirectoryResult":{ + "type":"structure", + "members":{ + "SharedDirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the directory that is stored in the directory consumer account that is shared from the specified directory (DirectoryId).

" + } + } + }, + "ShareLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

The maximum number of AWS accounts that you can share with this directory has been reached.

", + "exception":true + }, + "ShareMethod":{ + "type":"string", + "enum":[ + "ORGANIZATIONS", + "HANDSHAKE" + ] + }, + "ShareStatus":{ + "type":"string", + "enum":[ + "Shared", + "PendingAcceptance", + "Rejected", + "Rejecting", + "RejectFailed", + "Sharing", + "ShareFailed", + "Deleted", + "Deleting" + ] + }, + "ShareTarget":{ + "type":"structure", + "required":[ + "Id", + "Type" + ], + "members":{ + "Id":{ + "shape":"TargetId", + "documentation":"

Identifier of the directory consumer account.

" + }, + "Type":{ + "shape":"TargetType", + "documentation":"

Type of identifier to be used in the Id field.

" + } + }, + "documentation":"

Identifier that contains details about the directory consumer account.

" + }, + "SharedDirectories":{ + "type":"list", + "member":{"shape":"SharedDirectory"} + }, + "SharedDirectory":{ + "type":"structure", + "members":{ + "OwnerAccountId":{ + "shape":"CustomerId", + "documentation":"

Identifier of the directory owner account, which contains the directory that has been shared to the consumer account.

" + }, + "OwnerDirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the directory in the directory owner account.

" + }, + "ShareMethod":{ + "shape":"ShareMethod", + "documentation":"

The method used when sharing a directory to determine whether the directory should be shared within your AWS organization (ORGANIZATIONS) or with any AWS account by sending a shared directory request (HANDSHAKE).

" + }, + "SharedAccountId":{ + "shape":"CustomerId", + "documentation":"

Identifier of the directory consumer account that has access to the shared directory (OwnerDirectoryId) in the directory owner account.

" + }, + "SharedDirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the shared directory in the directory consumer account. This identifier is different for each directory owner account.

" + }, + "ShareStatus":{ + "shape":"ShareStatus", + "documentation":"

Current directory status of the shared AWS Managed Microsoft AD directory.

" + }, + "ShareNotes":{ + "shape":"Notes", + "documentation":"

A directory share request that is sent by the directory owner to the directory consumer. The request includes a typed message to help the directory consumer administrator determine whether to approve or reject the share invitation.

" + }, + "CreatedDateTime":{ + "shape":"CreatedDateTime", + "documentation":"

The date and time that the shared directory was created.

" + }, + "LastUpdatedDateTime":{ + "shape":"LastUpdatedDateTime", + "documentation":"

The date and time that the shared directory was last updated.

" + } + }, + "documentation":"

Details about the shared directory in the directory owner account for which the share request in the directory consumer account has been accepted.

" + }, "Snapshot":{ "type":"structure", "members":{ @@ -2786,6 +3325,7 @@ "type":"list", "member":{"shape":"SubnetId"} }, + "SubscriptionCreatedDateTime":{"type":"timestamp"}, "Tag":{ "type":"structure", "required":[ @@ -2833,6 +3373,15 @@ "type":"list", "member":{"shape":"Tag"} }, + "TargetId":{ + "type":"string", + "max":64, + "min":1 + }, + "TargetType":{ + "type":"string", + "enum":["ACCOUNT"] + }, "TopicArn":{"type":"string"}, "TopicName":{ "type":"string", @@ -2943,6 +3492,50 @@ "type":"list", "member":{"shape":"Trust"} }, + "UnshareDirectoryRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "UnshareTarget" + ], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the AWS Managed Microsoft AD directory that you want to stop sharing.

" + }, + "UnshareTarget":{ + "shape":"UnshareTarget", + "documentation":"

Identifier for the directory consumer account with whom the directory has to be unshared.

" + } + } + }, + "UnshareDirectoryResult":{ + "type":"structure", + "members":{ + "SharedDirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the directory stored in the directory consumer account that is to be unshared from the specified directory (DirectoryId).

" + } + } + }, + "UnshareTarget":{ + "type":"structure", + "required":[ + "Id", + "Type" + ], + "members":{ + "Id":{ + "shape":"TargetId", + "documentation":"

Identifier of the directory consumer account.

" + }, + "Type":{ + "shape":"TargetType", + "documentation":"

Type of identifier to be used in the Id field.

" + } + }, + "documentation":"

Identifier that contains details about the directory consumer account with whom the directory is being unshared.

" + }, "UnsupportedOperationException":{ "type":"structure", "members":{ diff --git a/botocore/data/dynamodb/2012-08-10/service-2.json b/botocore/data/dynamodb/2012-08-10/service-2.json index 575000b1..cc8d44a6 100644 --- a/botocore/data/dynamodb/2012-08-10/service-2.json +++ b/botocore/data/dynamodb/2012-08-10/service-2.json @@ -26,7 +26,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

If you request more than 100 items BatchGetItem will return a ValidationException with the message \"Too many items requested for the BatchGetItem call\".

For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one data set.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

In order to minimize response latency, BatchGetItem retrieves items in parallel.

When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.

If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

If you request more than 100 items BatchGetItem will return a ValidationException with the message \"Too many items requested for the BatchGetItem call\".

For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one data set.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

In order to minimize response latency, BatchGetItem retrieves items in parallel.

When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.

If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide.

", + "endpointdiscovery":{ + } }, "BatchWriteItem":{ "name":"BatchWriteItem", @@ -42,7 +44,9 @@ {"shape":"ItemCollectionSizeLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.

BatchWriteItem cannot update items. To update items, use the UpdateItem action.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

  • One or more tables specified in the BatchWriteItem request does not exist.

  • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

  • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

  • Your request contains at least two items with identical hash and range keys (which essentially is two put operations).

  • There are more than 25 requests in the batch.

  • Any individual item in a batch exceeds 400 KB.

  • The total request size exceeds 16 MB.

" + "documentation":"

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.

BatchWriteItem cannot update items. To update items, use the UpdateItem action.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

  • One or more tables specified in the BatchWriteItem request does not exist.

  • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

  • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

  • Your request contains at least two items with identical hash and range keys (which essentially is two put operations).

  • There are more than 25 requests in the batch.

  • Any individual item in a batch exceeds 400 KB.

  • The total request size exceeds 16 MB.

", + "endpointdiscovery":{ + } }, "CreateBackup":{ "name":"CreateBackup", @@ -60,7 +64,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a backup for an existing table.

Each time you create an On-Demand Backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.

When you create an On-Demand Backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup requests are processed instantaneously and become available for restore within minutes.

You can call CreateBackup at a maximum rate of 50 times per second.

All backups in DynamoDB work without consuming any provisioned throughput on the table.

If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup may or may not contain data modifications made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal consistency.

Along with data, the following are also included on the backups:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Streams

  • Provisioned read and write capacity

" + "documentation":"

Creates a backup for an existing table.

Each time you create an On-Demand Backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.

When you create an On-Demand Backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup requests are processed instantaneously and become available for restore within minutes.

You can call CreateBackup at a maximum rate of 50 times per second.

All backups in DynamoDB work without consuming any provisioned throughput on the table.

If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup may or may not contain data modifications made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal consistency.

Along with data, the following are also included on the backups:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Streams

  • Provisioned read and write capacity

", + "endpointdiscovery":{ + } }, "CreateGlobalTable":{ "name":"CreateGlobalTable", @@ -76,7 +82,9 @@ {"shape":"GlobalTableAlreadyExistsException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided regions.

If you want to add a new replica table to a global table, each of the following conditions must be true:

  • The table must have the same primary key as all of the other replicas.

  • The table must have the same name as all of the other replicas.

  • The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item.

  • None of the replica tables in the global table can contain any data.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

" + "documentation":"

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided regions.

If you want to add a new replica table to a global table, each of the following conditions must be true:

  • The table must have the same primary key as all of the other replicas.

  • The table must have the same name as all of the other replicas.

  • The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item.

  • None of the replica tables in the global table can contain any data.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

", + "endpointdiscovery":{ + } }, "CreateTable":{ "name":"CreateTable", @@ -91,7 +99,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each region. That is, you can have two tables with same name if you create the tables in different regions.

CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.

You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.

You can use the DescribeTable action to check the table status.

" + "documentation":"

The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each region. That is, you can have two tables with same name if you create the tables in different regions.

CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.

You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.

You can use the DescribeTable action to check the table status.

", + "endpointdiscovery":{ + } }, "DeleteBackup":{ "name":"DeleteBackup", @@ -107,7 +117,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes an existing backup of a table.

You can call DeleteBackup at a maximum rate of 10 times per second.

" + "documentation":"

Deletes an existing backup of a table.

You can call DeleteBackup at a maximum rate of 10 times per second.

", + "endpointdiscovery":{ + } }, "DeleteItem":{ "name":"DeleteItem", @@ -124,7 +136,9 @@ {"shape":"ItemCollectionSizeLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value.

In addition to deleting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.

Unless you specify conditions, the DeleteItem is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.

Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted.

" + "documentation":"

Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value.

In addition to deleting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.

Unless you specify conditions, the DeleteItem is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.

Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted.

", + "endpointdiscovery":{ + } }, "DeleteTable":{ "name":"DeleteTable", @@ -140,7 +154,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned.

DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.

When you delete a table, any indexes on that table are also deleted.

If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

Use the DescribeTable action to check the status of the table.

" + "documentation":"

The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned.

DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.

When you delete a table, any indexes on that table are also deleted.

If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

Use the DescribeTable action to check the status of the table.

", + "endpointdiscovery":{ + } }, "DescribeBackup":{ "name":"DescribeBackup", @@ -154,7 +170,9 @@ {"shape":"BackupNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Describes an existing backup of a table.

You can call DescribeBackup at a maximum rate of 10 times per second.

" + "documentation":"

Describes an existing backup of a table.

You can call DescribeBackup at a maximum rate of 10 times per second.

", + "endpointdiscovery":{ + } }, "DescribeContinuousBackups":{ "name":"DescribeContinuousBackups", @@ -168,7 +186,19 @@ {"shape":"TableNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.

You can call DescribeContinuousBackups at a maximum rate of 10 times per second.

" + "documentation":"

Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.

You can call DescribeContinuousBackups at a maximum rate of 10 times per second.

", + "endpointdiscovery":{ + } + }, + "DescribeEndpoints":{ + "name":"DescribeEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEndpointsRequest"}, + "output":{"shape":"DescribeEndpointsResponse"}, + "endpointoperation":true }, "DescribeGlobalTable":{ "name":"DescribeGlobalTable", @@ -182,7 +212,9 @@ {"shape":"InternalServerError"}, {"shape":"GlobalTableNotFoundException"} ], - "documentation":"

Returns information about the specified global table.

" + "documentation":"

Returns information about the specified global table.

", + "endpointdiscovery":{ + } }, "DescribeGlobalTableSettings":{ "name":"DescribeGlobalTableSettings", @@ -196,7 +228,9 @@ {"shape":"GlobalTableNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Describes region specific settings for a global table.

" + "documentation":"

Describes region-specific settings for a global table.

", + "endpointdiscovery":{ + } }, "DescribeLimits":{ "name":"DescribeLimits", @@ -209,7 +243,9 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Returns the current provisioned-capacity limits for your AWS account in a region, both for the region as a whole and for any one DynamoDB table that you create there.

When you establish an AWS account, the account has initial limits on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given region. Also, there are per-table limits that apply when you create a table there. For more information, see Limits page in the Amazon DynamoDB Developer Guide.

Although you can increase these limits by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits action lets you write code to compare the capacity you are currently using to those limits imposed by your account so that you have enough time to apply for an increase before you hit a limit.

For example, you could use one of the AWS SDKs to do the following:

  1. Call DescribeLimits for a particular region to obtain your current account limits on provisioned capacity there.

  2. Create a variable to hold the aggregate read capacity units provisioned for all your tables in that region, and one to hold the aggregate write capacity units. Zero them both.

  3. Call ListTables to obtain a list of all your DynamoDB tables.

  4. For each table name listed by ListTables, do the following:

    • Call DescribeTable with the table name.

    • Use the data returned by DescribeTable to add the read capacity units and write capacity units provisioned for the table itself to your variables.

    • If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.

  5. Report the account limits for that region returned by DescribeLimits, along with the total current provisioned capacity levels you have calculated.

This will let you see whether you are getting close to your account-level limits.

The per-table limits apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.

For existing tables and their GSIs, DynamoDB will not let you increase provisioned capacity extremely rapidly, but the only upper limit that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account limits.

DescribeLimits should only be called periodically. You can expect throttling errors if you call it more than once in a minute.

The DescribeLimits Request element has no content.

" + "documentation":"

Returns the current provisioned-capacity limits for your AWS account in a region, both for the region as a whole and for any one DynamoDB table that you create there.

When you establish an AWS account, the account has initial limits on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given region. Also, there are per-table limits that apply when you create a table there. For more information, see Limits page in the Amazon DynamoDB Developer Guide.

Although you can increase these limits by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits action lets you write code to compare the capacity you are currently using to those limits imposed by your account so that you have enough time to apply for an increase before you hit a limit.

For example, you could use one of the AWS SDKs to do the following:

  1. Call DescribeLimits for a particular region to obtain your current account limits on provisioned capacity there.

  2. Create a variable to hold the aggregate read capacity units provisioned for all your tables in that region, and one to hold the aggregate write capacity units. Zero them both.

  3. Call ListTables to obtain a list of all your DynamoDB tables.

  4. For each table name listed by ListTables, do the following:

    • Call DescribeTable with the table name.

    • Use the data returned by DescribeTable to add the read capacity units and write capacity units provisioned for the table itself to your variables.

    • If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.

  5. Report the account limits for that region returned by DescribeLimits, along with the total current provisioned capacity levels you have calculated.

This will let you see whether you are getting close to your account-level limits.

The per-table limits apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.

For existing tables and their GSIs, DynamoDB will not let you increase provisioned capacity extremely rapidly, but the only upper limit that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account limits.

DescribeLimits should only be called periodically. You can expect throttling errors if you call it more than once in a minute.

The DescribeLimits Request element has no content.

", + "endpointdiscovery":{ + } }, "DescribeTable":{ "name":"DescribeTable", @@ -223,7 +259,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

" + "documentation":"

Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

", + "endpointdiscovery":{ + } }, "DescribeTimeToLive":{ "name":"DescribeTimeToLive", @@ -237,7 +275,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Gives a description of the Time to Live (TTL) status on the specified table.

" + "documentation":"

Gives a description of the Time to Live (TTL) status on the specified table.

", + "endpointdiscovery":{ + } }, "GetItem":{ "name":"GetItem", @@ -252,7 +292,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data and there will be no Item element in the response.

GetItem provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead to true. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.

" + "documentation":"

The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data and there will be no Item element in the response.

GetItem provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead to true. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.

", + "endpointdiscovery":{ + } }, "ListBackups":{ "name":"ListBackups", @@ -265,7 +307,9 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

List backups associated with an AWS account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1MB worth of items in a page. You can also specify a limit for the maximum number of entries to be returned in a page.

In the request, start time is inclusive but end time is exclusive. Note that these limits are for the time at which the original backup was requested.

You can call ListBackups a maximum of 5 times per second.

" + "documentation":"

List backups associated with an AWS account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1MB worth of items in a page. You can also specify a limit for the maximum number of entries to be returned in a page.

In the request, start time is inclusive but end time is exclusive. Note that these limits are for the time at which the original backup was requested.

You can call ListBackups a maximum of 5 times per second.

", + "endpointdiscovery":{ + } }, "ListGlobalTables":{ "name":"ListGlobalTables", @@ -278,7 +322,9 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Lists all global tables that have a replica in the specified region.

" + "documentation":"

Lists all global tables that have a replica in the specified region.

", + "endpointdiscovery":{ + } }, "ListTables":{ "name":"ListTables", @@ -291,7 +337,9 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.

" + "documentation":"

Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.

", + "endpointdiscovery":{ + } }, "ListTagsOfResource":{ "name":"ListTagsOfResource", @@ -305,7 +353,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

" + "documentation":"

List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

", + "endpointdiscovery":{ + } }, "PutItem":{ "name":"PutItem", @@ -322,7 +372,9 @@ {"shape":"ItemCollectionSizeLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.

This topic provides general information about the PutItem API.

For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:

When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.

" + "documentation":"

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.

This topic provides general information about the PutItem API.

For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:

When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.

", + "endpointdiscovery":{ + } }, "Query":{ "name":"Query", @@ -337,7 +389,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

The Query operation finds items based on primary key values. You can query any table or secondary index that has a composite primary key (a partition key and a sort key).

Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. To further refine the Query results, you can optionally provide a FilterExpression. A FilterExpression determines which items within the results should be returned to you. All of the other results are discarded.

A Query operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.

DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression.

Query results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward parameter to false.

A single Query operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

FilterExpression is applied after a Query finishes, but before the results are returned. A FilterExpression cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression.

A Query operation can return an empty result set and a LastEvaluatedKey if all the items read for the page of results are filtered out.

You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.

" + "documentation":"

The Query operation finds items based on primary key values. You can query any table or secondary index that has a composite primary key (a partition key and a sort key).

Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. To further refine the Query results, you can optionally provide a FilterExpression. A FilterExpression determines which items within the results should be returned to you. All of the other results are discarded.

A Query operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.

DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression.

Query results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward parameter to false.

A single Query operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

FilterExpression is applied after a Query finishes, but before the results are returned. A FilterExpression cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression.

A Query operation can return an empty result set and a LastEvaluatedKey if all the items read for the page of results are filtered out.

You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.

", + "endpointdiscovery":{ + } }, "RestoreTableFromBackup":{ "name":"RestoreTableFromBackup", @@ -355,7 +409,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

You can call RestoreTableFromBackup at a maximum rate of 10 times per second.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Cloudwatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

" + "documentation":"

Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

You can call RestoreTableFromBackup at a maximum rate of 10 times per second.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Cloudwatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

", + "endpointdiscovery":{ + } }, "RestoreTableToPointInTime":{ "name":"RestoreTableToPointInTime", @@ -374,7 +430,9 @@ {"shape":"PointInTimeRecoveryUnavailableException"}, {"shape":"InternalServerError"} ], - "documentation":"

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Provisioned read and write capacity

  • Encryption settings

    All these settings come from the current settings of the source table at the time of restore.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Cloudwatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

  • Point in time recovery settings

" + "documentation":"

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

  • Global secondary indexes (GSIs)

  • Local secondary indexes (LSIs)

  • Provisioned read and write capacity

  • Encryption settings

    All these settings come from the current settings of the source table at the time of restore.

You must manually set up the following on the restored table:

  • Auto scaling policies

  • IAM policies

  • Cloudwatch metrics and alarms

  • Tags

  • Stream settings

  • Time to Live (TTL) settings

  • Point in time recovery settings

", + "endpointdiscovery":{ + } }, "Scan":{ "name":"Scan", @@ -389,7 +447,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.

If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

A single Scan operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.

" + "documentation":"

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.

If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

A single Scan operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.

", + "endpointdiscovery":{ + } }, "TagResource":{ "name":"TagResource", @@ -404,7 +464,9 @@ {"shape":"InternalServerError"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to 5 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

" + "documentation":"

Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to 5 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

", + "endpointdiscovery":{ + } }, "UntagResource":{ "name":"UntagResource", @@ -419,7 +481,9 @@ {"shape":"InternalServerError"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to 5 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

" + "documentation":"

Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to 5 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

", + "endpointdiscovery":{ + } }, "UpdateContinuousBackups":{ "name":"UpdateContinuousBackups", @@ -434,7 +498,9 @@ {"shape":"ContinuousBackupsUnavailableException"}, {"shape":"InternalServerError"} ], - "documentation":"

UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days..

" + "documentation":"

UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.

", + "endpointdiscovery":{ + } }, "UpdateGlobalTable":{ "name":"UpdateGlobalTable", @@ -451,7 +517,9 @@ {"shape":"ReplicaNotFoundException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, must have the same name as the global table, must have the same key schema, and must have DynamoDB Streams enabled and must have same provisioned and maximum write capacity units.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

  • The global secondary indexes must have the same provisioned and maximum write capacity units.

" + "documentation":"

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, must have the same name as the global table, must have the same key schema, must have DynamoDB Streams enabled, and must have the same provisioned and maximum write capacity units.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

  • The global secondary indexes must have the same provisioned and maximum write capacity units.

", + "endpointdiscovery":{ + } }, "UpdateGlobalTableSettings":{ "name":"UpdateGlobalTableSettings", @@ -469,7 +537,9 @@ {"shape":"ResourceInUseException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates settings for a global table.

" + "documentation":"

Updates settings for a global table.

", + "endpointdiscovery":{ + } }, "UpdateItem":{ "name":"UpdateItem", @@ -486,7 +556,9 @@ {"shape":"ItemCollectionSizeLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).

You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.

" + "documentation":"

Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).

You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.

", + "endpointdiscovery":{ + } }, "UpdateTable":{ "name":"UpdateTable", @@ -502,7 +574,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

You can only perform one of the following operations at once:

  • Modify the provisioned throughput settings of the table.

  • Enable or disable Streams on the table.

  • Remove a global secondary index from the table.

  • Create a new global secondary index on the table. Once the index begins backfilling, you can use UpdateTable to perform other operations.

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

" + "documentation":"

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

You can only perform one of the following operations at once:

  • Modify the provisioned throughput settings of the table.

  • Enable or disable Streams on the table.

  • Remove a global secondary index from the table.

  • Create a new global secondary index on the table. Once the index begins backfilling, you can use UpdateTable to perform other operations.

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

", + "endpointdiscovery":{ + } }, "UpdateTimeToLive":{ "name":"UpdateTimeToLive", @@ -518,7 +592,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

The UpdateTimeToLive method will enable or disable TTL for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification; it may take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one hour duration result in a ValidationException.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1st, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any Local Secondary Index and Global Secondary Index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The UpdateTimeToLive method will enable or disable TTL for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification; it may take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one hour duration result in a ValidationException.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1st, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any Local Secondary Index and Global Secondary Index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.

", + "endpointdiscovery":{ + } } }, "shapes":{ @@ -831,7 +907,7 @@ }, "BackupType":{ "shape":"BackupType", - "documentation":"

BackupType:

  • USER - On demand backup created by you.

  • SYSTEM - On demand backup automatically created by DynamoDB.

" + "documentation":"

BackupType:

  • USER - On-demand backup created by you.

  • SYSTEM - On-demand backup automatically created by DynamoDB.

" }, "BackupCreationDateTime":{ "shape":"BackupCreationDateTime", @@ -839,7 +915,7 @@ }, "BackupExpiryDateTime":{ "shape":"Date", - "documentation":"

Time at which the automatic on demand backup created by DynamoDB will expire. This SYSTEM on demand backup expires automatically 35 days after its creation.

" + "documentation":"

Time at which the automatic on-demand backup created by DynamoDB will expire. This SYSTEM on-demand backup expires automatically 35 days after its creation.

" } }, "documentation":"

Contains the details of the backup created for the table.

" @@ -911,7 +987,7 @@ }, "BackupExpiryDateTime":{ "shape":"Date", - "documentation":"

Time at which the automatic on demand backup created by DynamoDB will expire. This SYSTEM on demand backup expires automatically 35 days after its creation.

" + "documentation":"

Time at which the automatic on-demand backup created by DynamoDB will expire. This SYSTEM on-demand backup expires automatically 35 days after its creation.

" }, "BackupStatus":{ "shape":"BackupStatus", @@ -919,7 +995,7 @@ }, "BackupType":{ "shape":"BackupType", - "documentation":"

BackupType:

  • USER - On demand backup created by you.

  • SYSTEM - On demand backup automatically created by DynamoDB.

" + "documentation":"

BackupType:

  • USER - On-demand backup created by you.

  • SYSTEM - On-demand backup automatically created by DynamoDB.

" }, "BackupSizeBytes":{ "shape":"BackupSizeBytes", @@ -1138,7 +1214,7 @@ "members":{ "ContinuousBackupsStatus":{ "shape":"ContinuousBackupsStatus", - "documentation":"

ContinuousBackupsStatus can be one of the following states : ENABLED, DISABLED

" + "documentation":"

ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED

" }, "PointInTimeRecoveryDescription":{ "shape":"PointInTimeRecoveryDescription", @@ -1484,6 +1560,18 @@ } } }, + "DescribeEndpointsRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeEndpointsResponse":{ + "type":"structure", + "required":["Endpoints"], + "members":{ + "Endpoints":{"shape":"Endpoints"} + } + }, "DescribeGlobalTableInput":{ "type":"structure", "required":["GlobalTableName"], @@ -1595,6 +1683,21 @@ } }, "Double":{"type":"double"}, + "Endpoint":{ + "type":"structure", + "required":[ + "Address", + "CachePeriodInMinutes" + ], + "members":{ + "Address":{"shape":"String"}, + "CachePeriodInMinutes":{"shape":"Long"} + } + }, + "Endpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, "ErrorMessage":{"type":"string"}, "ExpectedAttributeMap":{ "type":"map", @@ -2008,6 +2111,7 @@ "member":{"shape":"AttributeMap"} }, "KMSMasterKeyArn":{"type":"string"}, + "KMSMasterKeyId":{"type":"string"}, "Key":{ "type":"map", "key":{"shape":"AttributeName"}, @@ -2108,7 +2212,7 @@ "members":{ "TableName":{ "shape":"TableName", - "documentation":"

The backups from the table specified by TableName are listed.

" + "documentation":"

The backups from the table specified by TableName are listed.

" }, "Limit":{ "shape":"BackupsInputLimit", @@ -2128,7 +2232,7 @@ }, "BackupType":{ "shape":"BackupTypeFilter", - "documentation":"

The backups from the table specified by BackupType are listed.

Where BackupType can be:

  • USER - On demand backup created by you.

  • SYSTEM - On demand backup automatically created by DynamoDB.

  • ALL - All types of on demand backups (USER and SYSTEM).

" + "documentation":"

The backups from the table specified by BackupType are listed.

Where BackupType can be:

  • USER - On-demand backup created by you.

  • SYSTEM - On-demand backup automatically created by DynamoDB.

  • ALL - All types of on-demand backups (USER and SYSTEM).

" } } }, @@ -2996,7 +3100,7 @@ "members":{ "Status":{ "shape":"SSEStatus", - "documentation":"

The current state of server-side encryption:

  • ENABLING - Server-side encryption is being enabled.

  • ENABLED - Server-side encryption is enabled.

  • DISABLING - Server-side encryption is being disabled.

  • DISABLED - Server-side encryption is disabled.

" + "documentation":"

The current state of server-side encryption:

  • ENABLING - Server-side encryption is being enabled.

  • ENABLED - Server-side encryption is enabled.

  • DISABLING - Server-side encryption is being disabled.

  • DISABLED - Server-side encryption is disabled.

  • UPDATING - Server-side encryption is being updated.

" }, "SSEType":{ "shape":"SSEType", @@ -3012,11 +3116,18 @@ "SSEEnabled":{"type":"boolean"}, "SSESpecification":{ "type":"structure", - "required":["Enabled"], "members":{ "Enabled":{ "shape":"SSEEnabled", "documentation":"

Indicates whether server-side encryption is enabled (true) or disabled (false) on the table.

" + }, + "SSEType":{ + "shape":"SSEType", + "documentation":"

Server-side encryption type:

  • AES256 - Server-side encryption which uses the AES256 algorithm.

  • KMS - Server-side encryption which uses AWS Key Management Service. (default)

" + }, + "KMSMasterKeyId":{ + "shape":"KMSMasterKeyId", + "documentation":"

The KMS Master Key (CMK) which should be used for the KMS encryption. To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB KMS Master Key alias/aws/dynamodb.

" } }, "documentation":"

Represents the settings used to enable server-side encryption.

" @@ -3027,7 +3138,8 @@ "ENABLING", "ENABLED", "DISABLING", - "DISABLED" + "DISABLED", + "UPDATING" ] }, "SSEType":{ @@ -3717,6 +3829,10 @@ "StreamSpecification":{ "shape":"StreamSpecification", "documentation":"

Represents the DynamoDB Streams configuration for the table.

You will receive a ResourceInUseException if you attempt to enable a stream on a table that already has a stream, or if you attempt to disable a stream on a table which does not have a stream.

" + }, + "SSESpecification":{ + "shape":"SSESpecification", + "documentation":"

The new server-side encryption settings for the specified table.

" } }, "documentation":"

Represents the input of an UpdateTable operation.

" diff --git a/botocore/data/ec2/2014-09-01/service-2.json b/botocore/data/ec2/2014-09-01/service-2.json index 7c92d3a6..07903f11 100644 --- a/botocore/data/ec2/2014-09-01/service-2.json +++ b/botocore/data/ec2/2014-09-01/service-2.json @@ -4,6 +4,7 @@ "endpointPrefix":"ec2", "serviceAbbreviation":"Amazon EC2", "serviceFullName":"Amazon Elastic Compute Cloud", + "serviceId":"EC2", "signatureVersion":"v4", "xmlNamespace":"http://ec2.amazonaws.com/doc/2014-09-01", "protocol":"ec2" diff --git a/botocore/data/ec2/2014-10-01/service-2.json b/botocore/data/ec2/2014-10-01/service-2.json index 3bf12bd9..91ef68f0 100644 --- a/botocore/data/ec2/2014-10-01/service-2.json +++ b/botocore/data/ec2/2014-10-01/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"ec2", "serviceAbbreviation":"Amazon EC2", "serviceFullName":"Amazon Elastic Compute Cloud", + "serviceId":"EC2", "signatureVersion":"v4", "xmlNamespace":"http://ec2.amazonaws.com/doc/2014-10-01", "protocol":"ec2" diff --git a/botocore/data/ec2/2015-03-01/service-2.json b/botocore/data/ec2/2015-03-01/service-2.json index 88cf1402..dbaf9d8b 100644 --- a/botocore/data/ec2/2015-03-01/service-2.json +++ b/botocore/data/ec2/2015-03-01/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"ec2", "serviceAbbreviation":"Amazon EC2", "serviceFullName":"Amazon Elastic Compute Cloud", + "serviceId":"EC2", "signatureVersion":"v4", "xmlNamespace":"http://ec2.amazonaws.com/doc/2015-03-01", "protocol":"ec2" diff --git a/botocore/data/ec2/2015-04-15/service-2.json b/botocore/data/ec2/2015-04-15/service-2.json index 426927ae..3ca3ae43 100644 --- a/botocore/data/ec2/2015-04-15/service-2.json +++ b/botocore/data/ec2/2015-04-15/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"ec2", "serviceAbbreviation":"Amazon EC2", "serviceFullName":"Amazon Elastic Compute Cloud", + "serviceId":"EC2", "signatureVersion":"v4", "xmlNamespace":"http://ec2.amazonaws.com/doc/2015-04-15", "protocol":"ec2" diff --git a/botocore/data/ec2/2015-10-01/service-2.json 
b/botocore/data/ec2/2015-10-01/service-2.json index 7a086d3b..f7b37154 100644 --- a/botocore/data/ec2/2015-10-01/service-2.json +++ b/botocore/data/ec2/2015-10-01/service-2.json @@ -6,6 +6,7 @@ "protocol":"ec2", "serviceAbbreviation":"Amazon EC2", "serviceFullName":"Amazon Elastic Compute Cloud", + "serviceId":"EC2", "signatureVersion":"v4", "xmlNamespace":"http://ec2.amazonaws.com/doc/2015-10-01" }, diff --git a/botocore/data/ec2/2016-04-01/service-2.json b/botocore/data/ec2/2016-04-01/service-2.json index 7ebbf187..b4f7063c 100644 --- a/botocore/data/ec2/2016-04-01/service-2.json +++ b/botocore/data/ec2/2016-04-01/service-2.json @@ -6,6 +6,7 @@ "protocol":"ec2", "serviceAbbreviation":"Amazon EC2", "serviceFullName":"Amazon Elastic Compute Cloud", + "serviceId":"EC2", "signatureVersion":"v4", "xmlNamespace":"http://ec2.amazonaws.com/doc/2016-04-01" }, diff --git a/botocore/data/ec2/2016-09-15/service-2.json b/botocore/data/ec2/2016-09-15/service-2.json index 700f84ac..34ad98b0 100644 --- a/botocore/data/ec2/2016-09-15/service-2.json +++ b/botocore/data/ec2/2016-09-15/service-2.json @@ -6,6 +6,7 @@ "protocol":"ec2", "serviceAbbreviation":"Amazon EC2", "serviceFullName":"Amazon Elastic Compute Cloud", + "serviceId":"EC2", "signatureVersion":"v4", "xmlNamespace":"http://ec2.amazonaws.com/doc/2016-09-15" }, diff --git a/botocore/data/ec2/2016-11-15/paginators-1.json b/botocore/data/ec2/2016-11-15/paginators-1.json index 3d086aac..ae4cc354 100644 --- a/botocore/data/ec2/2016-11-15/paginators-1.json +++ b/botocore/data/ec2/2016-11-15/paginators-1.json @@ -1,5 +1,11 @@ { "pagination": { + "DescribeRouteTables": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "RouteTables" + }, "DescribeIamInstanceProfileAssociations": { "input_token": "NextToken", "output_token": "NextToken", @@ -82,6 +88,12 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "NatGateways" + }, + "DescribeNetworkInterfaces": 
{ + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "NetworkInterfaces" } } } diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index df945c9d..15a0b20a 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -313,7 +313,7 @@ }, "input":{"shape":"CopyImageRequest"}, "output":{"shape":"CopyImageResult"}, - "documentation":"

Initiates the copy of an AMI from the specified source region to the current region. You specify the destination region by using its endpoint when making the request.

For more information about the prerequisites and limits when copying an AMI, see Copying an AMI in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Initiates the copy of an AMI from the specified source region to the current region. You specify the destination region by using its endpoint when making the request.

Copies of encrypted backing snapshots for the AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, unless you set Encrypted during the copy operation. You cannot create an unencrypted copy of an encrypted backing snapshot.

For more information about the prerequisites and limits when copying an AMI, see Copying an AMI in the Amazon Elastic Compute Cloud User Guide.

" }, "CopySnapshot":{ "name":"CopySnapshot", @@ -323,7 +323,7 @@ }, "input":{"shape":"CopySnapshotRequest"}, "output":{"shape":"CopySnapshotResult"}, - "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateCustomerGateway":{ "name":"CreateCustomerGateway", @@ -353,7 +353,7 @@ }, "input":{"shape":"CreateDefaultVpcRequest"}, "output":{"shape":"CreateDefaultVpcResult"}, - "documentation":"

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

iIf you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.

" + "documentation":"

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.

" }, "CreateDhcpOptions":{ "name":"CreateDhcpOptions", @@ -1188,7 +1188,7 @@ }, "input":{"shape":"DescribeHostReservationOfferingsRequest"}, "output":{"shape":"DescribeHostReservationOfferingsResult"}, - "documentation":"

Describes the Dedicated Host reservations that are available to purchase.

The results describe all the Dedicated Host reservation offerings, including offerings that may not match the instance family and region of your Dedicated Hosts. When purchasing an offering, ensure that the instance family and Region of the offering matches that of the Dedicated Hosts with which it is to be associated . For more information about supported instance types, see Dedicated Hosts Overview in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the Dedicated Host reservations that are available to purchase.

The results describe all the Dedicated Host reservation offerings, including offerings that may not match the instance family and region of your Dedicated Hosts. When purchasing an offering, ensure that the instance family and Region of the offering matches that of the Dedicated Hosts with which it is to be associated. For more information about supported instance types, see Dedicated Hosts Overview in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeHostReservations":{ "name":"DescribeHostReservations", @@ -1298,7 +1298,7 @@ }, "input":{"shape":"DescribeInstanceCreditSpecificationsRequest"}, "output":{"shape":"DescribeInstanceCreditSpecificationsResult"}, - "documentation":"

Describes the credit option for CPU usage of one or more of your T2 instances. The credit options are standard and unlimited.

If you do not specify an instance ID, Amazon EC2 returns only the T2 instances with the unlimited credit option. If you specify one or more instance IDs, Amazon EC2 returns the credit option (standard or unlimited) of those instances. If you specify an instance ID that is not valid, such as an instance that is not a T2 instance, an error is returned.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If an Availability Zone is experiencing a service disruption and you specify instance IDs in the affected zone, or do not specify any instance IDs at all, the call fails. If you specify only instance IDs in an unaffected zone, the call works normally.

For more information, see T2 Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the credit option for CPU usage of one or more of your T2 or T3 instances. The credit options are standard and unlimited.

If you do not specify an instance ID, Amazon EC2 returns T2 and T3 instances with the unlimited credit option, as well as instances that were previously configured as T2 or T3 with the unlimited credit option. For example, if you resize a T2 instance, while it is configured as unlimited, to an M4 instance, Amazon EC2 returns the M4 instance.

If you specify one or more instance IDs, Amazon EC2 returns the credit option (standard or unlimited) of those instances. If you specify an instance ID that is not valid, such as an instance that is not a T2 or T3 instance, an error is returned.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If an Availability Zone is experiencing a service disruption and you specify instance IDs in the affected zone, or do not specify any instance IDs at all, the call fails. If you specify only instance IDs in an unaffected zone, the call works normally.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeInstanceStatus":{ "name":"DescribeInstanceStatus", @@ -2130,7 +2130,7 @@ }, "input":{"shape":"ModifyHostsRequest"}, "output":{"shape":"ModifyHostsResult"}, - "documentation":"

Modify the auto-placement setting of a Dedicated Host. When auto-placement is enabled, any instances that you launch with a tenancy of host but without a specific host ID are placed onto any available Dedicated Host in your account that has auto-placement enabled. When auto-placement is disabled, you need to provide a host ID ito have the instance launch onto a specific host. If no host ID is provided, the instance is launched onto a suitable host with auto-placement enabled.

" + "documentation":"

Modify the auto-placement setting of a Dedicated Host. When auto-placement is enabled, any instances that you launch with a tenancy of host but without a specific host ID are placed onto any available Dedicated Host in your account that has auto-placement enabled. When auto-placement is disabled, you need to provide a host ID to have the instance launch onto a specific host. If no host ID is provided, the instance is launched onto a suitable host with auto-placement enabled.

" }, "ModifyIdFormat":{ "name":"ModifyIdFormat", @@ -2176,7 +2176,7 @@ }, "input":{"shape":"ModifyInstanceCreditSpecificationRequest"}, "output":{"shape":"ModifyInstanceCreditSpecificationResult"}, - "documentation":"

Modifies the credit option for CPU usage on a running or stopped T2 instance. The credit options are standard and unlimited.

For more information, see T2 Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Modifies the credit option for CPU usage on a running or stopped T2 or T3 instance. The credit options are standard and unlimited.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifyInstancePlacement":{ "name":"ModifyInstancePlacement", @@ -2439,7 +2439,7 @@ }, "input":{"shape":"ReleaseHostsRequest"}, "output":{"shape":"ReleaseHostsResult"}, - "documentation":"

When you no longer want to use an On-Demand Dedicated Host it can be released. On-Demand billing is stopped and the host goes into released state. The host ID of Dedicated Hosts that have been released can no longer be specified in another request, for example, ModifyHosts. You must stop or terminate all instances on a host before it can be released.

When Dedicated Hosts are released, it may take some time for them to stop counting toward your limit and you may receive capacity errors when trying to allocate new Dedicated Hosts. Wait a few minutes and then try again.

Released hosts still appear in a DescribeHosts response.

" + "documentation":"

When you no longer want to use an On-Demand Dedicated Host it can be released. On-Demand billing is stopped and the host goes into released state. The host ID of Dedicated Hosts that have been released can no longer be specified in another request, for example, to modify the host. You must stop or terminate all instances on a host before it can be released.

When Dedicated Hosts are released, it may take some time for them to stop counting toward your limit and you may receive capacity errors when trying to allocate new Dedicated Hosts. Wait a few minutes and then try again.

Released hosts still appear in a DescribeHosts response.

" }, "ReplaceIamInstanceProfileAssociation":{ "name":"ReplaceIamInstanceProfileAssociation", @@ -2770,8 +2770,7 @@ "documentation":"

The ID of the VPC peering connection. You must specify this parameter in the request.

", "locationName":"vpcPeeringConnectionId" } - }, - "documentation":"

Contains the parameters for AcceptVpcPeeringConnection.

" + } }, "AcceptVpcPeeringConnectionResult":{ "type":"structure", @@ -2781,8 +2780,7 @@ "documentation":"

Information about the VPC peering connection.

", "locationName":"vpcPeeringConnection" } - }, - "documentation":"

Contains the output of AcceptVpcPeeringConnection.

" + } }, "AccountAttribute":{ "type":"structure", @@ -3018,9 +3016,13 @@ "shape":"Integer", "documentation":"

The number of Dedicated Hosts to allocate to your account with these parameters.

", "locationName":"quantity" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the Dedicated Host during creation.

", + "locationName":"TagSpecification" } - }, - "documentation":"

Contains the parameters for AllocateHosts.

" + } }, "AllocateHostsResult":{ "type":"structure", @@ -3219,8 +3221,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for AssociateDhcpOptions.

" + } }, "AssociateIamInstanceProfileRequest":{ "type":"structure", @@ -3271,8 +3272,7 @@ "documentation":"

The ID of the subnet.

", "locationName":"subnetId" } - }, - "documentation":"

Contains the parameters for AssociateRouteTable.

" + } }, "AssociateRouteTableResult":{ "type":"structure", @@ -3282,8 +3282,7 @@ "documentation":"

The route table association ID. This ID is required for disassociating the route table.

", "locationName":"associationId" } - }, - "documentation":"

Contains the output of AssociateRouteTable.

" + } }, "AssociateSubnetCidrBlockRequest":{ "type":"structure", @@ -3394,8 +3393,7 @@ "documentation":"

The ID of a ClassicLink-enabled VPC.

", "locationName":"vpcId" } - }, - "documentation":"

Contains the parameters for AttachClassicLinkVpc.

" + } }, "AttachClassicLinkVpcResult":{ "type":"structure", @@ -3405,8 +3403,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of AttachClassicLinkVpc.

" + } }, "AttachInternetGatewayRequest":{ "type":"structure", @@ -3430,8 +3427,7 @@ "documentation":"

The ID of the VPC.

", "locationName":"vpcId" } - }, - "documentation":"

Contains the parameters for AttachInternetGateway.

" + } }, "AttachNetworkInterfaceRequest":{ "type":"structure", @@ -3617,8 +3613,7 @@ "documentation":"

Not supported. Use a set of IP permissions to specify a destination security group.

", "locationName":"sourceSecurityGroupOwnerId" } - }, - "documentation":"

Contains the parameters for AuthorizeSecurityGroupEgress.

" + } }, "AuthorizeSecurityGroupIngressRequest":{ "type":"structure", @@ -3664,8 +3659,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for AuthorizeSecurityGroupIngress.

" + } }, "AutoPlacement":{ "type":"string", @@ -4612,7 +4606,7 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the destination snapshots of the copied image should be encrypted. The default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Specifies whether the destination snapshots of the copied image should be encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted copy of an encrypted snapshot. The default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"encrypted" }, "KmsKeyId":{ @@ -4669,7 +4663,7 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the destination snapshot should be encrypted. You can encrypt a copy of an unencrypted snapshot using this flag, but you cannot use it to create an unencrypted copy from an encrypted snapshot. Your default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Specifies whether the destination snapshot should be encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot use it to create an unencrypted copy of an encrypted snapshot. Your default CMK for EBS is used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"encrypted" }, "KmsKeyId":{ @@ -4810,8 +4804,7 @@ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } - }, - "documentation":"

Contains the parameters for CreateDefaultVpc.

" + } }, "CreateDefaultVpcResult":{ "type":"structure", @@ -4821,8 +4814,7 @@ "documentation":"

Information about the VPC.

", "locationName":"vpc" } - }, - "documentation":"

Contains the output of CreateDefaultVpc.

" + } }, "CreateDhcpOptionsRequest":{ "type":"structure", @@ -4838,8 +4830,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for CreateDhcpOptions.

" + } }, "CreateDhcpOptionsResult":{ "type":"structure", @@ -4849,8 +4840,7 @@ "documentation":"

A set of DHCP options.

", "locationName":"dhcpOptions" } - }, - "documentation":"

Contains the output of CreateDhcpOptions.

" + } }, "CreateEgressOnlyInternetGatewayRequest":{ "type":"structure", @@ -5000,10 +4990,9 @@ }, "LogDestination":{ "shape":"String", - "documentation":"

Specifies the destination to which the flow log data is to be published. Flow log data can be published to an CloudWatch Logs log group or an Amazon S3 bucket. The value specified for this parameter depends on the value specified for LogDestinationType.

If LogDestinationType is not specified or cloud-watch-logs, specify the Amazon Resource Name (ARN) of the CloudWatch Logs log group.

If LogDestinationType is s3, specify the ARN of the Amazon S3 bucket. You can also specify a subfolder in the bucket. To specify a subfolder in the bucket, use the following ARN format: bucket_ARN/subfolder_name/. For example, to specify a subfolder named my-logs in a bucket named my-bucket, use the following ARN: arn:aws:s3:::my-bucket/my-logs/.

" + "documentation":"

Specifies the destination to which the flow log data is to be published. Flow log data can be published to an CloudWatch Logs log group or an Amazon S3 bucket. The value specified for this parameter depends on the value specified for LogDestinationType.

If LogDestinationType is not specified or cloud-watch-logs, specify the Amazon Resource Name (ARN) of the CloudWatch Logs log group.

If LogDestinationType is s3, specify the ARN of the Amazon S3 bucket. You can also specify a subfolder in the bucket. To specify a subfolder in the bucket, use the following ARN format: bucket_ARN/subfolder_name/. For example, to specify a subfolder named my-logs in a bucket named my-bucket, use the following ARN: arn:aws:s3:::my-bucket/my-logs/. You cannot use AWSLogs as a subfolder name. This is a reserved term.

" } - }, - "documentation":"

Contains the parameters for CreateFlowLogs.

" + } }, "CreateFlowLogsResult":{ "type":"structure", @@ -5023,8 +5012,7 @@ "documentation":"

Information about the flow logs that could not be created successfully.

", "locationName":"unsuccessful" } - }, - "documentation":"

Contains the output of CreateFlowLogs.

" + } }, "CreateFpgaImageRequest":{ "type":"structure", @@ -5080,7 +5068,7 @@ "members":{ "BlockDeviceMappings":{ "shape":"BlockDeviceMappingRequestList", - "documentation":"

Information about one or more block device mappings.

", + "documentation":"

Information about one or more block device mappings. This parameter cannot be used to modify the encryption status of existing volumes or snapshots. To create an AMI with encrypted snapshots, use the CopyImage action.

", "locationName":"blockDeviceMapping" }, "Description":{ @@ -5168,8 +5156,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for CreateInternetGateway.

" + } }, "CreateInternetGatewayResult":{ "type":"structure", @@ -5179,8 +5166,7 @@ "documentation":"

Information about the internet gateway.

", "locationName":"internetGateway" } - }, - "documentation":"

Contains the output of CreateInternetGateway.

" + } }, "CreateKeyPairRequest":{ "type":"structure", @@ -5195,8 +5181,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for CreateKeyPair.

" + } }, "CreateLaunchTemplateRequest":{ "type":"structure", @@ -5300,8 +5285,7 @@ "shape":"String", "documentation":"

The subnet in which to create the NAT gateway.

" } - }, - "documentation":"

Contains the parameters for CreateNatGateway.

" + } }, "CreateNatGatewayResult":{ "type":"structure", @@ -5316,8 +5300,7 @@ "documentation":"

Information about the NAT gateway.

", "locationName":"natGateway" } - }, - "documentation":"

Contains the output of CreateNatGateway.

" + } }, "CreateNetworkAclEntryRequest":{ "type":"structure", @@ -5379,8 +5362,7 @@ "documentation":"

The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.

Constraints: Positive integer from 1 to 32766. The range 32767 to 65535 is reserved for internal use.

", "locationName":"ruleNumber" } - }, - "documentation":"

Contains the parameters for CreateNetworkAclEntry.

" + } }, "CreateNetworkAclRequest":{ "type":"structure", @@ -5396,8 +5378,7 @@ "documentation":"

The ID of the VPC.

", "locationName":"vpcId" } - }, - "documentation":"

Contains the parameters for CreateNetworkAcl.

" + } }, "CreateNetworkAclResult":{ "type":"structure", @@ -5407,8 +5388,7 @@ "documentation":"

Information about the network ACL.

", "locationName":"networkAcl" } - }, - "documentation":"

Contains the output of CreateNetworkAcl.

" + } }, "CreateNetworkInterfacePermissionRequest":{ "type":"structure", @@ -5636,8 +5616,7 @@ "documentation":"

The ID of a VPC peering connection.

", "locationName":"vpcPeeringConnectionId" } - }, - "documentation":"

Contains the parameters for CreateRoute.

" + } }, "CreateRouteResult":{ "type":"structure", @@ -5647,8 +5626,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of CreateRoute.

" + } }, "CreateRouteTableRequest":{ "type":"structure", @@ -5664,8 +5642,7 @@ "documentation":"

The ID of the VPC.

", "locationName":"vpcId" } - }, - "documentation":"

Contains the parameters for CreateRouteTable.

" + } }, "CreateRouteTableResult":{ "type":"structure", @@ -5675,8 +5652,7 @@ "documentation":"

Information about the route table.

", "locationName":"routeTable" } - }, - "documentation":"

Contains the output of CreateRouteTable.

" + } }, "CreateSecurityGroupRequest":{ "type":"structure", @@ -5703,8 +5679,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for CreateSecurityGroup.

" + } }, "CreateSecurityGroupResult":{ "type":"structure", @@ -5714,8 +5689,7 @@ "documentation":"

The ID of the security group.

", "locationName":"groupId" } - }, - "documentation":"

Contains the output of CreateSecurityGroup.

" + } }, "CreateSnapshotRequest":{ "type":"structure", @@ -5803,8 +5777,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for CreateSubnet.

" + } }, "CreateSubnetResult":{ "type":"structure", @@ -5814,8 +5787,7 @@ "documentation":"

Information about the subnet.

", "locationName":"subnet" } - }, - "documentation":"

Contains the output of CreateSubnet.

" + } }, "CreateTagsRequest":{ "type":"structure", @@ -5839,8 +5811,7 @@ "documentation":"

One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.

", "locationName":"Tag" } - }, - "documentation":"

Contains the parameters for CreateTags.

" + } }, "CreateVolumePermission":{ "type":"structure", @@ -6021,7 +5992,7 @@ }, "PrivateDnsEnabled":{ "shape":"Boolean", - "documentation":"

(Interface endpoint) Indicate whether to associate a private hosted zone with the specified VPC. The private hosted zone contains a record set for the default public DNS name for the service for the region (for example, kinesis.us-east-1.amazonaws.com) which resolves to the private IP addresses of the endpoint network interfaces in the VPC. This enables you to make requests to the default public DNS name for the service instead of the public DNS names that are automatically generated by the VPC endpoint service.

To use a private hosted zone, you must set the following VPC attributes to true: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to set the VPC attributes.

Default: true

" + "documentation":"

(Interface endpoint) Indicate whether to associate a private hosted zone with the specified VPC. The private hosted zone contains a record set for the default public DNS name for the service for the region (for example, kinesis.us-east-1.amazonaws.com) which resolves to the private IP addresses of the endpoint network interfaces in the VPC. This enables you to make requests to the default public DNS name for the service instead of the public DNS names that are automatically generated by the VPC endpoint service.

To use a private hosted zone, you must set the following VPC attributes to true: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to set the VPC attributes.

Default: false

" } }, "documentation":"

Contains the parameters for CreateVpcEndpoint.

" @@ -6107,8 +6078,7 @@ "shape":"String", "documentation":"

The region code for the accepter VPC, if the accepter VPC is located in a region other than the region in which you make the request.

Default: The region in which you make the request.

" } - }, - "documentation":"

Contains the parameters for CreateVpcPeeringConnection.

" + } }, "CreateVpcPeeringConnectionResult":{ "type":"structure", @@ -6118,8 +6088,7 @@ "documentation":"

Information about the VPC peering connection.

", "locationName":"vpcPeeringConnection" } - }, - "documentation":"

Contains the output of CreateVpcPeeringConnection.

" + } }, "CreateVpcRequest":{ "type":"structure", @@ -6144,8 +6113,7 @@ "documentation":"

The tenancy options for instances launched into the VPC. For default, instances are launched with shared tenancy by default. You can launch instances with any tenancy into a shared tenancy VPC. For dedicated, instances are launched as dedicated tenancy instances by default. You can only launch instances with a tenancy of dedicated or host into a dedicated tenancy VPC.

Important: The host value cannot be used with this parameter. Use the default or dedicated values only.

Default: default

", "locationName":"instanceTenancy" } - }, - "documentation":"

Contains the parameters for CreateVpc.

" + } }, "CreateVpcResult":{ "type":"structure", @@ -6155,8 +6123,7 @@ "documentation":"

Information about the VPC.

", "locationName":"vpc" } - }, - "documentation":"

Contains the output of CreateVpc.

" + } }, "CreateVpnConnectionRequest":{ "type":"structure", @@ -6260,11 +6227,11 @@ "members":{ "CpuCredits":{ "shape":"String", - "documentation":"

The credit option for CPU usage of a T2 instance. Valid values are standard and unlimited.

", + "documentation":"

The credit option for CPU usage of a T2 or T3 instance. Valid values are standard and unlimited.

", "locationName":"cpuCredits" } }, - "documentation":"

Describes the credit option for CPU usage of a T2 instance.

" + "documentation":"

Describes the credit option for CPU usage of a T2 or T3 instance.

" }, "CreditSpecificationRequest":{ "type":"structure", @@ -6272,10 +6239,10 @@ "members":{ "CpuCredits":{ "shape":"String", - "documentation":"

The credit option for CPU usage of a T2 instance. Valid values are standard and unlimited.

" + "documentation":"

The credit option for CPU usage of a T2 or T3 instance. Valid values are standard and unlimited.

" } }, - "documentation":"

The credit option for CPU usage of a T2 instance.

" + "documentation":"

The credit option for CPU usage of a T2 or T3 instance.

" }, "CurrencyCodeValues":{ "type":"string", @@ -6375,8 +6342,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DeleteDhcpOptions.

" + } }, "DeleteEgressOnlyInternetGatewayRequest":{ "type":"structure", @@ -6528,8 +6494,7 @@ "documentation":"

One or more flow log IDs.

", "locationName":"FlowLogId" } - }, - "documentation":"

Contains the parameters for DeleteFlowLogs.

" + } }, "DeleteFlowLogsResult":{ "type":"structure", @@ -6539,8 +6504,7 @@ "documentation":"

Information about the flow logs that could not be deleted successfully.

", "locationName":"unsuccessful" } - }, - "documentation":"

Contains the output of DeleteFlowLogs.

" + } }, "DeleteFpgaImageRequest":{ "type":"structure", @@ -6580,8 +6544,7 @@ "documentation":"

The ID of the internet gateway.

", "locationName":"internetGatewayId" } - }, - "documentation":"

Contains the parameters for DeleteInternetGateway.

" + } }, "DeleteKeyPairRequest":{ "type":"structure", @@ -6596,8 +6559,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DeleteKeyPair.

" + } }, "DeleteLaunchTemplateRequest":{ "type":"structure", @@ -6733,8 +6695,7 @@ "shape":"String", "documentation":"

The ID of the NAT gateway.

" } - }, - "documentation":"

Contains the parameters for DeleteNatGateway.

" + } }, "DeleteNatGatewayResult":{ "type":"structure", @@ -6744,8 +6705,7 @@ "documentation":"

The ID of the NAT gateway.

", "locationName":"natGatewayId" } - }, - "documentation":"

Contains the output of DeleteNatGateway.

" + } }, "DeleteNetworkAclEntryRequest":{ "type":"structure", @@ -6775,8 +6735,7 @@ "documentation":"

The rule number of the entry to delete.

", "locationName":"ruleNumber" } - }, - "documentation":"

Contains the parameters for DeleteNetworkAclEntry.

" + } }, "DeleteNetworkAclRequest":{ "type":"structure", @@ -6792,8 +6751,7 @@ "documentation":"

The ID of the network ACL.

", "locationName":"networkAclId" } - }, - "documentation":"

Contains the parameters for DeleteNetworkAcl.

" + } }, "DeleteNetworkInterfacePermissionRequest":{ "type":"structure", @@ -6883,8 +6841,7 @@ "documentation":"

The ID of the route table.

", "locationName":"routeTableId" } - }, - "documentation":"

Contains the parameters for DeleteRoute.

" + } }, "DeleteRouteTableRequest":{ "type":"structure", @@ -6900,8 +6857,7 @@ "documentation":"

The ID of the route table.

", "locationName":"routeTableId" } - }, - "documentation":"

Contains the parameters for DeleteRouteTable.

" + } }, "DeleteSecurityGroupRequest":{ "type":"structure", @@ -6919,8 +6875,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DeleteSecurityGroup.

" + } }, "DeleteSnapshotRequest":{ "type":"structure", @@ -6962,8 +6917,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DeleteSubnet.

" + } }, "DeleteTagsRequest":{ "type":"structure", @@ -6984,8 +6938,7 @@ "documentation":"

One or more tags to delete. Specify a tag key and an optional tag value to delete specific tags. If you specify a tag key without a tag value, we delete any tag with this key regardless of its value. If you specify a tag key with an empty string as the tag value, we delete the tag only if its value is an empty string.

If you omit this parameter, we delete all user-defined tags for the specified resources. We do not delete AWS-generated tags (tags that have the aws: prefix).

", "locationName":"tag" } - }, - "documentation":"

Contains the parameters for DeleteTags.

" + } }, "DeleteVolumeRequest":{ "type":"structure", @@ -7094,8 +7047,7 @@ "documentation":"

The ID of the VPC peering connection.

", "locationName":"vpcPeeringConnectionId" } - }, - "documentation":"

Contains the parameters for DeleteVpcPeeringConnection.

" + } }, "DeleteVpcPeeringConnectionResult":{ "type":"structure", @@ -7105,8 +7057,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of DeleteVpcPeeringConnection.

" + } }, "DeleteVpcRequest":{ "type":"structure", @@ -7121,8 +7072,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DeleteVpc.

" + } }, "DeleteVpnConnectionRequest":{ "type":"structure", @@ -7370,8 +7320,7 @@ "documentation":"

The token to retrieve the next page of results.

", "locationName":"nextToken" } - }, - "documentation":"

Contains the parameters for DescribeClassicLinkInstances.

" + } }, "DescribeClassicLinkInstancesResult":{ "type":"structure", @@ -7386,8 +7335,7 @@ "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "locationName":"nextToken" } - }, - "documentation":"

Contains the output of DescribeClassicLinkInstances.

" + } }, "DescribeConversionTaskList":{ "type":"list", @@ -7473,8 +7421,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DescribeDhcpOptions.

" + } }, "DescribeDhcpOptionsResult":{ "type":"structure", @@ -7484,8 +7431,7 @@ "documentation":"

Information about one or more DHCP options sets.

", "locationName":"dhcpOptionsSet" } - }, - "documentation":"

Contains the output of DescribeDhcpOptions.

" + } }, "DescribeEgressOnlyInternetGatewaysRequest":{ "type":"structure", @@ -7769,8 +7715,7 @@ "shape":"String", "documentation":"

The token to retrieve the next page of results.

" } - }, - "documentation":"

Contains the parameters for DescribeFlowLogs.

" + } }, "DescribeFlowLogsResult":{ "type":"structure", @@ -7785,8 +7730,7 @@ "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "locationName":"nextToken" } - }, - "documentation":"

Contains the output of DescribeFlowLogs.

" + } }, "DescribeFpgaImageAttributeRequest":{ "type":"structure", @@ -7923,7 +7867,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500.If maxResults is given a larger value than 500, you receive an error.

" + "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

" }, "NextToken":{ "shape":"String", @@ -7969,8 +7913,7 @@ "documentation":"

The token to retrieve the next page of results.

", "locationName":"nextToken" } - }, - "documentation":"

Contains the parameters for DescribeHosts.

" + } }, "DescribeHostsResult":{ "type":"structure", @@ -7985,8 +7928,7 @@ "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "locationName":"nextToken" } - }, - "documentation":"

Contains the output of DescribeHosts.

" + } }, "DescribeIamInstanceProfileAssociationsRequest":{ "type":"structure", @@ -8408,8 +8350,7 @@ "documentation":"

One or more internet gateway IDs.

Default: Describes all your internet gateways.

", "locationName":"internetGatewayId" } - }, - "documentation":"

Contains the parameters for DescribeInternetGateways.

" + } }, "DescribeInternetGatewaysResult":{ "type":"structure", @@ -8419,8 +8360,7 @@ "documentation":"

Information about one or more internet gateways.

", "locationName":"internetGatewaySet" } - }, - "documentation":"

Contains the output of DescribeInternetGateways.

" + } }, "DescribeKeyPairsRequest":{ "type":"structure", @@ -8440,8 +8380,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DescribeKeyPairs.

" + } }, "DescribeKeyPairsResult":{ "type":"structure", @@ -8451,8 +8390,7 @@ "documentation":"

Information about one or more key pairs.

", "locationName":"keySet" } - }, - "documentation":"

Contains the output of DescribeKeyPairs.

" + } }, "DescribeLaunchTemplateVersionsRequest":{ "type":"structure", @@ -8626,8 +8564,7 @@ "shape":"String", "documentation":"

The token to retrieve the next page of results.

" } - }, - "documentation":"

Contains the parameters for DescribeNatGateways.

" + } }, "DescribeNatGatewaysResult":{ "type":"structure", @@ -8642,15 +8579,14 @@ "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "locationName":"nextToken" } - }, - "documentation":"

Contains the output of DescribeNatGateways.

" + } }, "DescribeNetworkAclsRequest":{ "type":"structure", "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • association.association-id - The ID of an association ID for the ACL.

  • association.network-acl-id - The ID of the network ACL involved in the association.

  • association.subnet-id - The ID of the subnet involved in the association.

  • default - Indicates whether the ACL is the default network ACL for the VPC.

  • entry.cidr - The IPv4 CIDR range specified in the entry.

  • entry.egress - Indicates whether the entry applies to egress traffic.

  • entry.icmp.code - The ICMP code specified in the entry, if any.

  • entry.icmp.type - The ICMP type specified in the entry, if any.

  • entry.ipv6-cidr - The IPv6 CIDR range specified in the entry.

  • entry.port-range.from - The start of the port range specified in the entry.

  • entry.port-range.to - The end of the port range specified in the entry.

  • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).

  • entry.rule-action - Allows or denies the matching traffic (allow | deny).

  • entry.rule-number - The number of an entry (in other words, rule) in the ACL's set of entries.

  • network-acl-id - The ID of the network ACL.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network ACL.

", + "documentation":"

One or more filters.

  • association.association-id - The ID of an association ID for the ACL.

  • association.network-acl-id - The ID of the network ACL involved in the association.

  • association.subnet-id - The ID of the subnet involved in the association.

  • default - Indicates whether the ACL is the default network ACL for the VPC.

  • entry.cidr - The IPv4 CIDR range specified in the entry.

  • entry.icmp.code - The ICMP code specified in the entry, if any.

  • entry.icmp.type - The ICMP type specified in the entry, if any.

  • entry.ipv6-cidr - The IPv6 CIDR range specified in the entry.

  • entry.port-range.from - The start of the port range specified in the entry.

  • entry.port-range.to - The end of the port range specified in the entry.

  • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).

  • entry.rule-action - Allows or denies the matching traffic (allow | deny).

  • entry.rule-number - The number of an entry (in other words, rule) in the set of ACL entries.

  • network-acl-id - The ID of the network ACL.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network ACL.

", "locationName":"Filter" }, "DryRun":{ @@ -8663,8 +8599,7 @@ "documentation":"

One or more network ACL IDs.

Default: Describes all your network ACLs.

", "locationName":"NetworkAclId" } - }, - "documentation":"

Contains the parameters for DescribeNetworkAcls.

" + } }, "DescribeNetworkAclsResult":{ "type":"structure", @@ -8674,8 +8609,7 @@ "documentation":"

Information about one or more network ACLs.

", "locationName":"networkAclSet" } - }, - "documentation":"

Contains the output of DescribeNetworkAcls.

" + } }, "DescribeNetworkInterfaceAttributeRequest":{ "type":"structure", @@ -8787,6 +8721,14 @@ "shape":"NetworkInterfaceIdList", "documentation":"

One or more network interface IDs.

Default: Describes all your network interfaces.

", "locationName":"NetworkInterfaceId" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to retrieve the next page of results.

" + }, + "MaxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

" } }, "documentation":"

Contains the parameters for DescribeNetworkInterfaces.

" @@ -8798,6 +8740,11 @@ "shape":"NetworkInterfaceList", "documentation":"

Information about one or more network interfaces.

", "locationName":"networkInterfaceSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" } }, "documentation":"

Contains the output of DescribeNetworkInterfaces.

" @@ -8859,8 +8806,7 @@ "documentation":"

One or more prefix list IDs.

", "locationName":"PrefixListId" } - }, - "documentation":"

Contains the parameters for DescribePrefixLists.

" + } }, "DescribePrefixListsResult":{ "type":"structure", @@ -8875,8 +8821,7 @@ "documentation":"

All available prefix lists.

", "locationName":"prefixListSet" } - }, - "documentation":"

Contains the output of DescribePrefixLists.

" + } }, "DescribePrincipalIdFormatRequest":{ "type":"structure", @@ -9163,9 +9108,16 @@ "shape":"ValueStringList", "documentation":"

One or more route table IDs.

Default: Describes all your route tables.

", "locationName":"RouteTableId" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to retrieve the next page of results.

" + }, + "MaxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 5 and 100.

" } - }, - "documentation":"

Contains the parameters for DescribeRouteTables.

" + } }, "DescribeRouteTablesResult":{ "type":"structure", @@ -9174,6 +9126,11 @@ "shape":"RouteTableList", "documentation":"

Information about one or more route tables.

", "locationName":"routeTableSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" } }, "documentation":"

Contains the output of DescribeRouteTables.

" @@ -9291,7 +9248,7 @@ "members":{ "DryRun":{ "shape":"Boolean", - "documentation":"

Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" }, "GroupId":{ "shape":"GroupIds", @@ -9340,8 +9297,7 @@ "shape":"Integer", "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another request with the returned NextToken value. This value can be between 5 and 1000. If this parameter is not specified, then all results are returned.

" } - }, - "documentation":"

Contains the parameters for DescribeSecurityGroups.

" + } }, "DescribeSecurityGroupsResult":{ "type":"structure", @@ -9356,8 +9312,7 @@ "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "locationName":"nextToken" } - }, - "documentation":"

Contains the output of DescribeSecurityGroups.

" + } }, "DescribeSnapshotAttributeRequest":{ "type":"structure", @@ -9757,7 +9712,7 @@ "members":{ "DryRun":{ "shape":"Boolean", - "documentation":"

Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" }, "MaxResults":{ "shape":"MaxResults", @@ -9806,8 +9761,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DescribeSubnets.

" + } }, "DescribeSubnetsResult":{ "type":"structure", @@ -9817,8 +9771,7 @@ "documentation":"

Information about one or more subnets.

", "locationName":"subnetSet" } - }, - "documentation":"

Contains the output of DescribeSubnets.

" + } }, "DescribeTagsRequest":{ "type":"structure", @@ -9830,7 +9783,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • key - The tag key.

  • resource-id - The resource ID.

  • resource-type - The resource type (customer-gateway | dhcp-options | elastic-ip | fleet | fpga-image | image | instance | internet-gateway | launch-template | natgateway | network-acl | network-interface | reserved-instances | route-table | security-group | snapshot | spot-instances-request | subnet | volume | vpc | vpc-peering-connection | vpn-connection | vpn-gateway).

  • value - The tag value.

", + "documentation":"

One or more filters.

  • key - The tag key.

  • resource-id - The ID of the resource.

  • resource-type - The resource type (customer-gateway | dedicated-host | dhcp-options | elastic-ip | fleet | fpga-image | image | instance | internet-gateway | launch-template | natgateway | network-acl | network-interface | reserved-instances | route-table | security-group | snapshot | spot-instances-request | subnet | volume | vpc | vpc-peering-connection | vpn-connection | vpn-gateway).

  • tag:<key> - The key/value combination of the tag. For example, specify \"tag:Owner\" for the filter name and \"TeamA\" for the filter value to find resources with the tag \"Owner=TeamA\".

  • value - The tag value.

", "locationName":"Filter" }, "MaxResults":{ @@ -9843,8 +9796,7 @@ "documentation":"

The token to retrieve the next page of results.

", "locationName":"nextToken" } - }, - "documentation":"

Contains the parameters for DescribeTags.

" + } }, "DescribeTagsResult":{ "type":"structure", @@ -9856,15 +9808,17 @@ }, "Tags":{ "shape":"TagDescriptionList", - "documentation":"

A list of tags.

", + "documentation":"

The tags.

", "locationName":"tagSet" } - }, - "documentation":"

Contains the output of DescribeTags.

" + } }, "DescribeVolumeAttributeRequest":{ "type":"structure", - "required":["VolumeId"], + "required":[ + "Attribute", + "VolumeId" + ], "members":{ "Attribute":{ "shape":"VolumeAttributeName", @@ -10057,8 +10011,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DescribeVpcAttribute.

" + } }, "DescribeVpcAttributeResult":{ "type":"structure", @@ -10078,8 +10031,7 @@ "documentation":"

Indicates whether DNS resolution is enabled for the VPC. If this attribute is true, the Amazon DNS server resolves DNS hostnames for your instances to their corresponding IP addresses; otherwise, it does not.

", "locationName":"enableDnsSupport" } - }, - "documentation":"

Contains the output of DescribeVpcAttribute.

" + } }, "DescribeVpcClassicLinkDnsSupportRequest":{ "type":"structure", @@ -10098,8 +10050,7 @@ "shape":"VpcClassicLinkIdList", "documentation":"

One or more VPC IDs.

" } - }, - "documentation":"

Contains the parameters for DescribeVpcClassicLinkDnsSupport.

" + } }, "DescribeVpcClassicLinkDnsSupportResult":{ "type":"structure", @@ -10114,8 +10065,7 @@ "documentation":"

Information about the ClassicLink DNS support status of the VPCs.

", "locationName":"vpcs" } - }, - "documentation":"

Contains the output of DescribeVpcClassicLinkDnsSupport.

" + } }, "DescribeVpcClassicLinkRequest":{ "type":"structure", @@ -10135,8 +10085,7 @@ "documentation":"

One or more VPCs for which you want to describe the ClassicLink status.

", "locationName":"VpcId" } - }, - "documentation":"

Contains the parameters for DescribeVpcClassicLink.

" + } }, "DescribeVpcClassicLinkResult":{ "type":"structure", @@ -10146,8 +10095,7 @@ "documentation":"

The ClassicLink status of one or more VPCs.

", "locationName":"vpcSet" } - }, - "documentation":"

Contains the output of DescribeVpcClassicLink.

" + } }, "DescribeVpcEndpointConnectionNotificationsRequest":{ "type":"structure", @@ -10422,8 +10370,7 @@ "documentation":"

One or more VPC peering connection IDs.

Default: Describes all your VPC peering connections.

", "locationName":"VpcPeeringConnectionId" } - }, - "documentation":"

Contains the parameters for DescribeVpcPeeringConnections.

" + } }, "DescribeVpcPeeringConnectionsResult":{ "type":"structure", @@ -10433,8 +10380,7 @@ "documentation":"

Information about the VPC peering connections.

", "locationName":"vpcPeeringConnectionSet" } - }, - "documentation":"

Contains the output of DescribeVpcPeeringConnections.

" + } }, "DescribeVpcsRequest":{ "type":"structure", @@ -10454,8 +10400,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DescribeVpcs.

" + } }, "DescribeVpcsResult":{ "type":"structure", @@ -10465,8 +10410,7 @@ "documentation":"

Information about one or more VPCs.

", "locationName":"vpcSet" } - }, - "documentation":"

Contains the output of DescribeVpcs.

" + } }, "DescribeVpnConnectionsRequest":{ "type":"structure", @@ -10554,8 +10498,7 @@ "documentation":"

The ID of the VPC to which the instance is linked.

", "locationName":"vpcId" } - }, - "documentation":"

Contains the parameters for DetachClassicLinkVpc.

" + } }, "DetachClassicLinkVpcResult":{ "type":"structure", @@ -10565,8 +10508,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of DetachClassicLinkVpc.

" + } }, "DetachInternetGatewayRequest":{ "type":"structure", @@ -10590,8 +10532,7 @@ "documentation":"

The ID of the VPC.

", "locationName":"vpcId" } - }, - "documentation":"

Contains the parameters for DetachInternetGateway.

" + } }, "DetachNetworkInterfaceRequest":{ "type":"structure", @@ -10763,8 +10704,7 @@ "shape":"String", "documentation":"

The ID of the VPC.

" } - }, - "documentation":"

Contains the parameters for DisableVpcClassicLinkDnsSupport.

" + } }, "DisableVpcClassicLinkDnsSupportResult":{ "type":"structure", @@ -10774,8 +10714,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of DisableVpcClassicLinkDnsSupport.

" + } }, "DisableVpcClassicLinkRequest":{ "type":"structure", @@ -10791,8 +10730,7 @@ "documentation":"

The ID of the VPC.

", "locationName":"vpcId" } - }, - "documentation":"

Contains the parameters for DisableVpcClassicLink.

" + } }, "DisableVpcClassicLinkResult":{ "type":"structure", @@ -10802,8 +10740,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of DisableVpcClassicLink.

" + } }, "DisassociateAddressRequest":{ "type":"structure", @@ -10858,8 +10795,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for DisassociateRouteTable.

" + } }, "DisassociateSubnetCidrBlockRequest":{ "type":"structure", @@ -11050,11 +10986,6 @@ "EbsBlockDevice":{ "type":"structure", "members":{ - "Encrypted":{ - "shape":"Boolean", - "documentation":"

Indicates whether the EBS volume is encrypted. Encrypted volumes can only be attached to instances that support Amazon EBS encryption. If you are creating a volume from a snapshot, you can't specify an encryption value. This is because only blank volumes can be encrypted on creation.

", - "locationName":"encrypted" - }, "DeleteOnTermination":{ "shape":"Boolean", "documentation":"

Indicates whether the EBS volume is deleted on instance termination.

", @@ -11065,10 +10996,6 @@ "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", "locationName":"iops" }, - "KmsKeyId":{ - "shape":"String", - "documentation":"

Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK under which the EBS volume is encrypted.

This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

" - }, "SnapshotId":{ "shape":"String", "documentation":"

The ID of the snapshot.

", @@ -11083,6 +11010,15 @@ "shape":"VolumeType", "documentation":"

The volume type: gp2, io1, st1, sc1, or standard.

Default: standard

", "locationName":"volumeType" + }, + "Encrypted":{ + "shape":"Boolean", + "documentation":"

Indicates whether the EBS volume is encrypted. Encrypted volumes can only be attached to instances that support Amazon EBS encryption.

If you are creating a volume from a snapshot, you cannot specify an encryption value. This is because only blank volumes can be encrypted on creation. If you are creating a snapshot from an existing EBS volume, you cannot specify an encryption value that differs from that of the EBS volume. We recommend that you omit the encryption value from the block device mappings when creating an image from an instance.

", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK under which the EBS volume is encrypted.

This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

" } }, "documentation":"

Describes a block device for an EBS volume.

" @@ -11350,8 +11286,7 @@ "shape":"String", "documentation":"

The ID of the VPC.

" } - }, - "documentation":"

Contains the parameters for EnableVpcClassicLinkDnsSupport.

" + } }, "EnableVpcClassicLinkDnsSupportResult":{ "type":"structure", @@ -11361,8 +11296,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of EnableVpcClassicLinkDnsSupport.

" + } }, "EnableVpcClassicLinkRequest":{ "type":"structure", @@ -11378,8 +11312,7 @@ "documentation":"

The ID of the VPC.

", "locationName":"vpcId" } - }, - "documentation":"

Contains the parameters for EnableVpcClassicLink.

" + } }, "EnableVpcClassicLinkResult":{ "type":"structure", @@ -11389,8 +11322,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of EnableVpcClassicLink.

" + } }, "EventCode":{ "type":"string", @@ -11852,7 +11784,7 @@ }, "Version":{ "shape":"String", - "documentation":"

The version number. By default, the default version of the launch template is used.

", + "documentation":"

The version number of the launch template. You must specify a version number.

", "locationName":"version" } }, @@ -13538,8 +13470,7 @@ "documentation":"

The public key. For API calls, the text must be base64-encoded. For command line tools, base64 encoding is performed for you.

", "locationName":"publicKeyMaterial" } - }, - "documentation":"

Contains the parameters for ImportKeyPair.

" + } }, "ImportKeyPairResult":{ "type":"structure", @@ -13554,8 +13485,7 @@ "documentation":"

The key pair name you provided.

", "locationName":"keyName" } - }, - "documentation":"

Contains the output of ImportKeyPair.

" + } }, "ImportSnapshotRequest":{ "type":"structure", @@ -14141,7 +14071,7 @@ "locationName":"cpuCredits" } }, - "documentation":"

Describes the credit option for CPU usage of a T2 instance.

" + "documentation":"

Describes the credit option for CPU usage of a T2 or T3 instance.

" }, "InstanceCreditSpecificationList":{ "type":"list", @@ -14169,7 +14099,7 @@ "documentation":"

The credit option for CPU usage of the instance. Valid values are standard and unlimited.

" } }, - "documentation":"

Describes the credit option for CPU usage of a T2 instance.

" + "documentation":"

Describes the credit option for CPU usage of a T2 or T3 instance.

" }, "InstanceExportDetails":{ "type":"structure", @@ -14736,6 +14666,13 @@ "t2.large", "t2.xlarge", "t2.2xlarge", + "t3.nano", + "t3.micro", + "t3.small", + "t3.medium", + "t3.large", + "t3.xlarge", + "t3.2xlarge", "m1.small", "m1.medium", "m1.large", @@ -14847,6 +14784,7 @@ "d2.4xlarge", "d2.8xlarge", "f1.2xlarge", + "f1.4xlarge", "f1.16xlarge", "m5.large", "m5.xlarge", @@ -14869,7 +14807,10 @@ "z1d.2xlarge", "z1d.3xlarge", "z1d.6xlarge", - "z1d.12xlarge" + "z1d.12xlarge", + "u-6tb1.metal", + "u-9tb1.metal", + "u-12tb1.metal" ] }, "InstanceTypeList":{ @@ -14960,7 +14901,7 @@ }, "PrefixListIds":{ "shape":"PrefixListIdList", - "documentation":"

(EC2-VPC only; valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress request, this is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.

", + "documentation":"

[EC2-VPC only] One or more prefix list IDs for an AWS service. With AuthorizeSecurityGroupEgress, this is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.

", "locationName":"prefixListIds" }, "ToPort":{ @@ -16217,8 +16158,7 @@ "documentation":"

The IDs of the Dedicated Hosts to modify.

", "locationName":"hostId" } - }, - "documentation":"

Contains the parameters for ModifyHosts.

" + } }, "ModifyHostsResult":{ "type":"structure", @@ -16233,8 +16173,7 @@ "documentation":"

The IDs of the Dedicated Hosts that could not be modified. Check whether the setting you requested can be used.

", "locationName":"unsuccessful" } - }, - "documentation":"

Contains the output of ModifyHosts.

" + } }, "ModifyIdFormatRequest":{ "type":"structure", @@ -16479,8 +16418,7 @@ "documentation":"

The tenancy for the instance.

", "locationName":"tenancy" } - }, - "documentation":"

Contains the parameters for ModifyInstancePlacement.

" + } }, "ModifyInstancePlacementResult":{ "type":"structure", @@ -16490,8 +16428,7 @@ "documentation":"

Is true if the request succeeds, and an error otherwise.

", "locationName":"return" } - }, - "documentation":"

Contains the output of ModifyInstancePlacement.

" + } }, "ModifyLaunchTemplateRequest":{ "type":"structure", @@ -16690,8 +16627,7 @@ "documentation":"

The ID of the subnet.

", "locationName":"subnetId" } - }, - "documentation":"

Contains the parameters for ModifySubnetAttribute.

" + } }, "ModifyVolumeAttributeRequest":{ "type":"structure", @@ -16766,8 +16702,7 @@ "documentation":"

The ID of the VPC.

", "locationName":"vpcId" } - }, - "documentation":"

Contains the parameters for ModifyVpcAttribute.

" + } }, "ModifyVpcEndpointConnectionNotificationRequest":{ "type":"structure", @@ -16948,7 +16883,7 @@ }, "DryRun":{ "shape":"Boolean", - "documentation":"

Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" }, "RequesterPeeringConnectionOptions":{ "shape":"PeeringConnectionOptionsRequest", @@ -16992,10 +16927,9 @@ }, "DryRun":{ "shape":"Boolean", - "documentation":"

Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } - }, - "documentation":"

Contains the parameters for ModifyVpcTenancy.

" + } }, "ModifyVpcTenancyResult":{ "type":"structure", @@ -17005,8 +16939,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of ModifyVpcTenancy.

" + } }, "MonitorInstancesRequest":{ "type":"structure", @@ -17850,12 +17783,12 @@ }, "AllowEgressFromLocalClassicLinkToRemoteVpc":{ "shape":"Boolean", - "documentation":"

If true, enables outbound communication from an EC2-Classic instance that's linked to a local VPC via ClassicLink to instances in a peer VPC.

", + "documentation":"

If true, enables outbound communication from an EC2-Classic instance that's linked to a local VPC using ClassicLink to instances in a peer VPC.

", "locationName":"allowEgressFromLocalClassicLinkToRemoteVpc" }, "AllowEgressFromLocalVpcToRemoteClassicLink":{ "shape":"Boolean", - "documentation":"

If true, enables outbound communication from instances in a local VPC to an EC2-Classic instance that's linked to a peer VPC via ClassicLink.

", + "documentation":"

If true, enables outbound communication from instances in a local VPC to an EC2-Classic instance that's linked to a peer VPC using ClassicLink.

", "locationName":"allowEgressFromLocalVpcToRemoteClassicLink" } }, @@ -17870,11 +17803,11 @@ }, "AllowEgressFromLocalClassicLinkToRemoteVpc":{ "shape":"Boolean", - "documentation":"

If true, enables outbound communication from an EC2-Classic instance that's linked to a local VPC via ClassicLink to instances in a peer VPC.

" + "documentation":"

If true, enables outbound communication from an EC2-Classic instance that's linked to a local VPC using ClassicLink to instances in a peer VPC.

" }, "AllowEgressFromLocalVpcToRemoteClassicLink":{ "shape":"Boolean", - "documentation":"

If true, enables outbound communication from instances in a local VPC to an EC2-Classic instance that's linked to a peer VPC via ClassicLink.

" + "documentation":"

If true, enables outbound communication from instances in a local VPC to an EC2-Classic instance that's linked to a peer VPC using ClassicLink.

" } }, "documentation":"

The VPC peering connection options.

" @@ -18022,7 +17955,7 @@ "locationName":"prefixListId" } }, - "documentation":"

[EC2-VPC only] The ID of the prefix.

" + "documentation":"

Describes a prefix list ID.

" }, "PrefixListIdList":{ "type":"list", @@ -18731,8 +18664,7 @@ "documentation":"

The ID of the VPC peering connection.

", "locationName":"vpcPeeringConnectionId" } - }, - "documentation":"

Contains the parameters for RejectVpcPeeringConnection.

" + } }, "RejectVpcPeeringConnectionResult":{ "type":"structure", @@ -18742,8 +18674,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of RejectVpcPeeringConnection.

" + } }, "ReleaseAddressRequest":{ "type":"structure", @@ -18773,8 +18704,7 @@ "documentation":"

The IDs of the Dedicated Hosts to release.

", "locationName":"hostId" } - }, - "documentation":"

Contains the parameters for ReleaseHosts.

" + } }, "ReleaseHostsResult":{ "type":"structure", @@ -18789,8 +18719,7 @@ "documentation":"

The IDs of the Dedicated Hosts that could not be released, including an error message.

", "locationName":"unsuccessful" } - }, - "documentation":"

Contains the output of ReleaseHosts.

" + } }, "ReplaceIamInstanceProfileAssociationRequest":{ "type":"structure", @@ -18841,8 +18770,7 @@ "documentation":"

The ID of the new network ACL to associate with the subnet.

", "locationName":"networkAclId" } - }, - "documentation":"

Contains the parameters for ReplaceNetworkAclAssociation.

" + } }, "ReplaceNetworkAclAssociationResult":{ "type":"structure", @@ -18852,8 +18780,7 @@ "documentation":"

The ID of the new association.

", "locationName":"newAssociationId" } - }, - "documentation":"

Contains the output of ReplaceNetworkAclAssociation.

" + } }, "ReplaceNetworkAclEntryRequest":{ "type":"structure", @@ -18915,8 +18842,7 @@ "documentation":"

The rule number of the entry to replace.

", "locationName":"ruleNumber" } - }, - "documentation":"

Contains the parameters for ReplaceNetworkAclEntry.

" + } }, "ReplaceRouteRequest":{ "type":"structure", @@ -18972,8 +18898,7 @@ "documentation":"

The ID of a VPC peering connection.

", "locationName":"vpcPeeringConnectionId" } - }, - "documentation":"

Contains the parameters for ReplaceRoute.

" + } }, "ReplaceRouteTableAssociationRequest":{ "type":"structure", @@ -18997,8 +18922,7 @@ "documentation":"

The ID of the new route table to associate with the subnet.

", "locationName":"routeTableId" } - }, - "documentation":"

Contains the parameters for ReplaceRouteTableAssociation.

" + } }, "ReplaceRouteTableAssociationResult":{ "type":"structure", @@ -19008,8 +18932,7 @@ "documentation":"

The ID of the new association.

", "locationName":"newAssociationId" } - }, - "documentation":"

Contains the output of ReplaceRouteTableAssociation.

" + } }, "ReportInstanceReasonCodes":{ "type":"string", @@ -19179,7 +19102,7 @@ }, "CreditSpecification":{ "shape":"CreditSpecificationRequest", - "documentation":"

The credit option for CPU usage of the instance. Valid for T2 instances only.

" + "documentation":"

The credit option for CPU usage of the instance. Valid for T2 or T3 instances only.

" }, "CpuOptions":{ "shape":"LaunchTemplateCpuOptionsRequest", @@ -20054,6 +19977,7 @@ "type":"string", "enum":[ "customer-gateway", + "dedicated-host", "dhcp-options", "image", "instance", @@ -20299,8 +20223,7 @@ "documentation":"

Not supported. Use a set of IP permissions to specify a destination security group.

", "locationName":"sourceSecurityGroupOwnerId" } - }, - "documentation":"

Contains the parameters for RevokeSecurityGroupEgress.

" + } }, "RevokeSecurityGroupIngressRequest":{ "type":"structure", @@ -20346,8 +20269,7 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" } - }, - "documentation":"

Contains the parameters for RevokeSecurityGroupIngress.

" + } }, "Route":{ "type":"structure", @@ -20671,7 +20593,7 @@ }, "CreditSpecification":{ "shape":"CreditSpecificationRequest", - "documentation":"

The credit option for CPU usage of the instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification. For more information, see T2 Instances in the Amazon Elastic Compute Cloud User Guide.

Default: standard

" + "documentation":"

The credit option for CPU usage of the instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification. For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

Default: standard (T2 instances) or unlimited (T3 instances)

" }, "CpuOptions":{ "shape":"CpuOptionsRequest", @@ -22786,7 +22708,7 @@ "locationName":"instanceId" } }, - "documentation":"

Describes the T2 instance whose credit option for CPU usage was successfully modified.

" + "documentation":"

Describes the T2 or T3 instance whose credit option for CPU usage was successfully modified.

" }, "SuccessfulInstanceCreditSpecificationSet":{ "type":"list", @@ -22810,7 +22732,7 @@ "members":{ "Key":{ "shape":"String", - "documentation":"

The key of the tag.

Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws:

", + "documentation":"

The key of the tag.

Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws:.

", "locationName":"key" }, "Value":{ @@ -22866,7 +22788,7 @@ "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are fleet, instance, snapshot, and volume. To tag a resource after it has been created, see CreateTags.

", + "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are fleet, dedicated-host, instance, snapshot, and volume. To tag a resource after it has been created, see CreateTags.

", "locationName":"resourceType" }, "Tags":{ @@ -23186,11 +23108,11 @@ }, "Error":{ "shape":"UnsuccessfulInstanceCreditSpecificationItemError", - "documentation":"

The applicable error for the T2 instance whose credit option for CPU usage was not modified.

", + "documentation":"

The applicable error for the T2 or T3 instance whose credit option for CPU usage was not modified.

", "locationName":"error" } }, - "documentation":"

Describes the T2 instance whose credit option for CPU usage was not modified.

" + "documentation":"

Describes the T2 or T3 instance whose credit option for CPU usage was not modified.

" }, "UnsuccessfulInstanceCreditSpecificationItemError":{ "type":"structure", @@ -23206,7 +23128,7 @@ "locationName":"message" } }, - "documentation":"

Information about the error for the T2 instance whose credit option for CPU usage was not modified.

" + "documentation":"

Information about the error for the T2 or T3 instance whose credit option for CPU usage was not modified.

" }, "UnsuccessfulInstanceCreditSpecificationSet":{ "type":"list", @@ -23286,8 +23208,7 @@ "shape":"IpPermissionList", "documentation":"

The IP permissions for the security group rule.

" } - }, - "documentation":"

Contains the parameters for UpdateSecurityGroupRuleDescriptionsEgress.

" + } }, "UpdateSecurityGroupRuleDescriptionsEgressResult":{ "type":"structure", @@ -23297,8 +23218,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of UpdateSecurityGroupRuleDescriptionsEgress.

" + } }, "UpdateSecurityGroupRuleDescriptionsIngressRequest":{ "type":"structure", @@ -23320,8 +23240,7 @@ "shape":"IpPermissionList", "documentation":"

The IP permissions for the security group rule.

" } - }, - "documentation":"

Contains the parameters for UpdateSecurityGroupRuleDescriptionsIngress.

" + } }, "UpdateSecurityGroupRuleDescriptionsIngressResult":{ "type":"structure", @@ -23331,8 +23250,7 @@ "documentation":"

Returns true if the request succeeds; otherwise, returns an error.

", "locationName":"return" } - }, - "documentation":"

Contains the output of UpdateSecurityGroupRuleDescriptionsIngress.

" + } }, "UserBucket":{ "type":"structure", diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index 40cbc137..c0c38a32 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -634,7 +634,7 @@ "members":{ "subnets":{ "shape":"StringList", - "documentation":"

The subnets associated with the task or service. There is a limit of 10 subnets able to be specified per AwsVpcConfiguration.

All specified subnets must be from the same VPC.

" + "documentation":"

The subnets associated with the task or service. There is a limit of 16 subnets able to be specified per AwsVpcConfiguration.

All specified subnets must be from the same VPC.

" }, "securityGroups":{ "shape":"StringList", @@ -814,11 +814,11 @@ "members":{ "name":{ "shape":"String", - "documentation":"

The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

" + "documentation":"

The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

" }, "image":{ "shape":"String", - "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

  • When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image are not propagated to already running tasks.

  • Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag or registry/repository@digest. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.

  • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

  • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

  • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

" + "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

  • When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image are not propagated to already running tasks.

  • Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag or registry/repository@digest. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.

  • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

  • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

  • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

" }, "repositoryCredentials":{ "shape":"RepositoryCredentials", @@ -826,23 +826,23 @@ }, "cpu":{ "shape":"Integer", - "documentation":"

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2; however, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

  • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to 2 CPU shares.

  • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition.

" + "documentation":"

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2; however, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

  • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to 2 CPU shares.

  • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition.

" }, "memory":{ "shape":"BoxedInteger", - "documentation":"

The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If your containers are part of a task using the Fargate launch type, this field is optional and the only requirement is that the total amount of memory reserved for all containers within a task be lower than the task memory value.

For containers that are part of a task using the EC2 launch type, you must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" + "documentation":"

The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If your containers are part of a task using the Fargate launch type, this field is optional and the only requirement is that the total amount of memory reserved for all containers within a task be lower than the task memory value.

For containers that are part of a task using the EC2 launch type, you must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" }, "memoryReservation":{ "shape":"BoxedInteger", - "documentation":"

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit; however, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

You must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" + "documentation":"

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit; however, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

You must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" }, "links":{ "shape":"StringList", - "documentation":"

The link parameter allows containers to communicate with each other without the need for port mappings. Only supported if the network mode of a task definition is set to bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. For more information about linking Docker containers, go to https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run .

This parameter is not supported for Windows containers.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

" + "documentation":"

The link parameter allows containers to communicate with each other without the need for port mappings. Only supported if the network mode of a task definition is set to bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. For more information about linking Docker containers, go to https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run .

This parameter is not supported for Windows containers.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

" }, "portMappings":{ "shape":"PortMappingList", - "documentation":"

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, you should only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.

This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings section DescribeTasks responses.

" + "documentation":"

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, you should only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.

This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings section DescribeTasks responses.

" }, "essential":{ "shape":"BoxedBoolean", @@ -850,23 +850,23 @@ }, "entryPoint":{ "shape":"StringList", - "documentation":"

Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

" + "documentation":"

Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

" }, "command":{ "shape":"StringList", - "documentation":"

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd.

" + "documentation":"

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd.

" }, "environment":{ "shape":"EnvironmentVariables", - "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

" + "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

" }, "mountPoints":{ "shape":"MountPointList", - "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives.

" + "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives.

" }, "volumesFrom":{ "shape":"VolumeFromList", - "documentation":"

Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

" + "documentation":"

Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

" }, "linuxParameters":{ "shape":"LinuxParameters", @@ -874,59 +874,71 @@ }, "hostname":{ "shape":"String", - "documentation":"

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if using the awsvpc networkMode.

" + "documentation":"

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if using the awsvpc networkMode.

" }, "user":{ "shape":"String", - "documentation":"

The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

This parameter is not supported for Windows containers.

" }, "workingDirectory":{ "shape":"String", - "documentation":"

The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

" + "documentation":"

The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

" }, "disableNetworking":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

This parameter is not supported for Windows containers.

" + "documentation":"

When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

This parameter is not supported for Windows containers.

" }, "privileged":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" + "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" }, "readonlyRootFilesystem":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers.

" }, "dnsServers":{ "shape":"StringList", - "documentation":"

A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

This parameter is not supported for Windows containers.

" }, "dnsSearchDomains":{ "shape":"StringList", - "documentation":"

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

This parameter is not supported for Windows containers.

" }, "extraHosts":{ "shape":"HostEntryList", - "documentation":"

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. If using the Fargate launch type, this may be used to list non-Fargate hosts to which the container can talk. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. If using the Fargate launch type, this may be used to list non-Fargate hosts to which the container can talk. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

This parameter is not supported for Windows containers.

" }, "dockerSecurityOptions":{ "shape":"StringList", - "documentation":"

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers.

" + }, + "interactive":{ + "shape":"BoxedBoolean", + "documentation":"

When this parameter is true, this allows you to deploy containerized applications that require stdin or a tty to be allocated. This parameter maps to OpenStdin in the Create a container section of the Docker Remote API and the --interactive option to docker run.

" + }, + "pseudoTerminal":{ + "shape":"BoxedBoolean", + "documentation":"

When this parameter is true, a TTY is allocated. This parameter maps to Tty in the Create a container section of the Docker Remote API and the --tty option to docker run.

" }, "dockerLabels":{ "shape":"DockerLabelsMap", - "documentation":"

A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

" + "documentation":"

A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

" }, "ulimits":{ "shape":"UlimitList", - "documentation":"

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

This parameter is not supported for Windows containers.

" + "documentation":"

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

This parameter is not supported for Windows containers.

" }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

If using the Fargate launch type, the only supported value is awslogs.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses; however the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

If using the Fargate launch type, the only supported value is awslogs.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses; however the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "healthCheck":{ "shape":"HealthCheck", - "documentation":"

The health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" + "documentation":"

The health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" + }, + "systemControls":{ + "shape":"SystemControls", + "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

It is not recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network modes. When you do, the container that is started last will determine which systemControls parameters take effect.

" } }, "documentation":"

Container definitions are used in task definitions to describe the different containers that are launched as part of a task.

" @@ -1583,7 +1595,7 @@ "documentation":"

Custom metadata to add to your Docker volume. This parameter maps to Labels in the Create a volume section of the Docker Remote API and the --label option to docker volume create.

" } }, - "documentation":"

The configuration for the Docker volume. This parameter is specified when using Docker volumes.

" + "documentation":"

This parameter is specified when using Docker volumes. Docker volumes are only supported when using the EC2 launch type. Windows containers only support the use of the local driver. To use bind mounts, specify a host instead.

" }, "Double":{"type":"double"}, "EnvironmentVariables":{ @@ -1614,7 +1626,7 @@ "members":{ "command":{ "shape":"StringList", - "documentation":"

A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD to execute the command arguments directly, or CMD-SHELL to run the command with the container's default shell. For example:

[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]

An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see HealthCheck in the Create a container section of the Docker Remote API.

" + "documentation":"

A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD to execute the command arguments directly, or CMD-SHELL to run the command with the container's default shell. For example:

[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]

An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see HealthCheck in the Create a container section of the Docker Remote API.

" }, "interval":{ "shape":"BoxedInteger", @@ -1633,7 +1645,7 @@ "documentation":"

The optional grace period within which to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You may specify between 0 and 300 seconds. The startPeriod is disabled by default.

If a health check succeeds within the startPeriod, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.

" } }, - "documentation":"

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).

" + "documentation":"

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).

The following are notes about container health check support:

  • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS Container Agent.

  • Container health checks are supported for Fargate tasks if using platform version 1.1.0 or greater. For more information, see AWS Fargate Platform Versions.

  • Container health checks are not supported for tasks that are part of a service that is configured to use a Classic Load Balancer.

" }, "HealthStatus":{ "type":"string", @@ -1688,11 +1700,11 @@ "members":{ "add":{ "shape":"StringList", - "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

If you are using tasks that use the Fargate launch type, the add parameter is not supported.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" + "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

If you are using tasks that use the Fargate launch type, the add parameter is not supported.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" }, "drop":{ "shape":"StringList", - "documentation":"

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the Create a container section of the Docker Remote API and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" + "documentation":"

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the Create a container section of the Docker Remote API and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" } }, "documentation":"

The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker. For more information on the default capabilities and the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run reference. For more detailed information on these Linux capabilities, see the capabilities(7) Linux manual page.

" @@ -1727,7 +1739,7 @@ }, "devices":{ "shape":"DevicesList", - "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

If you are using tasks that use the Fargate launch type, the devices parameter is not supported.

" + "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

If you are using tasks that use the Fargate launch type, the devices parameter is not supported.

" }, "initProcessEnabled":{ "shape":"BoxedBoolean", @@ -2665,7 +2677,7 @@ }, "port":{ "shape":"BoxedInteger", - "documentation":"

The port value used if your service discovery service specified an SRV record. This field is required if both the awsvpc network mode and SRV records are used.

" + "documentation":"

The port value used if your service discovery service specified an SRV record. This field may be used if both the awsvpc network mode and SRV records are used.

" }, "containerName":{ "shape":"String", @@ -2872,6 +2884,24 @@ } } }, + "SystemControl":{ + "type":"structure", + "members":{ + "namespace":{ + "shape":"String", + "documentation":"

The namespaced kernel parameter to set a value for.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value for the namespaced kernel parameter specified in namespace.

" + } + }, + "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

It is not recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network modes. When you do, the container that is started last will determine which systemControls parameters take effect.

" + }, + "SystemControls":{ + "type":"list", + "member":{"shape":"SystemControl"} + }, "TargetNotFoundException":{ "type":"structure", "members":{ @@ -3356,7 +3386,7 @@ }, "dockerVolumeConfiguration":{ "shape":"DockerVolumeConfiguration", - "documentation":"

The configuration for the Docker volume. This parameter is specified when using Docker volumes.

" + "documentation":"

This parameter is specified when using Docker volumes. Docker volumes are only supported when using the EC2 launch type. Windows containers only support the use of the local driver. To use bind mounts, specify a host instead.

" } }, "documentation":"

A data volume used in a task definition. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

" diff --git a/botocore/data/eks/2017-11-01/service-2.json b/botocore/data/eks/2017-11-01/service-2.json index 1f3813db..449bbf46 100644 --- a/botocore/data/eks/2017-11-01/service-2.json +++ b/botocore/data/eks/2017-11-01/service-2.json @@ -79,7 +79,7 @@ {"shape":"ServerException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Lists the Amazon EKS clusters in your AWS account in the specified region.

" + "documentation":"

Lists the Amazon EKS clusters in your AWS account in the specified Region.

" } }, "shapes":{ @@ -148,6 +148,10 @@ "clientRequestToken":{ "shape":"String", "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

" + }, + "platformVersion":{ + "shape":"String", + "documentation":"

The platform version of your Amazon EKS cluster. For more information, see Platform Versions in the Amazon EKS User Guide .

" } }, "documentation":"

An object representing an Amazon EKS cluster.

" @@ -185,11 +189,11 @@ }, "roleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides permissions for Amazon EKS to make calls to other AWS API operations on your behalf. For more information, see Amazon EKS Service IAM Role in the Amazon EKS User Guide

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides permissions for Amazon EKS to make calls to other AWS API operations on your behalf. For more information, see Amazon EKS Service IAM Role in the Amazon EKS User Guide .

" }, "resourcesVpcConfig":{ "shape":"VpcConfigRequest", - "documentation":"

The VPC subnets and security groups used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see Cluster VPC Considerations and Cluster Security Group Considerations in the Amazon EKS User Guide.

" + "documentation":"

The VPC subnets and security groups used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see Cluster VPC Considerations and Cluster Security Group Considerations in the Amazon EKS User Guide. You must specify at least two subnets. You may specify up to 5 security groups, but we recommend that you use a dedicated security group for your cluster control plane.

" }, "clientRequestToken":{ "shape":"String", @@ -290,7 +294,7 @@ "members":{ "clusters":{ "shape":"StringList", - "documentation":"

A list of all of the clusters for your account in the specified region.

" + "documentation":"

A list of all of the clusters for your account in the specified Region.

" }, "nextToken":{ "shape":"String", @@ -333,7 +337,7 @@ }, "message":{"shape":"String"} }, - "documentation":"

The specified resource could not be found. You can view your available clusters with ListClusters. Amazon EKS clusters are region-specific.

", + "documentation":"

The specified resource could not be found. You can view your available clusters with ListClusters. Amazon EKS clusters are Region-specific.

", "error":{"httpStatusCode":404}, "exception":true }, @@ -356,7 +360,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The service is unavailable, back off and retry the operation.

", + "documentation":"

The service is unavailable. Back off and retry the operation.

", "error":{"httpStatusCode":503}, "exception":true, "fault":true @@ -380,7 +384,7 @@ "documentation":"

The supported Availability Zones for your account. Choose subnets in these Availability Zones for your cluster.

" } }, - "documentation":"

At least one of your specified cluster subnets is in an Availability Zone that does not support Amazon EKS. The exception output will specify the supported Availability Zones for your account, from which you can choose subnets for your cluster.

", + "documentation":"

At least one of your specified cluster subnets is in an Availability Zone that does not support Amazon EKS. The exception output specifies the supported Availability Zones for your account, from which you can choose subnets for your cluster.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -418,5 +422,5 @@ "documentation":"

An object representing an Amazon EKS cluster VPC configuration response.

" } }, - "documentation":"

Amazon Elastic Container Service for Kubernetes (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications.

Amazon EKS runs three Kubernetes control plane instances across three Availability Zones to ensure high availability. Amazon EKS automatically detects and replaces unhealthy control plane instances, and it provides automated version upgrades and patching for them.

Amazon EKS is also integrated with many AWS services to provide scalability and security for your applications, including the following:

  • Elastic Load Balancing for load distribution

  • IAM for authentication

  • Amazon VPC for isolation

Amazon EKS runs up to date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required.

" + "documentation":"

Amazon Elastic Container Service for Kubernetes (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications.

Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required.

" } diff --git a/botocore/data/eks/2017-11-01/service-2.sdk-extras.json b/botocore/data/eks/2017-11-01/service-2.sdk-extras.json new file mode 100644 index 00000000..b636c211 --- /dev/null +++ b/botocore/data/eks/2017-11-01/service-2.sdk-extras.json @@ -0,0 +1,8 @@ +{ + "version": 1.0, + "merge": { + "metadata": { + "serviceId":"EKS" + } + } +} diff --git a/botocore/data/elasticache/2014-09-30/service-2.json b/botocore/data/elasticache/2014-09-30/service-2.json index ca9a3351..33222a9c 100644 --- a/botocore/data/elasticache/2014-09-30/service-2.json +++ b/botocore/data/elasticache/2014-09-30/service-2.json @@ -3,6 +3,7 @@ "apiVersion":"2014-09-30", "endpointPrefix":"elasticache", "serviceFullName":"Amazon ElastiCache", + "serviceId":"ElastiCache", "signatureVersion":"v4", "xmlNamespace":"http://elasticache.amazonaws.com/doc/2014-09-30/", "protocol":"query" diff --git a/botocore/data/elasticache/2015-02-02/service-2.json b/botocore/data/elasticache/2015-02-02/service-2.json index 95f4b937..d3f5f846 100644 --- a/botocore/data/elasticache/2015-02-02/service-2.json +++ b/botocore/data/elasticache/2015-02-02/service-2.json @@ -28,7 +28,7 @@ {"shape":"TagQuotaPerResourceExceeded"}, {"shape":"InvalidARNFault"} ], - "documentation":"

Adds up to 50 cost allocation tags to the named resource. A cost allocation tag is a key-value pair where the key and value are case-sensitive. You can use cost allocation tags to categorize and track your AWS costs.

When you apply tags to your ElastiCache resources, AWS generates a cost allocation report as a comma-separated value (CSV) file with your usage and costs aggregated by your tags. You can apply tags that represent business categories (such as cost centers, application names, or owners) to organize your costs across multiple services. For more information, see Using Cost Allocation Tags in Amazon ElastiCache in the ElastiCache User Guide.

" + "documentation":"

Adds up to 50 cost allocation tags to the named resource. A cost allocation tag is a key-value pair where the key and value are case-sensitive. You can use cost allocation tags to categorize and track your AWS costs.

When you apply tags to your ElastiCache resources, AWS generates a cost allocation report as a comma-separated value (CSV) file with your usage and costs aggregated by your tags. You can apply tags that represent business categories (such as cost centers, application names, or owners) to organize your costs across multiple services. For more information, see Using Cost Allocation Tags in Amazon ElastiCache in the ElastiCache User Guide.

" }, "AuthorizeCacheSecurityGroupIngress":{ "name":"AuthorizeCacheSecurityGroupIngress", @@ -69,7 +69,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Makes a copy of an existing snapshot.

This operation is valid for Redis only.

Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.

You could receive the following error messages.

Error Messages

  • Error Message: The S3 bucket %s is outside of the region.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s does not exist.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s is not owned by the authenticated user.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.

    Solution: Contact your system administrator to get the needed permissions.

  • Error Message: The S3 bucket %s already contains an object with key %s.

    Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.

  • Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.

    Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.

    Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.

    Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

" + "documentation":"

Makes a copy of an existing snapshot.

This operation is valid for Redis only.

Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.

You could receive the following error messages.

Error Messages

  • Error Message: The S3 bucket %s is outside of the region.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s does not exist.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The S3 bucket %s is not owned by the authenticated user.

    Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.

    Solution: Contact your system administrator to get the needed permissions.

  • Error Message: The S3 bucket %s already contains an object with key %s.

    Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.

  • Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.

    Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.

    Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

  • Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.

    Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.

" }, "CreateCacheCluster":{ "name":"CreateCacheCluster", @@ -98,7 +98,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis.

Due to current limitations on Redis (cluster mode disabled), this operation or parameter is not supported on Redis (cluster mode enabled) replication groups.

" + "documentation":"

Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis.

This operation is not supported for Redis (cluster mode enabled) clusters.

" }, "CreateCacheParameterGroup":{ "name":"CreateCacheParameterGroup", @@ -118,7 +118,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a new Amazon ElastiCache cache parameter group. An ElastiCache cache parameter group is a collection of parameters and their values that are applied to all of the nodes in any cluster or replication group using the CacheParameterGroup.

A newly created CacheParameterGroup is an exact duplicate of the default parameter group for the CacheParameterGroupFamily. To customize the newly created CacheParameterGroup you can change the values of specific parameters. For more information, see:

" + "documentation":"

Creates a new Amazon ElastiCache cache parameter group. An ElastiCache cache parameter group is a collection of parameters and their values that are applied to all of the nodes in any cluster or replication group using the CacheParameterGroup.

A newly created CacheParameterGroup is an exact duplicate of the default parameter group for the CacheParameterGroupFamily. To customize the newly created CacheParameterGroup you can change the values of specific parameters. For more information, see:

" }, "CreateCacheSecurityGroup":{ "name":"CreateCacheSecurityGroup", @@ -186,7 +186,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 15 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.

This operation is valid for Redis only.

" + "documentation":"

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 15 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.

This operation is valid for Redis only.

" }, "CreateSnapshot":{ "name":"CreateSnapshot", @@ -212,6 +212,33 @@ ], "documentation":"

Creates a copy of an entire cluster or replication group at a specific moment in time.

This operation is valid for Redis only.

" }, + "DecreaseReplicaCount":{ + "name":"DecreaseReplicaCount", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DecreaseReplicaCountMessage"}, + "output":{ + "shape":"DecreaseReplicaCountResult", + "resultWrapper":"DecreaseReplicaCountResult" + }, + "errors":[ + {"shape":"ReplicationGroupNotFoundFault"}, + {"shape":"InvalidReplicationGroupStateFault"}, + {"shape":"InvalidCacheClusterStateFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InsufficientCacheClusterCapacityFault"}, + {"shape":"ClusterQuotaForCustomerExceededFault"}, + {"shape":"NodeGroupsPerReplicationGroupQuotaExceededFault"}, + {"shape":"NodeQuotaForCustomerExceededFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, + {"shape":"NoOperationFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"

Dynamically decreases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.

" + }, "DeleteCacheCluster":{ "name":"DeleteCacheCluster", "http":{ @@ -232,7 +259,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation.

This operation cannot be used to delete a cluster that is the last read replica of a replication group or node group (shard) that has Multi-AZ mode enabled or a cluster from a Redis (cluster mode enabled) replication group.

Due to current limitations on Redis (cluster mode disabled), this operation or parameter is not supported on Redis (cluster mode enabled) replication groups.

" + "documentation":"

Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation.

This operation cannot be used to delete a cluster that is the last read replica of a replication group or node group (shard) that has Multi-AZ mode enabled or a cluster from a Redis (cluster mode enabled) replication group.

This operation is not valid for Redis (cluster mode enabled) clusters.

" }, "DeleteCacheParameterGroup":{ "name":"DeleteCacheParameterGroup", @@ -526,6 +553,32 @@ ], "documentation":"

Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster.

This operation is valid for Redis only.

" }, + "IncreaseReplicaCount":{ + "name":"IncreaseReplicaCount", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IncreaseReplicaCountMessage"}, + "output":{ + "shape":"IncreaseReplicaCountResult", + "resultWrapper":"IncreaseReplicaCountResult" + }, + "errors":[ + {"shape":"ReplicationGroupNotFoundFault"}, + {"shape":"InvalidReplicationGroupStateFault"}, + {"shape":"InvalidCacheClusterStateFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InsufficientCacheClusterCapacityFault"}, + {"shape":"ClusterQuotaForCustomerExceededFault"}, + {"shape":"NodeGroupsPerReplicationGroupQuotaExceededFault"}, + {"shape":"NodeQuotaForCustomerExceededFault"}, + {"shape":"NoOperationFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"

Dynamically increases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.

" + }, "ListAllowedNodeTypeModifications":{ "name":"ListAllowedNodeTypeModifications", "http":{ @@ -561,7 +614,7 @@ {"shape":"SnapshotNotFoundFault"}, {"shape":"InvalidARNFault"} ], - "documentation":"

Lists all cost allocation tags currently on the named resource. A cost allocation tag is a key-value pair where the key is case-sensitive and the value is optional. You can use cost allocation tags to categorize and track your AWS costs.

You can have a maximum of 50 cost allocation tags on an ElastiCache resource. For more information, see Using Cost Allocation Tags in Amazon ElastiCache.

" + "documentation":"

Lists all cost allocation tags currently on the named resource. A cost allocation tag is a key-value pair where the key is case-sensitive and the value is optional. You can use cost allocation tags to categorize and track your AWS costs.

If the cluster is not in the available state, ListTagsForResource returns an error.

You can have a maximum of 50 cost allocation tags on an ElastiCache resource. For more information, see Monitoring Costs with Tags.

" }, "ModifyCacheCluster":{ "name":"ModifyCacheCluster", @@ -653,7 +706,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Modifies the settings for a replication group.

Due to current limitations on Redis (cluster mode disabled), this operation or parameter is not supported on Redis (cluster mode enabled) replication groups.

This operation is valid for Redis only.

" + "documentation":"

Modifies the settings for a replication group.

For Redis (cluster mode enabled) clusters, this operation cannot be used to change a cluster's node type or engine version. For more information, see:

This operation is valid for Redis only.

" }, "ModifyReplicationGroupShardConfiguration":{ "name":"ModifyReplicationGroupShardConfiguration", @@ -677,7 +730,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Performs horizontal scaling on a Redis (cluster mode enabled) cluster with no downtime. Requires Redis engine version 3.2.10 or newer. For information on upgrading your engine to a newer version, see Upgrading Engine Versions in the Amazon ElastiCache User Guide.

For more information on ElastiCache for Redis online horizontal scaling, see ElastiCache for Redis Horizontal Scaling

" + "documentation":"

Modifies a replication group's shards (node groups) by allowing you to add shards, remove shards, or rebalance the keyspaces among existing shards.

" }, "PurchaseReservedCacheNodesOffering":{ "name":"PurchaseReservedCacheNodesOffering", @@ -714,7 +767,7 @@ {"shape":"InvalidCacheClusterStateFault"}, {"shape":"CacheClusterNotFoundFault"} ], - "documentation":"

Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING.

The reboot causes the contents of the cache (for each cache node being rebooted) to be lost.

When the reboot is complete, a cluster event is created.

Rebooting a cluster is currently supported on Memcached and Redis (cluster mode disabled) clusters. Rebooting is not supported on Redis (cluster mode enabled) clusters.

If you make changes to parameters that require a Redis (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.

" + "documentation":"

Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING.

The reboot causes the contents of the cache (for each cache node being rebooted) to be lost.

When the reboot is complete, a cluster event is created.

Rebooting a cluster is currently supported on Memcached and Redis (cluster mode disabled) clusters. Rebooting is not supported on Redis (cluster mode enabled) clusters.

If you make changes to parameters that require a Redis (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.

" }, "RemoveTagsFromResource":{ "name":"RemoveTagsFromResource", @@ -795,7 +848,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Represents the input of a TestFailover operation which test automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

Note the following

  • A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and AWS CLI) in any rolling 24-hour period.

  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.

  • If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.

  • To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance:

    1. Replication group message: Test Failover API called for node group <node-group-id>

    2. Cache cluster message: Failover from master node <primary-node-id> to replica node <node-id> completed

    3. Replication group message: Failover from master node <primary-node-id> to replica node <node-id> completed

    4. Cache cluster message: Recovering cache nodes <node-id>

    5. Cache cluster message: Finished recovery for cache nodes <node-id>

    For more information see:

Also see, Testing Multi-AZ with Automatic Failover in the ElastiCache User Guide.

" + "documentation":"

Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

Note the following

  • A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and AWS CLI) in any rolling 24-hour period.

  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.

  • If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.

  • To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence:

    1. Replication group message: Test Failover API called for node group <node-group-id>

    2. Cache cluster message: Failover from master node <primary-node-id> to replica node <node-id> completed

    3. Replication group message: Failover from master node <primary-node-id> to replica node <node-id> completed

    4. Cache cluster message: Recovering cache nodes <node-id>

    5. Cache cluster message: Finished recovery for cache nodes <node-id>

    For more information see:

Also see, Testing Multi-AZ with Automatic Failover in the ElastiCache User Guide.

" } }, "shapes":{ @@ -836,6 +889,12 @@ }, "documentation":"

Represents the input of an AddTagsToResource operation.

" }, + "AllowedNodeGroupId":{ + "type":"string", + "max":4, + "min":1, + "pattern":"\\d+" + }, "AllowedNodeTypeModificationsMessage":{ "type":"structure", "members":{ @@ -946,7 +1005,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

" + "documentation":"

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see:

" }, "Engine":{ "shape":"String", @@ -1023,11 +1082,11 @@ }, "TransitEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

Default: false

" + "documentation":"

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

" }, "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Default: false

" + "documentation":"

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

" } }, "documentation":"

Contains all of the attributes of a specific cluster.

", @@ -1091,7 +1150,7 @@ }, "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the cache parameter group family associated with this cache engine.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2

" + "documentation":"

The name of the cache parameter group family associated with this cache engine.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0

" }, "CacheEngineDescription":{ "shape":"String", @@ -1157,7 +1216,7 @@ "documentation":"

The Availability Zone where this node was created and now resides.

" } }, - "documentation":"

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

" + "documentation":"

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see:

" }, "CacheNodeIdsList":{ "type":"list", @@ -1210,7 +1269,7 @@ }, "ChangeType":{ "shape":"ChangeType", - "documentation":"

Indicates whether a change to the parameter is applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.

" + "documentation":"

Indicates whether a change to the parameter is applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.

" } }, "documentation":"

A parameter that has a different value for each cache node type it is applied to. For example, in a Redis cluster, a cache.m1.large cache node type would have a larger maxmemory value than a cache.m1.small type.

" @@ -1252,7 +1311,7 @@ }, "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the cache parameter group family that this cache parameter group is compatible with.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2

" + "documentation":"

The name of the cache parameter group family that this cache parameter group is compatible with.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0

" }, "Description":{ "shape":"String", @@ -1603,6 +1662,28 @@ }, "exception":true }, + "ConfigureShard":{ + "type":"structure", + "required":[ + "NodeGroupId", + "NewReplicaCount" + ], + "members":{ + "NodeGroupId":{ + "shape":"AllowedNodeGroupId", + "documentation":"

The 4-digit id for the node group you are configuring. For Redis (cluster mode disabled) replication groups, the node group id is always 0001. To find a Redis (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id.

" + }, + "NewReplicaCount":{ + "shape":"Integer", + "documentation":"

The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Redis replication group you are working with.

The minimum number of replicas in a shard or replication group is:

  • Redis (cluster mode disabled)

    • If Multi-AZ with Automatic Failover is enabled: 1

    • If Multi-AZ with Automatic Failover is not enabled: 0

  • Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)

" + }, + "PreferredAvailabilityZones":{ + "shape":"PreferredAvailabilityZoneList", + "documentation":"

A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The number of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache for Redis selects the availability zone for each of the replicas.

" + } + }, + "documentation":"

Node group (shard) configuration options when adding or removing replicas. Each node group (shard) configuration has the following members: NodeGroupId, NewReplicaCount, and PreferredAvailabilityZones.

" + }, "CopySnapshotMessage":{ "type":"structure", "required":[ @@ -1620,7 +1701,7 @@ }, "TargetBucket":{ "shape":"String", - "documentation":"

The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.

When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.

For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.

" + "documentation":"

The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.

When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.

For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.

" } }, "documentation":"

Represents the input of a CopySnapshotMessage operation.

" @@ -1641,7 +1722,7 @@ }, "ReplicationGroupId":{ "shape":"String", - "documentation":"

Due to current limitations on Redis (cluster mode disabled), this operation or parameter is not supported on Redis (cluster mode enabled) replication groups.

The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group.

If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones.

This parameter is only valid if the Engine parameter is redis.

" + "documentation":"

The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group.

If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones.

This parameter is only valid if the Engine parameter is redis.

" }, "AZMode":{ "shape":"AZMode", @@ -1661,7 +1742,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

" + "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see:

" }, "Engine":{ "shape":"String", @@ -1669,7 +1750,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.

" + "documentation":"

The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.

" }, "CacheParameterGroupName":{ "shape":"String", @@ -1677,7 +1758,7 @@ }, "CacheSubnetGroupName":{ "shape":"String", - "documentation":"

The name of the subnet group to be used for the cluster.

Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).

If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups.

" + "documentation":"

The name of the subnet group to be used for the cluster.

Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).

If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups.

" }, "CacheSecurityGroupNames":{ "shape":"CacheSecurityGroupNameList", @@ -1717,7 +1798,7 @@ }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", - "documentation":"

The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted.

This parameter is only valid if the Engine parameter is redis.

Default: 0 (i.e., automatic backups are disabled for this cluster).

" + "documentation":"

The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted.

This parameter is only valid if the Engine parameter is redis.

Default: 0 (i.e., automatic backups are disabled for this cache cluster).

" }, "SnapshotWindow":{ "shape":"String", @@ -1725,7 +1806,7 @@ }, "AuthToken":{ "shape":"String", - "documentation":"

Reserved parameter. The password used to access a password protected server.

This parameter is valid only if:

  • The parameter TransitEncryptionEnabled was set to true when the cluster was created.

  • The line requirepass was added to the database configuration file.

Password constraints:

  • Must be only printable ASCII characters.

  • Must be at least 16 characters and no more than 128 characters in length.

  • Cannot contain any of the following characters: '/', '\"', or '@'.

For more information, see AUTH password at http://redis.io/commands/AUTH.

" + "documentation":"

Reserved parameter. The password used to access a password protected server.

Password constraints:

  • Must be only printable ASCII characters.

  • Must be at least 16 characters and no more than 128 characters in length.

  • Cannot contain any of the following characters: '/', '\"', or '@'.

For more information, see AUTH password at http://redis.io/commands/AUTH.

" } }, "documentation":"

Represents the input of a CreateCacheCluster operation.

" @@ -1750,7 +1831,7 @@ }, "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the cache parameter group family that the cache parameter group can be used with.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2

" + "documentation":"

The name of the cache parameter group family that the cache parameter group can be used with.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0

" }, "Description":{ "shape":"String", @@ -1843,7 +1924,7 @@ }, "NumCacheClusters":{ "shape":"IntegerOptional", - "documentation":"

The number of clusters this replication group initially has.

This parameter is not used if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead.

If AutomaticFailoverEnabled is true, the value of this parameter must be at least 2. If AutomaticFailoverEnabled is false you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.

The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas).

" + "documentation":"

The number of clusters this replication group initially has.

This parameter is not used if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead.

If AutomaticFailoverEnabled is true, the value of this parameter must be at least 2. If AutomaticFailoverEnabled is false you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.

The maximum permitted value for NumCacheClusters is 6 (1 primary plus 5 replicas).

" }, "PreferredCacheClusterAZs":{ "shape":"AvailabilityZonesList", @@ -1859,11 +1940,11 @@ }, "NodeGroupConfiguration":{ "shape":"NodeGroupConfigurationList", - "documentation":"

A list of node group (shard) configuration options. Each node group (shard) configuration has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount.

If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter.

" + "documentation":"

A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots.

If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file, you must configure each node group (shard) using this parameter because you must specify the slots for each node group.

" }, "CacheNodeType":{ "shape":"String", - "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

" + "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see:

" }, "Engine":{ "shape":"String", @@ -1871,7 +1952,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version) in the ElastiCache User Guide, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.

" + "documentation":"

The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version) in the ElastiCache User Guide, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.

" }, "CacheParameterGroupName":{ "shape":"String", @@ -1879,7 +1960,7 @@ }, "CacheSubnetGroupName":{ "shape":"String", - "documentation":"

The name of the cache subnet group to be used for the replication group.

If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups.

" + "documentation":"

The name of the cache subnet group to be used for the replication group.

If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups.

" }, "CacheSecurityGroupNames":{ "shape":"CacheSecurityGroupNameList", @@ -1891,7 +1972,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key does not have to be accompanied by a tag value.

" + "documentation":"

A list of cost allocation tags to be added to this resource. A tag is a key-value pair.

" }, "SnapshotArns":{ "shape":"SnapshotArnsList", @@ -1927,15 +2008,15 @@ }, "AuthToken":{ "shape":"String", - "documentation":"

Reserved parameter. The password used to access a password protected server.

This parameter is valid only if:

  • The parameter TransitEncryptionEnabled was set to true when the cluster was created.

  • The line requirepass was added to the database configuration file.

Password constraints:

  • Must be only printable ASCII characters.

  • Must be at least 16 characters and no more than 128 characters in length.

  • Cannot contain any of the following characters: '/', '\"', or '@'.

For more information, see AUTH password at http://redis.io/commands/AUTH.

" + "documentation":"

Reserved parameter. The password used to access a password protected server.

AuthToken can be specified only on replication groups where TransitEncryptionEnabled is true.

For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup.

Password constraints:

  • Must be only printable ASCII characters.

  • Must be at least 16 characters and no more than 128 characters in length.

  • Cannot contain any of the following characters: '/', '\"', or '@'.

For more information, see AUTH password at http://redis.io/commands/AUTH.

" }, "TransitEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.4 or later, and the cluster is being created in an Amazon VPC.

If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup.

Default: false

" + "documentation":"

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6 or 4.x, and the cluster is being created in an Amazon VPC.

If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup.

" }, "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

This parameter is valid only if the Engine parameter is redis and the cluster is being created in an Amazon VPC.

Default: false

" + "documentation":"

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

" } }, "documentation":"

Represents the input of a CreateReplicationGroup operation.

" @@ -1971,6 +2052,41 @@ "Snapshot":{"shape":"Snapshot"} } }, + "DecreaseReplicaCountMessage":{ + "type":"structure", + "required":[ + "ReplicationGroupId", + "ApplyImmediately" + ], + "members":{ + "ReplicationGroupId":{ + "shape":"String", + "documentation":"

The id of the replication group from which you want to remove replica nodes.

" + }, + "NewReplicaCount":{ + "shape":"IntegerOptional", + "documentation":"

The number of read replica nodes you want at the completion of this operation. For Redis (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups.

The minimum number of replicas in a shard or replication group is:

  • Redis (cluster mode disabled)

    • If Multi-AZ with Automatic Failover is enabled: 1

    • If Multi-AZ with Automatic Failover is not enabled: 0

  • Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)

" + }, + "ReplicaConfiguration":{ + "shape":"ReplicaConfigurationList", + "documentation":"

A list of ConfigureShard objects that can be used to configure each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones.

" + }, + "ReplicasToRemove":{ + "shape":"RemoveReplicasList", + "documentation":"

A list of the node ids to remove from the replication group or node group (shard).

" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"

If True, the number of replica nodes is decreased immediately. If False, the number of replica nodes is decreased during the next maintenance window.

" + } + } + }, + "DecreaseReplicaCountResult":{ + "type":"structure", + "members":{ + "ReplicationGroup":{"shape":"ReplicationGroup"} + } + }, "DeleteCacheClusterMessage":{ "type":"structure", "required":["CacheClusterId"], @@ -2106,7 +2222,7 @@ }, "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of a specific cache parameter group family to return details for.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2

Constraints:

  • Must be 1 to 255 alphanumeric characters

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

" + "documentation":"

The name of a specific cache parameter group family to return details for.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0

Constraints:

  • Must be 1 to 255 alphanumeric characters

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -2206,7 +2322,7 @@ "members":{ "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the cache parameter group family.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2

" + "documentation":"

The name of the cache parameter group family.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -2290,7 +2406,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

" + "documentation":"

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see:

" }, "Duration":{ "shape":"String", @@ -2324,7 +2440,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

" + "documentation":"

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see:

" }, "Duration":{ "shape":"String", @@ -2442,7 +2558,7 @@ "members":{ "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

Specifies the name of the cache parameter group family to which the engine default parameters apply.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2

" + "documentation":"

Specifies the name of the cache parameter group family to which the engine default parameters apply.

Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0

" }, "Marker":{ "shape":"String", @@ -2503,6 +2619,37 @@ }, "documentation":"

Represents the output of a DescribeEvents operation.

" }, + "IncreaseReplicaCountMessage":{ + "type":"structure", + "required":[ + "ReplicationGroupId", + "ApplyImmediately" + ], + "members":{ + "ReplicationGroupId":{ + "shape":"String", + "documentation":"

The id of the replication group to which you want to add replica nodes.

" + }, + "NewReplicaCount":{ + "shape":"IntegerOptional", + "documentation":"

The number of read replica nodes you want at the completion of this operation. For Redis (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups.

" + }, + "ReplicaConfiguration":{ + "shape":"ReplicaConfigurationList", + "documentation":"

A list of ConfigureShard objects that can be used to configure each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones.

" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"

If True, the number of replica nodes is increased immediately. If False, the number of replica nodes is increased during the next maintenance window.

" + } + } + }, + "IncreaseReplicaCountResult":{ + "type":"structure", + "members":{ + "ReplicationGroup":{"shape":"ReplicationGroup"} + } + }, "InsufficientCacheClusterCapacityFault":{ "type":"structure", "members":{ @@ -2579,7 +2726,8 @@ "httpStatusCode":400, "senderFault":true }, - "exception":true + "exception":true, + "synthetic":true }, "InvalidParameterValueException":{ "type":"structure", @@ -2595,7 +2743,8 @@ "httpStatusCode":400, "senderFault":true }, - "exception":true + "exception":true, + "synthetic":true }, "InvalidReplicationGroupStateFault":{ "type":"structure", @@ -2692,11 +2841,11 @@ }, "AZMode":{ "shape":"AZMode", - "documentation":"

Specifies whether the new nodes in this Memcached cluster are all created in a single Availability Zone or created across multiple Availability Zones.

Valid values: single-az | cross-az.

This option is only supported for Memcached clusters.

You cannot specify single-az if the Memcached cluster already has cache nodes in different Availability Zones. If cross-az is specified, existing Memcached nodes remain in their current Availability Zone.

Only newly created nodes are located in different Availability Zones. For instructions on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

" + "documentation":"

Specifies whether the new nodes in this Memcached cluster are all created in a single Availability Zone or created across multiple Availability Zones.

Valid values: single-az | cross-az.

This option is only supported for Memcached clusters.

You cannot specify single-az if the Memcached cluster already has cache nodes in different Availability Zones. If cross-az is specified, existing Memcached nodes remain in their current Availability Zone.

Only newly created nodes are located in different Availability Zones. For instructions on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

" }, "NewAvailabilityZones":{ "shape":"PreferredAvailabilityZoneList", - "documentation":"

The list of Availability Zones where the new Memcached cache nodes are created.

This parameter is only valid when NumCacheNodes in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.

This option is only supported on Memcached clusters.

Scenarios:

  • Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for the two new nodes.

  • Scenario 2: You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.

  • Scenario 3: You want to cancel all pending operations. Specify NumCacheNodes=3 to cancel all pending operations.

The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting NumCacheNodes to the number of current nodes.

If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

Impact of new add/remove requests upon pending requests

  • Scenario-1

    • Pending Action: Delete

    • New Request: Delete

    • Result: The new delete, pending or immediate, replaces the pending delete.

  • Scenario-2

    • Pending Action: Delete

    • New Request: Create

    • Result: The new create, pending or immediate, replaces the pending delete.

  • Scenario-3

    • Pending Action: Create

    • New Request: Delete

    • Result: The new delete, pending or immediate, replaces the pending create.

  • Scenario-4

    • Pending Action: Create

    • New Request: Create

    • Result: The new create is added to the pending create.

      Important: If the new create request is Apply Immediately - Yes, all creates are performed immediately. If the new create request is Apply Immediately - No, all creates are pending.

" + "documentation":"

The list of Availability Zones where the new Memcached cache nodes are created.

This parameter is only valid when NumCacheNodes in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.

This option is only supported on Memcached clusters.

Scenarios:

  • Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for the two new nodes.

  • Scenario 2: You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.

  • Scenario 3: You want to cancel all pending operations. Specify NumCacheNodes=3 to cancel all pending operations.

The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting NumCacheNodes to the number of current nodes.

If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

Impact of new add/remove requests upon pending requests

  • Scenario-1

    • Pending Action: Delete

    • New Request: Delete

    • Result: The new delete, pending or immediate, replaces the pending delete.

  • Scenario-2

    • Pending Action: Delete

    • New Request: Create

    • Result: The new create, pending or immediate, replaces the pending delete.

  • Scenario-3

    • Pending Action: Create

    • New Request: Delete

    • Result: The new delete, pending or immediate, replaces the pending create.

  • Scenario-4

    • Pending Action: Create

    • New Request: Create

    • Result: The new create is added to the pending create.

      Important: If the new create request is Apply Immediately - Yes, all creates are performed immediately. If the new create request is Apply Immediately - No, all creates are pending.

" }, "CacheSecurityGroupNames":{ "shape":"CacheSecurityGroupNameList", @@ -2728,7 +2877,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The upgraded version of the cache engine to be run on the cache nodes.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version.

" + "documentation":"

The upgraded version of the cache engine to be run on the cache nodes.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -2852,7 +3001,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The upgraded version of the cache engine to be run on the clusters in the replication group.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.

" + "documentation":"

The upgraded version of the cache engine to be run on the clusters in the replication group.

Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -2872,7 +3021,8 @@ }, "NodeGroupId":{ "shape":"String", - "documentation":"

The name of the Node Group (called shard in the console).

" + "documentation":"

Deprecated. This parameter is not used.

", + "deprecated":true } }, "documentation":"

Represents the input of a ModifyReplicationGroup operation.

" @@ -2909,7 +3059,11 @@ }, "NodeGroupsToRemove":{ "shape":"NodeGroupsToRemoveList", - "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), NodeGroupsToRemove is a required list of node group ids to remove from the cluster.

" + "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), the NodeGroupsToRemove or NodeGroupsToRetain is a required list of node group ids to remove from or retain in the cluster.

ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster.

" + }, + "NodeGroupsToRetain":{ + "shape":"NodeGroupsToRetainList", + "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), the NodeGroupsToRemove or NodeGroupsToRetain is a required list of node group ids to remove from or retain in the cluster.

ElastiCache for Redis will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster.

" } }, "documentation":"

Represents the input for a ModifyReplicationGroupShardConfiguration operation.

" @@ -2920,6 +3074,18 @@ "ReplicationGroup":{"shape":"ReplicationGroup"} } }, + "NoOperationFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The operation was not performed because no changes were required.

", + "error":{ + "code":"NoOperationFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "NodeGroup":{ "type":"structure", "members":{ @@ -2949,6 +3115,10 @@ "NodeGroupConfiguration":{ "type":"structure", "members":{ + "NodeGroupId":{ + "shape":"AllowedNodeGroupId", + "documentation":"

The 4-digit id for the node group these configuration values apply to.

" + }, "Slots":{ "shape":"String", "documentation":"

A string that specifies the keyspace for a particular node group. Keyspaces range from 0 to 16,383. The string is in the format startkey-endkey.

Example: \"0-3999\"

" @@ -2993,14 +3163,17 @@ "shape":"String", "documentation":"

The ID of the node within its cluster. A node ID is a numeric identifier (0001, 0002, etc.).

" }, - "ReadEndpoint":{"shape":"Endpoint"}, + "ReadEndpoint":{ + "shape":"Endpoint", + "documentation":"

The information required for client programs to connect to a node for read operations. The read endpoint is only applicable on Redis (cluster mode disabled) clusters.

" + }, "PreferredAvailabilityZone":{ "shape":"String", "documentation":"

The name of the Availability Zone in which the node is located.

" }, "CurrentRole":{ "shape":"String", - "documentation":"

The role that is currently assigned to the node - primary or replica.

" + "documentation":"

The role that is currently assigned to the node - primary or replica. This member is only applicable for Redis (cluster mode disabled) replication groups.

" } }, "documentation":"

Represents a single node within a node group (shard).

" @@ -3039,10 +3212,17 @@ "NodeGroupsToRemoveList":{ "type":"list", "member":{ - "shape":"String", + "shape":"AllowedNodeGroupId", "locationName":"NodeGroupToRemove" } }, + "NodeGroupsToRetainList":{ + "type":"list", + "member":{ + "shape":"AllowedNodeGroupId", + "locationName":"NodeGroupToRetain" + } + }, "NodeQuotaForClusterExceededFault":{ "type":"structure", "members":{ @@ -3164,7 +3344,7 @@ }, "ChangeType":{ "shape":"ChangeType", - "documentation":"

Indicates whether a change to the parameter is applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.

" + "documentation":"

Indicates whether a change to the parameter is applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.

" } }, "documentation":"

Describes an individual setting that controls some aspect of ElastiCache behavior.

" @@ -3213,7 +3393,7 @@ }, "CacheNodeIdsToRemove":{ "shape":"CacheNodeIdsList", - "documentation":"

A list of cache node IDs that are being removed (or will be removed) from the cluster. A node ID is a numeric identifier (0001, 0002, etc.).

" + "documentation":"

A list of cache node IDs that are being removed (or will be removed) from the cluster. A node ID is a 4-digit numeric identifier (0001, 0002, etc.).

" }, "EngineVersion":{ "shape":"String", @@ -3304,6 +3484,10 @@ "locationName":"RecurringCharge" } }, + "RemoveReplicasList":{ + "type":"list", + "member":{"shape":"String"} + }, "RemoveTagsFromResourceMessage":{ "type":"structure", "required":[ @@ -3322,6 +3506,13 @@ }, "documentation":"

Represents the input of a RemoveTagsFromResource operation.

" }, + "ReplicaConfigurationList":{ + "type":"list", + "member":{ + "shape":"ConfigureShard", + "locationName":"ConfigureShard" + } + }, "ReplicationGroup":{ "type":"structure", "members":{ @@ -3343,7 +3534,7 @@ }, "MemberClusters":{ "shape":"ClusterIdList", - "documentation":"

The identifiers of all the nodes that are part of this replication group.

" + "documentation":"

The names of all the cache clusters that are part of this replication group.

" }, "NodeGroups":{ "shape":"NodeGroupList", @@ -3383,11 +3574,11 @@ }, "TransitEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

Default: false

" + "documentation":"

A flag that enables in-transit encryption when set to true.

You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

" }, "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", - "documentation":"

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Default: false

" + "documentation":"

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6 or 4.x.

Default: false

" } }, "documentation":"

Contains all of the attributes of a specific Redis replication group.

", @@ -3469,7 +3660,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

" + "documentation":"

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see:

" }, "StartTime":{ "shape":"TStamp", @@ -3506,6 +3697,10 @@ "RecurringCharges":{ "shape":"RecurringChargeList", "documentation":"

The recurring price charged to run this reserved cache node.

" + }, + "ReservationARN":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the reserved cache node.

Example: arn:aws:elasticache:us-east-1:123456789012:reserved-instance:ri-2017-03-27-08-33-25-582

" } }, "documentation":"

Represents the output of a PurchaseReservedCacheNodesOffering operation.

", @@ -3577,7 +3772,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

" + "documentation":"

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see:

" }, "Duration":{ "shape":"Integer", @@ -3662,6 +3857,10 @@ "ReshardingConfiguration":{ "type":"structure", "members":{ + "NodeGroupId":{ + "shape":"AllowedNodeGroupId", + "documentation":"

The 4-digit id for the node group these configuration values apply to.

" + }, "PreferredAvailabilityZones":{ "shape":"AvailabilityZonesList", "documentation":"

A list of preferred availability zones for the nodes in this cluster.

" @@ -3740,6 +3939,18 @@ "type":"list", "member":{"shape":"SecurityGroupMembership"} }, + "ServiceLinkedRoleNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified service linked role (SLR) was not found.

", + "error":{ + "code":"ServiceLinkedRoleNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "SlotMigration":{ "type":"structure", "members":{ @@ -3779,7 +3990,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

" + "documentation":"

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

Notes:

  • All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).

  • Redis (cluster mode disabled): Redis backup/restore is not supported on T1 and T2 instances.

  • Redis (cluster mode enabled): Backup/restore is not supported on T1 instances.

  • Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.

For a complete listing of node types and specifications, see:

" }, "Engine":{ "shape":"String", @@ -4034,7 +4245,7 @@ "documentation":"

The name of the replication group (console: cluster) whose automatic failover is being tested by this operation.

" }, "NodeGroupId":{ - "shape":"String", + "shape":"AllowedNodeGroupId", "documentation":"

The name of the node group (called shard in the console) in this replication group on which automatic failover is to be tested. You may test automatic failover on up to 5 node groups in any rolling 24-hour period.

" } } @@ -4043,6 +4254,7 @@ "type":"structure", "members":{ }, + "documentation":"

The TestFailover action is not available.

", "error":{ "code":"TestFailoverNotAvailableFault", "httpStatusCode":400, diff --git a/botocore/data/elasticbeanstalk/2010-12-01/service-2.json b/botocore/data/elasticbeanstalk/2010-12-01/service-2.json index c542f0e3..72d8127e 100644 --- a/botocore/data/elasticbeanstalk/2010-12-01/service-2.json +++ b/botocore/data/elasticbeanstalk/2010-12-01/service-2.json @@ -123,7 +123,7 @@ {"shape":"TooManyBucketsException"}, {"shape":"TooManyConfigurationTemplatesException"} ], - "documentation":"

Creates a configuration template. Templates are associated with a specific application and are used to deploy different versions of the application with the same configuration settings.

Related Topics

" + "documentation":"

Creates a configuration template. Templates are associated with a specific application and are used to deploy different versions of the application with the same configuration settings.

Templates aren't associated with any environment. The EnvironmentName response element is always null.

Related Topics

" }, "CreateEnvironment":{ "name":"CreateEnvironment", @@ -1045,11 +1045,11 @@ }, "Nice":{ "shape":"NullableDouble", - "documentation":"

Percentage of time that the CPU has spent in the Nice state over the last 10 seconds.

" + "documentation":"

Available on Linux environments only.

Percentage of time that the CPU has spent in the Nice state over the last 10 seconds.

" }, "System":{ "shape":"NullableDouble", - "documentation":"

Percentage of time that the CPU has spent in the System state over the last 10 seconds.

" + "documentation":"

Available on Linux environments only.

Percentage of time that the CPU has spent in the System state over the last 10 seconds.

" }, "Idle":{ "shape":"NullableDouble", @@ -1057,15 +1057,19 @@ }, "IOWait":{ "shape":"NullableDouble", - "documentation":"

Percentage of time that the CPU has spent in the I/O Wait state over the last 10 seconds.

" + "documentation":"

Available on Linux environments only.

Percentage of time that the CPU has spent in the I/O Wait state over the last 10 seconds.

" }, "IRQ":{ "shape":"NullableDouble", - "documentation":"

Percentage of time that the CPU has spent in the IRQ state over the last 10 seconds.

" + "documentation":"

Available on Linux environments only.

Percentage of time that the CPU has spent in the IRQ state over the last 10 seconds.

" }, "SoftIRQ":{ "shape":"NullableDouble", - "documentation":"

Percentage of time that the CPU has spent in the SoftIRQ state over the last 10 seconds.

" + "documentation":"

Available on Linux environments only.

Percentage of time that the CPU has spent in the SoftIRQ state over the last 10 seconds.

" + }, + "Privileged":{ + "shape":"NullableDouble", + "documentation":"

Available on Windows environments only.

Percentage of time that the CPU has spent in the Privileged state over the last 10 seconds.

" } }, "documentation":"

CPU utilization metrics for an instance.

" @@ -2047,7 +2051,7 @@ "members":{ "InstanceHealthList":{ "shape":"InstanceHealthList", - "documentation":"

Detailed health information about each instance.

" + "documentation":"

Detailed health information about each instance.

The output differs slightly between Linux and Windows environments. There is a difference in the members that are supported under the <CPUUtilization> type.

" }, "RefreshedAt":{ "shape":"RefreshedAt", diff --git a/botocore/data/elastictranscoder/2012-09-25/service-2.json b/botocore/data/elastictranscoder/2012-09-25/service-2.json index a213f9aa..980f4d4d 100644 --- a/botocore/data/elastictranscoder/2012-09-25/service-2.json +++ b/botocore/data/elastictranscoder/2012-09-25/service-2.json @@ -1,13 +1,13 @@ { "version":"2.0", "metadata":{ - "uid":"elastictranscoder-2012-09-25", "apiVersion":"2012-09-25", "endpointPrefix":"elastictranscoder", "protocol":"rest-json", "serviceFullName":"Amazon Elastic Transcoder", "serviceId":"Elastic Transcoder", - "signatureVersion":"v4" + "signatureVersion":"v4", + "uid":"elastictranscoder-2012-09-25" }, "operations":{ "CancelJob":{ @@ -768,11 +768,11 @@ }, "AwsKmsKeyArn":{ "shape":"KeyArn", - "documentation":"

The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM.

" + "documentation":"

The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

If you use either s3 or s3-aws-kms as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of aes-cbc-pkcs7, aes-ctr, or aes-gcm.

" }, "Notifications":{ "shape":"Notifications", - "documentation":"

The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

  • Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic. For more information, see Create a Topic in the Amazon Simple Notification Service Developer Guide.

  • Completed: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.

  • Warning: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.

  • Error: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.

" + "documentation":"

The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

  • Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic. For more information, see Create a Topic in the Amazon Simple Notification Service Developer Guide.

  • Complete: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.

  • Warning: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.

  • Error: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.

" }, "ContentConfig":{ "shape":"PipelineOutputConfig", @@ -929,7 +929,7 @@ "members":{ "Mode":{ "shape":"EncryptionMode", - "documentation":"

The specific server-side encryption mode that you want Elastic Transcoder to use when decrypting your input files or encrypting your output files. Elastic Transcoder supports the following options:

  • S3: Amazon S3 creates and manages the keys used for encrypting your files.

  • S3-AWS-KMS: Amazon S3 calls the Amazon Key Management Service, which creates and manages the keys that are used for encrypting your files. If you specify S3-AWS-KMS and you don't want to use the default key, you must add the AWS-KMS key that you want to use to your pipeline.

  • AES-CBC-PKCS7: A padded cipher-block mode of operation originally used for HLS files.

  • AES-CTR: AES Counter Mode.

  • AES-GCM: AES Galois Counter Mode, a mode of operation that is an authenticated encryption format, meaning that a file, key, or initialization vector that has been tampered with fails the decryption process.

For all three AES options, you must provide the following settings, which must be base64-encoded:

  • Key

  • Key MD5

  • Initialization Vector

For the AES modes, your private encryption keys and your unencrypted data are never stored by AWS; therefore, it is important that you safely manage your encryption keys. If you lose them, you won't be able to unencrypt your data.

" + "documentation":"

The specific server-side encryption mode that you want Elastic Transcoder to use when decrypting your input files or encrypting your output files. Elastic Transcoder supports the following options:

  • s3: Amazon S3 creates and manages the keys used for encrypting your files.

  • s3-aws-kms: Amazon S3 calls the Amazon Key Management Service, which creates and manages the keys that are used for encrypting your files. If you specify s3-aws-kms and you don't want to use the default key, you must add the AWS-KMS key that you want to use to your pipeline.

  • aes-cbc-pkcs7: A padded cipher-block mode of operation originally used for HLS files.

  • aes-ctr: AES Counter Mode.

  • aes-gcm: AES Galois Counter Mode, a mode of operation that is an authenticated encryption format, meaning that a file, key, or initialization vector that has been tampered with fails the decryption process.

For all three AES options, you must provide the following settings, which must be base64-encoded:

  • Key

  • Key MD5

  • Initialization Vector

For the AES modes, your private encryption keys and your unencrypted data are never stored by AWS; therefore, it is important that you safely manage your encryption keys. If you lose them, you won't be able to unencrypt your data.

" }, "Key":{ "shape":"Base64EncodedString", @@ -1176,7 +1176,7 @@ "JobInputs":{ "type":"list", "member":{"shape":"JobInput"}, - "max":10000 + "max":200 }, "JobOutput":{ "type":"structure", @@ -1599,11 +1599,11 @@ }, "AwsKmsKeyArn":{ "shape":"KeyArn", - "documentation":"

The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM.

" + "documentation":"

The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

If you use either s3 or s3-aws-kms as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of aes-cbc-pkcs7, aes-ctr, or aes-gcm.

" }, "Notifications":{ "shape":"Notifications", - "documentation":"

The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

  • Progressing (optional): The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job.

  • Completed (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job.

  • Warning (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition.

  • Error (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.

" + "documentation":"

The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

  • Progressing (optional): The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job.

  • Complete (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job.

  • Warning (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition.

  • Error (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.

" }, "ContentConfig":{ "shape":"PipelineOutputConfig", @@ -1674,7 +1674,7 @@ "documentation":"

The location of the license key required to play DRM content. The URL must be an absolute path, and is referenced by the PlayReady header. The PlayReady header is referenced in the protection header of the client manifest for Smooth Streaming outputs, and in the EXT-X-DXDRM and EXT-XDXDRMINFO metadata tags for HLS playlist outputs. An example URL looks like this: https://www.example.com/exampleKey/

" } }, - "documentation":"

The PlayReady DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

PlayReady DRM encrypts your media files using AES-CTR encryption.

If you use DRM for an HLSv3 playlist, your outputs must have a master playlist.

" + "documentation":"

The PlayReady DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

PlayReady DRM encrypts your media files using aes-ctr encryption.

If you use DRM for an HLSv3 playlist, your outputs must have a master playlist.

" }, "PlayReadyDrmFormatString":{ "type":"string", @@ -1766,7 +1766,7 @@ }, "PresetContainer":{ "type":"string", - "pattern":"(^mp4$)|(^ts$)|(^webm$)|(^mp3$)|(^flac$)|(^oga$)|(^ogg$)|(^fmp4$)|(^mpg$)|(^flv$)|(^gif$)|(^mxf$)|(^wav$)" + "pattern":"(^mp4$)|(^ts$)|(^webm$)|(^mp3$)|(^flac$)|(^oga$)|(^ogg$)|(^fmp4$)|(^mpg$)|(^flv$)|(^gif$)|(^mxf$)|(^wav$)|(^mp2$)" }, "PresetType":{ "type":"string", @@ -2103,7 +2103,7 @@ }, "Notifications":{ "shape":"Notifications", - "documentation":"

The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

  • Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process jobs that are added to this pipeline. This is the ARN that Amazon SNS returned when you created the topic.

  • Completed: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job. This is the ARN that Amazon SNS returned when you created the topic.

  • Warning: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition. This is the ARN that Amazon SNS returned when you created the topic.

  • Error: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition. This is the ARN that Amazon SNS returned when you created the topic.

" + "documentation":"

The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

  • Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process jobs that are added to this pipeline. This is the ARN that Amazon SNS returned when you created the topic.

  • Complete: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job. This is the ARN that Amazon SNS returned when you created the topic.

  • Warning: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition. This is the ARN that Amazon SNS returned when you created the topic.

  • Error: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition. This is the ARN that Amazon SNS returned when you created the topic.

" } }, "documentation":"

The UpdatePipelineNotificationsRequest structure.

" @@ -2142,11 +2142,11 @@ }, "AwsKmsKeyArn":{ "shape":"KeyArn", - "documentation":"

The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM.

" + "documentation":"

The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

If you use either s3 or s3-aws-kms as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of aes-cbc-pkcs7, aes-ctr, or aes-gcm.

" }, "Notifications":{ "shape":"Notifications", - "documentation":"

The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

  • Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process jobs that are added to this pipeline. This is the ARN that Amazon SNS returned when you created the topic.

  • Completed: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job. This is the ARN that Amazon SNS returned when you created the topic.

  • Warning: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition. This is the ARN that Amazon SNS returned when you created the topic.

  • Error: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition. This is the ARN that Amazon SNS returned when you created the topic.

" + "documentation":"

The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

  • Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process jobs that are added to this pipeline. This is the ARN that Amazon SNS returned when you created the topic.

  • Complete: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job. This is the ARN that Amazon SNS returned when you created the topic.

  • Warning: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition. This is the ARN that Amazon SNS returned when you created the topic.

  • Error: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition. This is the ARN that Amazon SNS returned when you created the topic.

" }, "ContentConfig":{ "shape":"PipelineOutputConfig", diff --git a/botocore/data/elb/2012-06-01/service-2.json b/botocore/data/elb/2012-06-01/service-2.json index 5ff2b5eb..da1a0da7 100644 --- a/botocore/data/elb/2012-06-01/service-2.json +++ b/botocore/data/elb/2012-06-01/service-2.json @@ -27,7 +27,7 @@ {"shape":"TooManyTagsException"}, {"shape":"DuplicateTagKeysException"} ], - "documentation":"

Adds the specified tags to the specified load balancer. Each load balancer can have a maximum of 10 tags.

Each tag consists of a key and an optional value. If a tag with the same key is already associated with the load balancer, AddTags updates its value.

For more information, see Tag Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Adds the specified tags to the specified load balancer. Each load balancer can have a maximum of 10 tags.

Each tag consists of a key and an optional value. If a tag with the same key is already associated with the load balancer, AddTags updates its value.

For more information, see Tag Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "ApplySecurityGroupsToLoadBalancer":{ "name":"ApplySecurityGroupsToLoadBalancer", @@ -45,7 +45,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"InvalidSecurityGroupException"} ], - "documentation":"

Associates one or more security groups with your load balancer in a virtual private cloud (VPC). The specified security groups override the previously associated security groups.

For more information, see Security Groups for Load Balancers in a VPC in the Classic Load Balancer Guide.

" + "documentation":"

Associates one or more security groups with your load balancer in a virtual private cloud (VPC). The specified security groups override the previously associated security groups.

For more information, see Security Groups for Load Balancers in a VPC in the Classic Load Balancers Guide.

" }, "AttachLoadBalancerToSubnets":{ "name":"AttachLoadBalancerToSubnets", @@ -64,7 +64,7 @@ {"shape":"SubnetNotFoundException"}, {"shape":"InvalidSubnetException"} ], - "documentation":"

Adds one or more subnets to the set of configured subnets for the specified load balancer.

The load balancer evenly distributes requests across all registered subnets. For more information, see Add or Remove Subnets for Your Load Balancer in a VPC in the Classic Load Balancer Guide.

" + "documentation":"

Adds one or more subnets to the set of configured subnets for the specified load balancer.

The load balancer evenly distributes requests across all registered subnets. For more information, see Add or Remove Subnets for Your Load Balancer in a VPC in the Classic Load Balancers Guide.

" }, "ConfigureHealthCheck":{ "name":"ConfigureHealthCheck", @@ -80,7 +80,7 @@ "errors":[ {"shape":"AccessPointNotFoundException"} ], - "documentation":"

Specifies the health check settings to use when evaluating the health state of your EC2 instances.

For more information, see Configure Health Checks for Your Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Specifies the health check settings to use when evaluating the health state of your EC2 instances.

For more information, see Configure Health Checks for Your Load Balancer in the Classic Load Balancers Guide.

" }, "CreateAppCookieStickinessPolicy":{ "name":"CreateAppCookieStickinessPolicy", @@ -99,7 +99,7 @@ {"shape":"TooManyPoliciesException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie. This policy can be associated only with HTTP/HTTPS listeners.

This policy is similar to the policy created by CreateLBCookieStickinessPolicy, except that the lifetime of the special Elastic Load Balancing cookie, AWSELB, follows the lifetime of the application-generated cookie specified in the policy configuration. The load balancer only inserts a new stickiness cookie when the application response includes a new application cookie.

If the application cookie is explicitly removed or expires, the session stops being sticky until a new application cookie is issued.

For more information, see Application-Controlled Session Stickiness in the Classic Load Balancer Guide.

" + "documentation":"

Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie. This policy can be associated only with HTTP/HTTPS listeners.

This policy is similar to the policy created by CreateLBCookieStickinessPolicy, except that the lifetime of the special Elastic Load Balancing cookie, AWSELB, follows the lifetime of the application-generated cookie specified in the policy configuration. The load balancer only inserts a new stickiness cookie when the application response includes a new application cookie.

If the application cookie is explicitly removed or expires, the session stops being sticky until a new application cookie is issued.

For more information, see Application-Controlled Session Stickiness in the Classic Load Balancers Guide.

" }, "CreateLBCookieStickinessPolicy":{ "name":"CreateLBCookieStickinessPolicy", @@ -118,7 +118,7 @@ {"shape":"TooManyPoliciesException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period. This policy can be associated only with HTTP/HTTPS listeners.

When a load balancer implements this policy, the load balancer uses a special cookie to track the instance for each request. When the load balancer receives a request, it first checks to see if this cookie is present in the request. If so, the load balancer sends the request to the application server specified in the cookie. If not, the load balancer sends the request to a server that is chosen based on the existing load-balancing algorithm.

A cookie is inserted into the response for binding subsequent requests from the same user to that server. The validity of the cookie is based on the cookie expiration time, which is specified in the policy configuration.

For more information, see Duration-Based Session Stickiness in the Classic Load Balancer Guide.

" + "documentation":"

Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period. This policy can be associated only with HTTP/HTTPS listeners.

When a load balancer implements this policy, the load balancer uses a special cookie to track the instance for each request. When the load balancer receives a request, it first checks to see if this cookie is present in the request. If so, the load balancer sends the request to the application server specified in the cookie. If not, the load balancer sends the request to a server that is chosen based on the existing load-balancing algorithm.

A cookie is inserted into the response for binding subsequent requests from the same user to that server. The validity of the cookie is based on the cookie expiration time, which is specified in the policy configuration.

For more information, see Duration-Based Session Stickiness in the Classic Load Balancers Guide.

" }, "CreateLoadBalancer":{ "name":"CreateLoadBalancer", @@ -145,7 +145,7 @@ {"shape":"UnsupportedProtocolException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

Creates a Classic Load Balancer.

You can add listeners, security groups, subnets, and tags when you create your load balancer, or you can add them later using CreateLoadBalancerListeners, ApplySecurityGroupsToLoadBalancer, AttachLoadBalancerToSubnets, and AddTags.

To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer.

You can create up to 20 load balancers per region per account. You can request an increase for the number of load balancers for your account. For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Creates a Classic Load Balancer.

You can add listeners, security groups, subnets, and tags when you create your load balancer, or you can add them later using CreateLoadBalancerListeners, ApplySecurityGroupsToLoadBalancer, AttachLoadBalancerToSubnets, and AddTags.

To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer.

You can create up to 20 load balancers per region per account. You can request an increase for the number of load balancers for your account. For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "CreateLoadBalancerListeners":{ "name":"CreateLoadBalancerListeners", @@ -165,7 +165,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"UnsupportedProtocolException"} ], - "documentation":"

Creates one or more listeners for the specified load balancer. If a listener with the specified port does not already exist, it is created; otherwise, the properties of the new listener must match the properties of the existing listener.

For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Creates one or more listeners for the specified load balancer. If a listener with the specified port does not already exist, it is created; otherwise, the properties of the new listener must match the properties of the existing listener.

For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "CreateLoadBalancerPolicy":{ "name":"CreateLoadBalancerPolicy", @@ -248,7 +248,7 @@ {"shape":"AccessPointNotFoundException"}, {"shape":"InvalidEndPointException"} ], - "documentation":"

Deregisters the specified instances from the specified load balancer. After the instance is deregistered, it no longer receives traffic from the load balancer.

You can use DescribeLoadBalancers to verify that the instance is deregistered from the load balancer.

For more information, see Register or De-Register EC2 Instances in the Classic Load Balancer Guide.

" + "documentation":"

Deregisters the specified instances from the specified load balancer. After the instance is deregistered, it no longer receives traffic from the load balancer.

You can use DescribeLoadBalancers to verify that the instance is deregistered from the load balancer.

For more information, see Register or De-Register EC2 Instances in the Classic Load Balancers Guide.

" }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", @@ -261,7 +261,7 @@ "shape":"DescribeAccountLimitsOutput", "resultWrapper":"DescribeAccountLimitsResult" }, - "documentation":"

Describes the current Elastic Load Balancing resource limits for your AWS account.

For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Describes the current Elastic Load Balancing resource limits for your AWS account.

For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "DescribeInstanceHealth":{ "name":"DescribeInstanceHealth", @@ -395,7 +395,7 @@ {"shape":"AccessPointNotFoundException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Removes the specified Availability Zones from the set of Availability Zones for the specified load balancer.

There must be at least one Availability Zone registered with a load balancer at all times. After an Availability Zone is removed, all instances registered with the load balancer that are in the removed Availability Zone go into the OutOfService state. Then, the load balancer attempts to equally balance the traffic among its remaining Availability Zones.

For more information, see Add or Remove Availability Zones in the Classic Load Balancer Guide.

" + "documentation":"

Removes the specified Availability Zones from the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC.

For load balancers in a non-default VPC, use DetachLoadBalancerFromSubnets.

There must be at least one Availability Zone registered with a load balancer at all times. After an Availability Zone is removed, all instances registered with the load balancer that are in the removed Availability Zone go into the OutOfService state. Then, the load balancer attempts to equally balance the traffic among its remaining Availability Zones.

For more information, see Add or Remove Availability Zones in the Classic Load Balancers Guide.

" }, "EnableAvailabilityZonesForLoadBalancer":{ "name":"EnableAvailabilityZonesForLoadBalancer", @@ -411,7 +411,7 @@ "errors":[ {"shape":"AccessPointNotFoundException"} ], - "documentation":"

Adds the specified Availability Zones to the set of Availability Zones for the specified load balancer.

The load balancer evenly distributes requests across all its registered Availability Zones that contain instances.

For more information, see Add or Remove Availability Zones in the Classic Load Balancer Guide.

" + "documentation":"

Adds the specified Availability Zones to the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC.

For load balancers in a non-default VPC, use AttachLoadBalancerToSubnets.

The load balancer evenly distributes requests across all its registered Availability Zones that contain instances. For more information, see Add or Remove Availability Zones in the Classic Load Balancers Guide.

" }, "ModifyLoadBalancerAttributes":{ "name":"ModifyLoadBalancerAttributes", @@ -429,7 +429,7 @@ {"shape":"LoadBalancerAttributeNotFoundException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Modifies the attributes of the specified load balancer.

You can modify the load balancer attributes, such as AccessLogs, ConnectionDraining, and CrossZoneLoadBalancing by either enabling or disabling them. Or, you can modify the load balancer attribute ConnectionSettings by specifying an idle connection timeout value for your load balancer.

For more information, see the following in the Classic Load Balancer Guide:

" + "documentation":"

Modifies the attributes of the specified load balancer.

You can modify the load balancer attributes, such as AccessLogs, ConnectionDraining, and CrossZoneLoadBalancing by either enabling or disabling them. Or, you can modify the load balancer attribute ConnectionSettings by specifying an idle connection timeout value for your load balancer.

For more information, see the following in the Classic Load Balancers Guide:

" }, "RegisterInstancesWithLoadBalancer":{ "name":"RegisterInstancesWithLoadBalancer", @@ -446,7 +446,7 @@ {"shape":"AccessPointNotFoundException"}, {"shape":"InvalidEndPointException"} ], - "documentation":"

Adds the specified instances to the specified load balancer.

The instance must be a running instance in the same network as the load balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic instances to that VPC and then register the linked EC2-Classic instances with the load balancer in the VPC.

Note that RegisterInstanceWithLoadBalancer completes when the request has been registered. Instance registration takes a little time to complete. To check the state of the registered instances, use DescribeLoadBalancers or DescribeInstanceHealth.

After the instance is registered, it starts receiving traffic and requests from the load balancer. Any instance that is not in one of the Availability Zones registered for the load balancer is moved to the OutOfService state. If an Availability Zone is added to the load balancer later, any instances registered with the load balancer move to the InService state.

To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer.

For more information, see Register or De-Register EC2 Instances in the Classic Load Balancer Guide.

" + "documentation":"

Adds the specified instances to the specified load balancer.

The instance must be a running instance in the same network as the load balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic instances to that VPC and then register the linked EC2-Classic instances with the load balancer in the VPC.

Note that RegisterInstancesWithLoadBalancer completes when the request has been registered. Instance registration takes a little time to complete. To check the state of the registered instances, use DescribeLoadBalancers or DescribeInstanceHealth.

After the instance is registered, it starts receiving traffic and requests from the load balancer. Any instance that is not in one of the Availability Zones registered for the load balancer is moved to the OutOfService state. If an Availability Zone is added to the load balancer later, any instances registered with the load balancer move to the InService state.

To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer.

For more information, see Register or De-Register EC2 Instances in the Classic Load Balancers Guide.

" }, "RemoveTags":{ "name":"RemoveTags", @@ -482,7 +482,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"UnsupportedProtocolException"} ], - "documentation":"

Sets the certificate that terminates the specified listener's SSL connections. The specified certificate replaces any prior certificate that was used on the same load balancer and port.

For more information about updating your SSL certificate, see Replace the SSL Certificate for Your Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Sets the certificate that terminates the specified listener's SSL connections. The specified certificate replaces any prior certificate that was used on the same load balancer and port.

For more information about updating your SSL certificate, see Replace the SSL Certificate for Your Load Balancer in the Classic Load Balancers Guide.

" }, "SetLoadBalancerPoliciesForBackendServer":{ "name":"SetLoadBalancerPoliciesForBackendServer", @@ -500,7 +500,7 @@ {"shape":"PolicyNotFoundException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Replaces the set of policies associated with the specified port on which the EC2 instance is listening with a new set of policies. At this time, only the back-end server authentication policy type can be applied to the instance ports; this policy type is composed of multiple public key policies.

Each time you use SetLoadBalancerPoliciesForBackendServer to enable the policies, use the PolicyNames parameter to list the policies that you want to enable.

You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify that the policy is associated with the EC2 instance.

For more information about enabling back-end instance authentication, see Configure Back-end Instance Authentication in the Classic Load Balancer Guide. For more information about Proxy Protocol, see Configure Proxy Protocol Support in the Classic Load Balancer Guide.

" + "documentation":"

Replaces the set of policies associated with the specified port on which the EC2 instance is listening with a new set of policies. At this time, only the back-end server authentication policy type can be applied to the instance ports; this policy type is composed of multiple public key policies.

Each time you use SetLoadBalancerPoliciesForBackendServer to enable the policies, use the PolicyNames parameter to list the policies that you want to enable.

You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify that the policy is associated with the EC2 instance.

For more information about enabling back-end instance authentication, see Configure Back-end Instance Authentication in the Classic Load Balancers Guide. For more information about Proxy Protocol, see Configure Proxy Protocol Support in the Classic Load Balancers Guide.

" }, "SetLoadBalancerPoliciesOfListener":{ "name":"SetLoadBalancerPoliciesOfListener", @@ -519,7 +519,7 @@ {"shape":"ListenerNotFoundException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Replaces the current set of policies for the specified load balancer port with the specified set of policies.

To enable back-end server authentication, use SetLoadBalancerPoliciesForBackendServer.

For more information about setting policies, see Update the SSL Negotiation Configuration, Duration-Based Session Stickiness, and Application-Controlled Session Stickiness in the Classic Load Balancer Guide.

" + "documentation":"

Replaces the current set of policies for the specified load balancer port with the specified set of policies.

To enable back-end server authentication, use SetLoadBalancerPoliciesForBackendServer.

For more information about setting policies, see Update the SSL Negotiation Configuration, Duration-Based Session Stickiness, and Application-Controlled Session Stickiness in the Classic Load Balancers Guide.

" } }, "shapes":{ @@ -828,7 +828,7 @@ }, "Listeners":{ "shape":"Listeners", - "documentation":"

The listeners.

For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

The listeners.

For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", @@ -848,7 +848,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags to assign to the load balancer.

For more information about tagging your load balancer, see Tag Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

A list of tags to assign to the load balancer.

For more information about tagging your load balancer, see Tag Your Classic Load Balancer in the Classic Load Balancers Guide.

" } }, "documentation":"

Contains the parameters for CreateLoadBalancer.

" @@ -1061,6 +1061,7 @@ "type":"structure", "members":{ }, + "documentation":"

A request made by Elastic Load Balancing to another service exceeds the maximum request rate permitted for your account.

", "error":{ "code":"DependencyThrottle", "httpStatusCode":400, @@ -1526,7 +1527,7 @@ "members":{ "Name":{ "shape":"Name", - "documentation":"

The name of the limit. The possible values are:

  • classic-listeners

  • classic-load-balancers

" + "documentation":"

The name of the limit. The possible values are:

  • classic-listeners

  • classic-load-balancers

  • classic-registered-instances

" }, "Max":{ "shape":"Max", @@ -1568,7 +1569,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the server certificate.

" } }, - "documentation":"

Information about a listener.

For information about the protocols and the ports supported by Elastic Load Balancing, see Listeners for Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Information about a listener.

For information about the protocols and the ports supported by Elastic Load Balancing, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "ListenerDescription":{ "type":"structure", @@ -1621,19 +1622,19 @@ "members":{ "CrossZoneLoadBalancing":{ "shape":"CrossZoneLoadBalancing", - "documentation":"

If enabled, the load balancer routes the request traffic evenly across all instances regardless of the Availability Zones.

For more information, see Configure Cross-Zone Load Balancing in the Classic Load Balancer Guide.

" + "documentation":"

If enabled, the load balancer routes the request traffic evenly across all instances regardless of the Availability Zones.

For more information, see Configure Cross-Zone Load Balancing in the Classic Load Balancers Guide.

" }, "AccessLog":{ "shape":"AccessLog", - "documentation":"

If enabled, the load balancer captures detailed information of all requests and delivers the information to the Amazon S3 bucket that you specify.

For more information, see Enable Access Logs in the Classic Load Balancer Guide.

" + "documentation":"

If enabled, the load balancer captures detailed information of all requests and delivers the information to the Amazon S3 bucket that you specify.

For more information, see Enable Access Logs in the Classic Load Balancers Guide.

" }, "ConnectionDraining":{ "shape":"ConnectionDraining", - "documentation":"

If enabled, the load balancer allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy instance.

For more information, see Configure Connection Draining in the Classic Load Balancer Guide.

" + "documentation":"

If enabled, the load balancer allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy instance.

For more information, see Configure Connection Draining in the Classic Load Balancers Guide.

" }, "ConnectionSettings":{ "shape":"ConnectionSettings", - "documentation":"

If enabled, the load balancer allows the connections to remain idle (no data is sent over the connection) for the specified duration.

By default, Elastic Load Balancing maintains a 60-second idle connection timeout for both front-end and back-end connections of your load balancer. For more information, see Configure Idle Connection Timeout in the Classic Load Balancer Guide.

" + "documentation":"

If enabled, the load balancer allows the connections to remain idle (no data is sent over the connection) for the specified duration.

By default, Elastic Load Balancing maintains a 60-second idle connection timeout for both front-end and back-end connections of your load balancer. For more information, see Configure Idle Connection Timeout in the Classic Load Balancers Guide.

" }, "AdditionalAttributes":{ "shape":"AdditionalAttributes", @@ -1655,7 +1656,7 @@ }, "CanonicalHostedZoneName":{ "shape":"DNSName", - "documentation":"

The DNS name of the load balancer.

For more information, see Configure a Custom Domain Name in the Classic Load Balancer Guide.

" + "documentation":"

The DNS name of the load balancer.

For more information, see Configure a Custom Domain Name in the Classic Load Balancers Guide.

" }, "CanonicalHostedZoneNameID":{ "shape":"DNSName", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index f201ae40..136e2e9b 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -849,6 +849,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, @@ -1036,6 +1037,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -1160,6 +1162,15 @@ "us-west-2" : { } } }, + "iotanalytics" : { + "endpoints" : { + "ap-northeast-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "kinesis" : { "endpoints" : { "ap-northeast-1" : { }, @@ -1711,11 +1722,36 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "sa-east-1" : { }, "us-east-1" : { }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "secretsmanager-fips.us-east-1.amazonaws.com" + }, "us-east-2" : { }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "secretsmanager-fips.us-east-2.amazonaws.com" + }, "us-west-1" : { }, - "us-west-2" : { } + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "secretsmanager-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "secretsmanager-fips.us-west-2.amazonaws.com" + } } }, "serverlessrepo" : { @@ -1788,7 +1824,17 @@ }, "servicediscovery" : { "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, 
"us-west-1" : { }, @@ -2106,8 +2152,26 @@ "endpoints" : { "eu-west-1" : { }, "us-east-1" : { }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "translate-fips.us-east-1.amazonaws.com" + }, "us-east-2" : { }, - "us-west-2" : { } + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "translate-fips.us-east-2.amazonaws.com" + }, + "us-west-2" : { }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "translate-fips.us-west-2.amazonaws.com" + } } }, "waf" : { @@ -2247,6 +2311,12 @@ "cn-northwest-1" : { } } }, + "codebuild" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "codedeploy" : { "endpoints" : { "cn-north-1" : { }, @@ -2536,6 +2606,11 @@ "us-gov-west-1" : { } } }, + "application-autoscaling" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "autoscaling" : { "endpoints" : { "us-gov-west-1" : { @@ -2847,7 +2922,13 @@ "protocols" : [ "https" ] }, "endpoints" : { - "us-gov-west-1" : { } + "us-gov-west-1" : { }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "translate-fips.us-gov-west-1.amazonaws.com" + } } } } diff --git a/botocore/data/es/2015-01-01/service-2.json b/botocore/data/es/2015-01-01/service-2.json index 920507a9..e14e7a8f 100644 --- a/botocore/data/es/2015-01-01/service-2.json +++ b/botocore/data/es/2015-01-01/service-2.json @@ -545,6 +545,10 @@ "shape":"EncryptionAtRestOptions", "documentation":"

Specifies the Encryption At Rest Options.

" }, + "NodeToNodeEncryptionOptions":{ + "shape":"NodeToNodeEncryptionOptions", + "documentation":"

Specifies the NodeToNodeEncryptionOptions.

" + }, "AdvancedOptions":{ "shape":"AdvancedOptions", "documentation":"

Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Configuration Advanced Options for more information.

" @@ -978,6 +982,10 @@ "shape":"EncryptionAtRestOptionsStatus", "documentation":"

Specifies the EncryptionAtRestOptions for the Elasticsearch domain.

" }, + "NodeToNodeEncryptionOptions":{ + "shape":"NodeToNodeEncryptionOptionsStatus", + "documentation":"

Specifies the NodeToNodeEncryptionOptions for the Elasticsearch domain.

" + }, "AdvancedOptions":{ "shape":"AdvancedOptionsStatus", "documentation":"

Specifies the AdvancedOptions for the domain. See Configuring Advanced Options for more information.

" @@ -1063,6 +1071,10 @@ "shape":"EncryptionAtRestOptions", "documentation":"

Specifies the status of the EncryptionAtRestOptions.

" }, + "NodeToNodeEncryptionOptions":{ + "shape":"NodeToNodeEncryptionOptions", + "documentation":"

Specifies the status of the NodeToNodeEncryptionOptions.

" + }, "AdvancedOptions":{ "shape":"AdvancedOptions", "documentation":"

Specifies the status of the AdvancedOptions

" @@ -1486,6 +1498,34 @@ "type":"string", "documentation":"

Paginated APIs accepts NextToken input to returns next page results and provides a NextToken output in the response which can be used by the client to retrieve more results.

" }, + "NodeToNodeEncryptionOptions":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Specify true to enable node-to-node encryption.

" + } + }, + "documentation":"

Specifies the node-to-node encryption options.

" + }, + "NodeToNodeEncryptionOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{ + "shape":"NodeToNodeEncryptionOptions", + "documentation":"

Specifies the node-to-node encryption options for the specified Elasticsearch domain.

" + }, + "Status":{ + "shape":"OptionStatus", + "documentation":"

Specifies the status of the node-to-node encryption options for the specified Elasticsearch domain.

" + } + }, + "documentation":"

Status of the node-to-node encryption options for the specified Elasticsearch domain.

" + }, "OptionState":{ "type":"string", "documentation":"

The state of a requested change. One of the following:

  • Processing: The request change is still in-process.
  • Active: The request change is processed and deployed to the Elasticsearch domain.
", diff --git a/botocore/data/events/2014-02-03/service-2.json b/botocore/data/events/2014-02-03/service-2.json index e6cf2a40..cbd4d60d 100644 --- a/botocore/data/events/2014-02-03/service-2.json +++ b/botocore/data/events/2014-02-03/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"events", "jsonVersion":"1.1", "serviceFullName":"Amazon CloudWatch Events", + "serviceId":"CloudWatch Events", "signatureVersion":"v4", "targetPrefix":"AWSEvents", "protocol":"json" diff --git a/botocore/data/events/2015-10-07/service-2.json b/botocore/data/events/2015-10-07/service-2.json index 0c7c5edf..a5556537 100644 --- a/botocore/data/events/2015-10-07/service-2.json +++ b/botocore/data/events/2015-10-07/service-2.json @@ -23,7 +23,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"InternalException"} ], - "documentation":"

Deletes the specified rule.

You must remove all targets from a rule using RemoveTargets before you can delete the rule.

When you delete a rule, incoming events might continue to match to the deleted rule. Please allow a short period of time for changes to take effect.

" + "documentation":"

Deletes the specified rule.

Before you can delete the rule, you must remove all targets, using RemoveTargets.

When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.

" }, "DescribeEventBus":{ "name":"DescribeEventBus", @@ -51,7 +51,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalException"} ], - "documentation":"

Describes the specified rule.

" + "documentation":"

Describes the specified rule.

DescribeRule does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

" }, "DisableRule":{ "name":"DisableRule", @@ -65,7 +65,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"InternalException"} ], - "documentation":"

Disables the specified rule. A disabled rule won't match any events, and won't self-trigger if it has a schedule expression.

When you disable a rule, incoming events might continue to match to the disabled rule. Please allow a short period of time for changes to take effect.

" + "documentation":"

Disables the specified rule. A disabled rule won't match any events, and won't self-trigger if it has a schedule expression.

When you disable a rule, incoming events might continue to match to the disabled rule. Allow a short period of time for changes to take effect.

" }, "EnableRule":{ "name":"EnableRule", @@ -79,7 +79,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"InternalException"} ], - "documentation":"

Enables the specified rule. If the rule does not exist, the operation fails.

When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Please allow a short period of time for changes to take effect.

" + "documentation":"

Enables the specified rule. If the rule does not exist, the operation fails.

When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Allow a short period of time for changes to take effect.

" }, "ListRuleNamesByTarget":{ "name":"ListRuleNamesByTarget", @@ -105,7 +105,7 @@ "errors":[ {"shape":"InternalException"} ], - "documentation":"

Lists your Amazon CloudWatch Events rules. You can either list all the rules or you can provide a prefix to match to the rule names.

" + "documentation":"

Lists your Amazon CloudWatch Events rules. You can either list all the rules or you can provide a prefix to match to the rule names.

ListRules does not list the targets of a rule. To see the targets associated with a rule, use ListTargetsByRule.

" }, "ListTargetsByRule":{ "name":"ListTargetsByRule", @@ -147,7 +147,7 @@ {"shape":"InternalException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Running PutPermission permits the specified AWS account to put events to your account's default event bus. CloudWatch Events rules in your account are triggered by these events arriving to your default event bus.

For another account to send events to your account, that external account must have a CloudWatch Events rule with your account's default event bus as a target.

To enable multiple AWS accounts to put events to your default event bus, run PutPermission once for each of these accounts.

The permission policy on the default event bus cannot exceed 10KB in size.

" + "documentation":"

Running PutPermission permits the specified AWS account to put events to your account's default event bus. CloudWatch Events rules in your account are triggered by these events arriving to your default event bus.

For another account to send events to your account, that external account must have a CloudWatch Events rule with your account's default event bus as a target.

To enable multiple AWS accounts to put events to your default event bus, run PutPermission once for each of these accounts.

The permission policy on the default event bus cannot exceed 10 KB in size.

" }, "PutRule":{ "name":"PutRule", @@ -163,7 +163,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"InternalException"} ], - "documentation":"

Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule.

If you are updating an existing rule, the rule is completely replaced with what you specify in this PutRule command. If you omit arguments in PutRule, the old values for those arguments are not kept. Instead, they are replaced with null values.

When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Please allow a short period of time for changes to take effect.

A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

" + "documentation":"

Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule.

If you are updating an existing rule, the rule is replaced with what you specify in this PutRule command. If you omit arguments in PutRule, the old values for those arguments are not kept. Instead, they are replaced with null values.

When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect.

A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.

Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

" }, "PutTargets":{ "name":"PutTargets", @@ -179,7 +179,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalException"} ], - "documentation":"

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for CloudWatch Events:

  • EC2 instances

  • AWS Lambda functions

  • Streams in Amazon Kinesis Streams

  • Delivery streams in Amazon Kinesis Firehose

  • Amazon ECS tasks

  • AWS Step Functions state machines

  • AWS Batch jobs

  • Pipelines in Amazon Code Pipeline

  • Amazon Inspector assessment templates

  • Amazon SNS topics

  • Amazon SQS queues, including FIFO queues

  • The default event bus of another AWS account

Note that creating rules with built-in targets is supported only in the AWS Management Console.

For some target types, PutTargets provides target-specific parameters. If the target is an Amazon Kinesis stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon CloudWatch Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, CloudWatch Events relies on resource-based policies. For EC2 instances, Amazon Kinesis streams, and AWS Step Functions state machines, CloudWatch Events relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon CloudWatch Events User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account by setting that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information on pricing, see Amazon CloudWatch Pricing.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON form (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Please allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" + "documentation":"

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for CloudWatch Events:

  • EC2 instances

  • SSM Run Command

  • SSM Automation

  • AWS Lambda functions

  • Data streams in Amazon Kinesis Data Streams

  • Data delivery streams in Amazon Kinesis Data Firehose

  • Amazon ECS tasks

  • AWS Step Functions state machines

  • AWS Batch jobs

  • AWS CodeBuild projects

  • Pipelines in AWS CodePipeline

  • Amazon Inspector assessment templates

  • Amazon SNS topics

  • Amazon SQS queues, including FIFO queues

  • The default event bus of another AWS account

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon CloudWatch Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, CloudWatch Events relies on resource-based policies. For EC2 instances, Kinesis data streams, and AWS Step Functions state machines, CloudWatch Events relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon CloudWatch Events User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon CloudWatch Pricing.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" }, "RemovePermission":{ "name":"RemovePermission", @@ -208,7 +208,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"InternalException"} ], - "documentation":"

Removes the specified targets from the specified rule. When the rule is triggered, those targets are no longer invoked.

When you remove a target, when the associated rule triggers, removed targets might continue to be invoked. Please allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" + "documentation":"

Removes the specified targets from the specified rule. When the rule is triggered, those targets are no longer invoked.

When you remove a target, when the associated rule triggers, removed targets might continue to be invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" }, "TestEventPattern":{ "name":"TestEventPattern", @@ -237,6 +237,32 @@ "max":1600, "min":1 }, + "AssignPublicIp":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "AwsVpcConfiguration":{ + "type":"structure", + "required":["Subnets"], + "members":{ + "Subnets":{ + "shape":"StringList", + "documentation":"

Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets.

" + }, + "SecurityGroups":{ + "shape":"StringList", + "documentation":"

Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.

" + }, + "AssignPublicIp":{ + "shape":"AssignPublicIp", + "documentation":"

Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE.

" + } + }, + "documentation":"

This structure specifies the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode.

" + }, "BatchArrayProperties":{ "type":"structure", "members":{ @@ -268,7 +294,7 @@ }, "RetryStrategy":{ "shape":"BatchRetryStrategy", - "documentation":"

The retry strategy to use for failed jobs, if the target is an AWS Batch job. The retry strategy is the number of times to retry the failed job execution. Valid values are 1 to 10. When you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

" + "documentation":"

The retry strategy to use for failed jobs, if the target is an AWS Batch job. The retry strategy is the number of times to retry the failed job execution. Valid values are 1–10. When you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

" } }, "documentation":"

The custom parameters to be used when the target is an AWS Batch job.

" @@ -278,7 +304,7 @@ "members":{ "Attempts":{ "shape":"Integer", - "documentation":"

The number of times to attempt to retry, if the job fails. Valid values are 1 to 10.

" + "documentation":"

The number of times to attempt to retry, if the job fails. Valid values are 1–10.

" } }, "documentation":"

The retry strategy to use for failed jobs, if the target is an AWS Batch job. If you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

" @@ -382,14 +408,30 @@ "members":{ "TaskDefinitionArn":{ "shape":"Arn", - "documentation":"

The ARN of the task definition to use if the event target is an Amazon ECS cluster.

" + "documentation":"

The ARN of the task definition to use if the event target is an Amazon ECS task.

" }, "TaskCount":{ "shape":"LimitMin1", - "documentation":"

The number of tasks to create based on the TaskDefinition. The default is one.

" + "documentation":"

The number of tasks to create based on TaskDefinition. The default is 1.

" + }, + "LaunchType":{ + "shape":"LaunchType", + "documentation":"

Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. The FARGATE value is supported only in the Regions where AWS Fargate with Amazon ECS is supported. For more information, see AWS Fargate on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" + }, + "NetworkConfiguration":{ + "shape":"NetworkConfiguration", + "documentation":"

Use this structure if the ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks.

If you specify NetworkConfiguration when the target ECS task does not use the awsvpc network mode, the task fails.

" + }, + "PlatformVersion":{ + "shape":"String", + "documentation":"

Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0.

This structure is used only if LaunchType is FARGATE. For more information about valid platform versions, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + }, + "Group":{ + "shape":"String", + "documentation":"

Specifies an ECS task group for the task. The maximum length is 255 characters.

" } }, - "documentation":"

The custom parameters to be used when the target is an Amazon ECS cluster.

" + "documentation":"

The custom parameters to be used when the target is an Amazon ECS task.

" }, "EnableRuleRequest":{ "type":"structure", @@ -417,11 +459,11 @@ "members":{ "InputPathsMap":{ "shape":"TransformerPaths", - "documentation":"

Map of JSON paths to be extracted from the event. These are key-value pairs, where each value is a JSON path. You must use JSON dot notation, not bracket notation.

" + "documentation":"

Map of JSON paths to be extracted from the event. You can then insert these in the template in InputTemplate to produce the output you want to be sent to the target.

InputPathsMap is an array of key-value pairs, where each value is a valid JSON path. You can have as many as 10 key-value pairs. You must use JSON dot notation, not bracket notation.

The keys cannot start with \"AWS.\"

" }, "InputTemplate":{ "shape":"TransformerInput", - "documentation":"

Input template where you can use the values of the keys from InputPathsMap to customize the data sent to the target.

" + "documentation":"

Input template where you specify placeholders that will be filled with the values of the keys from InputPathsMap to customize the data sent to the target. Enclose each InputPathsMaps value in brackets: <value> The InputTemplate must be valid JSON.

If InputTemplate is a JSON object (surrounded by curly braces), the following restrictions apply:

  • The placeholder cannot be used as an object key.

  • Object values cannot include quote marks.

The following example shows the syntax for using InputPathsMap and InputTemplate.

\"InputTransformer\":

{

\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},

\"InputTemplate\": \"<instance> is in state <status>\"

}

To have the InputTemplate include quote marks within a JSON string, escape each quote mark with a slash, as in the following example:

\"InputTransformer\":

{

\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},

\"InputTemplate\": \"<instance> is in state \\\"<status>\\\"\"

}

" } }, "documentation":"

Contains the parameters needed for you to provide custom input to a target based on one or more pieces of data extracted from the event.

" @@ -457,7 +499,14 @@ "documentation":"

The JSON path to be extracted from the event and used as the partition key. For more information, see Amazon Kinesis Streams Key Concepts in the Amazon Kinesis Streams Developer Guide.

" } }, - "documentation":"

This object enables you to specify a JSON path to extract from the event and use as the partition key for the Amazon Kinesis stream, so that you can control the shard to which the event goes. If you do not include this parameter, the default is to use the eventId as the partition key.

" + "documentation":"

This object enables you to specify a JSON path to extract from the event and use as the partition key for the Amazon Kinesis data stream, so that you can control the shard to which the event goes. If you do not include this parameter, the default is to use the eventId as the partition key.

" + }, + "LaunchType":{ + "type":"string", + "enum":[ + "EC2", + "FARGATE" + ] }, "LimitExceededException":{ "type":"structure", @@ -568,6 +617,16 @@ } }, "MessageGroupId":{"type":"string"}, + "NetworkConfiguration":{ + "type":"structure", + "members":{ + "awsvpcConfiguration":{ + "shape":"AwsVpcConfiguration", + "documentation":"

Use this structure to specify the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode.

" + } + }, + "documentation":"

This structure specifies the network configuration for an ECS task.

" + }, "NextToken":{ "type":"string", "max":2048, @@ -601,11 +660,11 @@ "members":{ "Time":{ "shape":"EventTime", - "documentation":"

The timestamp of the event, per RFC3339. If no timestamp is provided, the timestamp of the PutEvents call is used.

" + "documentation":"

The time stamp of the event, per RFC3339. If no time stamp is provided, the time stamp of the PutEvents call is used.

" }, "Source":{ "shape":"String", - "documentation":"

The source of the event.

" + "documentation":"

The source of the event. This field is required.

" }, "Resources":{ "shape":"EventResourceList", @@ -987,6 +1046,10 @@ "pattern":"[a-zA-Z0-9-_]+" }, "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, "Target":{ "type":"structure", "required":[ @@ -1020,7 +1083,7 @@ }, "KinesisParameters":{ "shape":"KinesisParameters", - "documentation":"

The custom parameter you can use to control shard assignment, when the target is an Amazon Kinesis stream. If you do not include this parameter, the default is to use the eventId as the partition key.

" + "documentation":"

The custom parameter you can use to control the shard assignment, when the target is a Kinesis data stream. If you do not include this parameter, the default is to use the eventId as the partition key.

" }, "RunCommandParameters":{ "shape":"RunCommandParameters", @@ -1032,14 +1095,14 @@ }, "BatchParameters":{ "shape":"BatchParameters", - "documentation":"

Contains the job definition, job name, and other parameters if the event target is an AWS Batch job. For more information about AWS Batch, see Jobs in the AWS Batch User Guide.

" + "documentation":"

If the event target is an AWS Batch job, this contains the job definition, job name, and other parameters. For more information, see Jobs in the AWS Batch User Guide.

" }, "SqsParameters":{ "shape":"SqsParameters", - "documentation":"

Contains the message group ID to use when the target is a FIFO queue.

" + "documentation":"

Contains the message group ID to use when the target is a FIFO queue.

If you specify an SQS FIFO queue as a target, the queue must have content-based deduplication enabled.

" } }, - "documentation":"

Targets are the resources to be invoked when a rule is triggered. Target types include EC2 instances, AWS Lambda functions, Amazon Kinesis streams, Amazon ECS tasks, AWS Step Functions state machines, Run Command, and built-in targets.

" + "documentation":"

Targets are the resources to be invoked when a rule is triggered. For a complete list of services and resources that can be set as a target, see PutTargets.

" }, "TargetArn":{ "type":"string", @@ -1114,5 +1177,5 @@ "max":10 } }, - "documentation":"

Amazon CloudWatch Events helps you to respond to state changes in your AWS resources. When your resources change state, they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a pre-determined schedule. For example, you can configure rules to:

  • Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that Amazon EC2 instance enters the running state.

  • Direct specific API records from CloudTrail to an Amazon Kinesis stream for detailed analysis of potential security or availability risks.

  • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume.

For more information about the features of Amazon CloudWatch Events, see the Amazon CloudWatch Events User Guide.

" + "documentation":"

Amazon CloudWatch Events helps you to respond to state changes in your AWS resources. When your resources change state, they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a pre-determined schedule. For example, you can configure rules to:

  • Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that Amazon EC2 instance enters the running state.

  • Direct specific API records from AWS CloudTrail to an Amazon Kinesis data stream for detailed analysis of potential security or availability risks.

  • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume.

For more information about the features of Amazon CloudWatch Events, see the Amazon CloudWatch Events User Guide.

" } diff --git a/botocore/data/fms/2018-01-01/service-2.json b/botocore/data/fms/2018-01-01/service-2.json index dc3df2d5..9022388e 100644 --- a/botocore/data/fms/2018-01-01/service-2.json +++ b/botocore/data/fms/2018-01-01/service-2.json @@ -26,7 +26,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Sets the AWS Firewall Manager administrator account. AWS Firewall Manager must be associated with a master account in AWS Organizations or associated with a member account that has the appropriate permissions. If the account ID that you submit is not an AWS Organizations master account, AWS Firewall Manager will set the appropriate permissions for the given member account.

The account that you associate with AWS Firewall Manager is called the AWS Firewall manager administrator account.

" + "documentation":"

Sets the AWS Firewall Manager administrator account. AWS Firewall Manager must be associated with the master account of your AWS organization or associated with a member account that has the appropriate permissions. If the account ID that you submit is not an AWS Organizations master account, AWS Firewall Manager will set the appropriate permissions for the given member account.

The account that you associate with AWS Firewall Manager is called the AWS Firewall Manager administrator account.

" }, "DeleteNotificationChannel":{ "name":"DeleteNotificationChannel", @@ -125,7 +125,8 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidOperationException"}, - {"shape":"InternalErrorException"} + {"shape":"InternalErrorException"}, + {"shape":"InvalidTypeException"} ], "documentation":"

Returns information about the specified AWS Firewall Manager policy.

" }, @@ -143,6 +144,20 @@ ], "documentation":"

Returns an array of PolicyComplianceStatus objects in the response. Use PolicyComplianceStatus to get a summary of which member accounts are protected by the specified policy.

" }, + "ListMemberAccounts":{ + "name":"ListMemberAccounts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMemberAccountsRequest"}, + "output":{"shape":"ListMemberAccountsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalErrorException"} + ], + "documentation":"

Returns a MemberAccounts object that lists the member accounts in the administrator's AWS organization.

The ListMemberAccounts must be submitted by the account that is set as the AWS Firewall Manager administrator.

" + }, "ListPolicies":{ "name":"ListPolicies", "http":{ @@ -185,7 +200,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidOperationException"}, {"shape":"InvalidInputException"}, - {"shape":"InternalErrorException"} + {"shape":"LimitExceededException"}, + {"shape":"InternalErrorException"}, + {"shape":"InvalidTypeException"} ], "documentation":"

Creates an AWS Firewall Manager policy.

" } @@ -196,6 +213,16 @@ "max":1024, "min":1 }, + "AccountRoleStatus":{ + "type":"string", + "enum":[ + "READY", + "CREATING", + "PENDING_DELETION", + "DELETING", + "DELETED" + ] + }, "AssociateAdminAccountRequest":{ "type":"structure", "required":["AdminAccount"], @@ -229,6 +256,24 @@ "type":"list", "member":{"shape":"ComplianceViolator"} }, + "CustomerPolicyScopeId":{ + "type":"string", + "max":1024, + "min":1 + }, + "CustomerPolicyScopeIdList":{ + "type":"list", + "member":{"shape":"CustomerPolicyScopeId"} + }, + "CustomerPolicyScopeIdType":{ + "type":"string", + "enum":["ACCOUNT"] + }, + "CustomerPolicyScopeMap":{ + "type":"map", + "key":{"shape":"CustomerPolicyScopeIdType"}, + "value":{"shape":"CustomerPolicyScopeIdList"} + }, "DeleteNotificationChannelRequest":{ "type":"structure", "members":{ @@ -244,6 +289,18 @@ } } }, + "DependentServiceName":{ + "type":"string", + "enum":[ + "AWSCONFIG", + "AWSWAF" + ] + }, + "DetailedInfo":{ + "type":"string", + "max":1024, + "min":1 + }, "DisassociateAdminAccountRequest":{ "type":"structure", "members":{ @@ -283,6 +340,10 @@ "AdminAccount":{ "shape":"AWSAccountId", "documentation":"

The AWS account that is set as the AWS Firewall Manager administrator.

" + }, + "RoleStatus":{ + "shape":"AccountRoleStatus", + "documentation":"

The status of the AWS account that you set as the AWS Firewall Manager administrator.

" } } }, @@ -377,6 +438,19 @@ "documentation":"

The operation failed because there was nothing to do. For example, you might have submitted an AssociateAdminAccount request, but the account ID that you submitted was already set as the AWS Firewall Manager administrator.

", "exception":true }, + "InvalidTypeException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The value of the Type parameter is invalid.

", + "exception":true + }, + "IssueInfoMap":{ + "type":"map", + "key":{"shape":"DependentServiceName"}, + "value":{"shape":"DetailedInfo"} + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -416,6 +490,32 @@ } } }, + "ListMemberAccountsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you specify a value for MaxResults and you have more account IDs than the number that you specify for MaxResults, AWS Firewall Manager returns a NextToken value in the response that allows you to list another group of IDs. For the second and subsequent ListMemberAccountsRequest requests, specify the value of NextToken from the previous response to get information about another batch of member account IDs.

" + }, + "MaxResults":{ + "shape":"PaginationMaxResults", + "documentation":"

Specifies the number of member account IDs that you want AWS Firewall Manager to return for this request. If you have more IDs than the number that you specify for MaxResults, the response includes a NextToken value that you can use to get another batch of member account IDs. The maximum value for MaxResults is 100.

" + } + } + }, + "ListMemberAccountsResponse":{ + "type":"structure", + "members":{ + "MemberAccounts":{ + "shape":"MemberAccounts", + "documentation":"

An array of account IDs.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If you have more member account IDs than the number that you specified for MaxResults in the request, the response includes a NextToken value. To list more IDs, submit another ListMemberAccounts request, and specify the NextToken value from the response in the NextToken value in the next request.

" + } + } + }, "ListPoliciesRequest":{ "type":"structure", "members":{ @@ -447,6 +547,10 @@ "max":1024, "min":1 }, + "MemberAccounts":{ + "type":"list", + "member":{"shape":"AWSAccountId"} + }, "PaginationMaxResults":{ "type":"integer", "max":100, @@ -497,6 +601,14 @@ "RemediationEnabled":{ "shape":"Boolean", "documentation":"

Indicates if the policy should be automatically applied to new resources.

" + }, + "IncludeMap":{ + "shape":"CustomerPolicyScopeMap", + "documentation":"

Specifies the AWS account IDs to include in the policy. If IncludeMap is null, all accounts in the AWS Organization are included in the policy. If IncludeMap is not null, only values listed in IncludeMap will be included in the policy.

The key to the map is ACCOUNT. For example, a valid IncludeMap would be {“ACCOUNT” : [“accountID1”, “accountID2”]}.

" + }, + "ExcludeMap":{ + "shape":"CustomerPolicyScopeMap", + "documentation":"

Specifies the AWS account IDs to exclude from the policy. The IncludeMap values are evaluated first, with all of the appropriate account IDs added to the policy. Then the accounts listed in ExcludeMap are removed, resulting in the final list of accounts to add to the policy.

The key to the map is ACCOUNT. For example, a valid ExcludeMap would be {“ACCOUNT” : [“accountID1”, “accountID2”]}.

" } }, "documentation":"

An AWS Firewall Manager policy.

" @@ -527,6 +639,10 @@ "ExpiredAt":{ "shape":"TimeStamp", "documentation":"

A time stamp that indicates when the returned information should be considered out-of-date.

" + }, + "IssueInfoMap":{ + "shape":"IssueInfoMap", + "documentation":"

Details about problems with dependent services, such as AWS WAF or AWS Config, that are causing a resource to be non-compliant. The details include the name of the dependent service and the error message received indicating the problem with the service.

" } }, "documentation":"

Describes the non-compliant resources in a member account for a specific AWS Firewall Manager policy. A maximum of 100 entries are displayed. If more than 100 resources are non-compliant, EvaluationLimitExceeded is set to True.

" @@ -557,6 +673,10 @@ "LastUpdated":{ "shape":"TimeStamp", "documentation":"

Time stamp of the last update to the EvaluationResult objects.

" + }, + "IssueInfoMap":{ + "shape":"IssueInfoMap", + "documentation":"

Details about problems with dependent services, such as AWS WAF or AWS Config, that are causing a resource to be non-compliant. The details include the name of the dependent service and the error message received indicating the problem with the service.

" } }, "documentation":"

Indicates whether the account is compliant with the specified policy. An account is considered non-compliant if it includes resources that are not protected by the policy.

" diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index 58b569d7..84d3972d 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -26,7 +26,8 @@ {"shape":"ResourceNumberLimitExceededException"}, {"shape":"InternalServiceException"}, {"shape":"EntityNotFoundException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Creates one or more partitions in a batch operation.

" }, @@ -74,7 +75,7 @@ {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Deletes multiple tables at once.

" + "documentation":"

Deletes multiple tables at once.

After completing this operation, you will no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these \"orphaned\" resources asynchronously in a timely manner, at the discretion of the service.

To ensure immediate deletion of all related resources, before calling BatchDeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

" }, "BatchDeleteTableVersion":{ "name":"BatchDeleteTableVersion", @@ -104,7 +105,8 @@ {"shape":"InvalidInputException"}, {"shape":"EntityNotFoundException"}, {"shape":"OperationTimeoutException"}, - {"shape":"InternalServiceException"} + {"shape":"InternalServiceException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves partitions in a batch request.

" }, @@ -150,7 +152,8 @@ {"shape":"AlreadyExistsException"}, {"shape":"InvalidInputException"}, {"shape":"OperationTimeoutException"}, - {"shape":"ResourceNumberLimitExceededException"} + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Creates a connection definition in the Data Catalog.

" }, @@ -183,7 +186,8 @@ {"shape":"AlreadyExistsException"}, {"shape":"ResourceNumberLimitExceededException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Creates a new database in a Data Catalog.

" }, @@ -240,7 +244,8 @@ {"shape":"ResourceNumberLimitExceededException"}, {"shape":"InternalServiceException"}, {"shape":"EntityNotFoundException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Creates a new partition.

" }, @@ -259,6 +264,23 @@ ], "documentation":"

Transforms a directed acyclic graph (DAG) into code.

" }, + "CreateSecurityConfiguration":{ + "name":"CreateSecurityConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSecurityConfigurationRequest"}, + "output":{"shape":"CreateSecurityConfigurationResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"ResourceNumberLimitExceededException"} + ], + "documentation":"

Creates a new security configuration.

" + }, "CreateTable":{ "name":"CreateTable", "http":{ @@ -273,7 +295,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"ResourceNumberLimitExceededException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Creates a new table definition in the Data Catalog.

" }, @@ -310,7 +333,8 @@ {"shape":"InternalServiceException"}, {"shape":"EntityNotFoundException"}, {"shape":"OperationTimeoutException"}, - {"shape":"ResourceNumberLimitExceededException"} + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Creates a new function definition in the Data Catalog.

" }, @@ -372,7 +396,7 @@ {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Removes a specified Database from a Data Catalog.

" + "documentation":"

Removes a specified Database from a Data Catalog.

After completing this operation, you will no longer have access to the tables (and all table versions and partitions that might belong to the tables) and the user-defined functions in the deleted database. AWS Glue deletes these \"orphaned\" resources asynchronously in a timely manner, at the discretion of the service.

To ensure immediate deletion of all related resources, before calling DeleteDatabase, use DeleteTableVersion or BatchDeleteTableVersion, DeletePartition or BatchDeletePartition, DeleteUserDefinedFunction, and DeleteTable or BatchDeleteTable, to delete any resources that belong to the database.

" }, "DeleteDevEndpoint":{ "name":"DeleteDevEndpoint", @@ -421,6 +445,22 @@ ], "documentation":"

Deletes a specified partition.

" }, + "DeleteSecurityConfiguration":{ + "name":"DeleteSecurityConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSecurityConfigurationRequest"}, + "output":{"shape":"DeleteSecurityConfigurationResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes a specified security configuration.

" + }, "DeleteTable":{ "name":"DeleteTable", "http":{ @@ -435,7 +475,7 @@ {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Removes a table definition from the Data Catalog.

" + "documentation":"

Removes a table definition from the Data Catalog.

After completing this operation, you will no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these \"orphaned\" resources asynchronously in a timely manner, at the discretion of the service.

To ensure immediate deletion of all related resources, before calling DeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

" }, "DeleteTableVersion":{ "name":"DeleteTableVersion", @@ -536,7 +576,9 @@ "output":{"shape":"GetConnectionResponse"}, "errors":[ {"shape":"EntityNotFoundException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves a connection definition from the Data Catalog.

" }, @@ -550,7 +592,9 @@ "output":{"shape":"GetConnectionsResponse"}, "errors":[ {"shape":"EntityNotFoundException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves a list of connection definitions from the Data Catalog.

" }, @@ -594,6 +638,21 @@ ], "documentation":"

Retrieves metadata for all crawlers defined in the customer account.

" }, + "GetDataCatalogEncryptionSettings":{ + "name":"GetDataCatalogEncryptionSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDataCatalogEncryptionSettingsRequest"}, + "output":{"shape":"GetDataCatalogEncryptionSettingsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves the security configuration for a specified catalog.

" + }, "GetDatabase":{ "name":"GetDatabase", "http":{ @@ -606,7 +665,8 @@ {"shape":"InvalidInputException"}, {"shape":"EntityNotFoundException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves the definition of a specified database.

" }, @@ -621,7 +681,8 @@ "errors":[ {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves all Databases defined in a given Data Catalog.

" }, @@ -764,7 +825,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves information about a specified partition.

" }, @@ -780,7 +842,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"OperationTimeoutException"}, - {"shape":"InternalServiceException"} + {"shape":"InternalServiceException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves information about the partitions in a table.

" }, @@ -799,6 +862,38 @@ ], "documentation":"

Gets code to perform a specified mapping.

" }, + "GetSecurityConfiguration":{ + "name":"GetSecurityConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSecurityConfigurationRequest"}, + "output":{"shape":"GetSecurityConfigurationResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves a specified security configuration.

" + }, + "GetSecurityConfigurations":{ + "name":"GetSecurityConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSecurityConfigurationsRequest"}, + "output":{"shape":"GetSecurityConfigurationsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves a list of all security configurations.

" + }, "GetTable":{ "name":"GetTable", "http":{ @@ -811,7 +906,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves the Table definition in a Data Catalog for a specified table.

" }, @@ -827,7 +923,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves a specified version of a table.

" }, @@ -843,7 +940,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves a list of strings that identify available versions of a specified table.

" }, @@ -859,7 +957,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"OperationTimeoutException"}, - {"shape":"InternalServiceException"} + {"shape":"InternalServiceException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves the definitions of some or all of the tables in a given Database.

" }, @@ -907,7 +1006,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves a specified function definition from the Data Catalog.

" }, @@ -923,7 +1023,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"OperationTimeoutException"}, - {"shape":"InternalServiceException"} + {"shape":"InternalServiceException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Retrieves multiple function definitions from the Data Catalog.

" }, @@ -941,6 +1042,21 @@ ], "documentation":"

Imports an existing Athena Data Catalog to AWS Glue

" }, + "PutDataCatalogEncryptionSettings":{ + "name":"PutDataCatalogEncryptionSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDataCatalogEncryptionSettingsRequest"}, + "output":{"shape":"PutDataCatalogEncryptionSettingsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Sets the security configuration for a specified catalog. Once the configuration has been set, the specified encryption is applied to every catalog write thereafter.

" + }, "ResetJobBookmark":{ "name":"ResetJobBookmark", "http":{ @@ -1101,7 +1217,9 @@ "errors":[ {"shape":"InvalidInputException"}, {"shape":"EntityNotFoundException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Updates a connection definition in the Data Catalog.

" }, @@ -1151,7 +1269,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Updates an existing database definition in a Data Catalog.

" }, @@ -1201,7 +1320,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Updates a partition.

" }, @@ -1219,7 +1339,8 @@ {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"}, {"shape":"ConcurrentModificationException"}, - {"shape":"ResourceNumberLimitExceededException"} + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Updates a metadata table in the Data Catalog.

" }, @@ -1252,7 +1373,8 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} ], "documentation":"

Updates an existing function definition in the Data Catalog.

" } @@ -1278,15 +1400,19 @@ }, "Arguments":{ "shape":"GenericMap", - "documentation":"

Arguments to be passed to the job.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

" + "documentation":"

Arguments to be passed to the job run.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

" }, "Timeout":{ "shape":"Timeout", - "documentation":"

The job run timeout in minutes. It overrides the timeout value of the job.

" + "documentation":"

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.

" }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job run notification.

" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the SecurityConfiguration structure to be used with this action.

" } }, "documentation":"

Defines an action to be initiated by a trigger.

" @@ -1617,6 +1743,13 @@ "max":100, "min":0 }, + "CatalogEncryptionMode":{ + "type":"string", + "enum":[ + "DISABLED", + "SSE-KMS" + ] + }, "CatalogEntries":{ "type":"list", "member":{"shape":"CatalogEntry"} @@ -1690,6 +1823,27 @@ "type":"list", "member":{"shape":"NameString"} }, + "CloudWatchEncryption":{ + "type":"structure", + "members":{ + "CloudWatchEncryptionMode":{ + "shape":"CloudWatchEncryptionMode", + "documentation":"

The encryption mode to use for CloudWatch data.

" + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The AWS ARN of the KMS key to be used to encrypt the data.

" + } + }, + "documentation":"

Specifies how CloudWatch data should be encrypted.

" + }, + "CloudWatchEncryptionMode":{ + "type":"string", + "enum":[ + "DISABLED", + "SSE-KMS" + ] + }, "CodeGenArgName":{"type":"string"}, "CodeGenArgValue":{"type":"string"}, "CodeGenEdge":{ @@ -1881,7 +2035,7 @@ }, "ConnectionProperties":{ "shape":"ConnectionProperties", - "documentation":"

A list of key-value pairs used as parameters for this connection.

" + "documentation":"

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USER_NAME - The name under which to log in to the database.

  • PASSWORD - A password, if one is used, for the user name.

  • JDBC_DRIVER_JAR_URI - The S3 path of a jar file that contains the JDBC driver to use.

  • JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

  • JDBC_ENGINE - The name of the JDBC engine to use.

  • JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

  • CONFIG_FILES - (Reserved for future use).

  • INSTANCE_ID - The instance ID to use.

  • JDBC_CONNECTION_URL - The URL for the JDBC connection.

  • JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether SSL with hostname matching will be enforced for the JDBC connection on the client. The default is false.

" }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", @@ -1928,7 +2082,7 @@ }, "ConnectionProperties":{ "shape":"ConnectionProperties", - "documentation":"

A list of key-value pairs used as parameters for this connection.

" + "documentation":"

These key-value pairs define parameters for the connection.

" }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", @@ -1962,7 +2116,8 @@ "JDBC_ENGINE_VERSION", "CONFIG_FILES", "INSTANCE_ID", - "JDBC_CONNECTION_URL" + "JDBC_CONNECTION_URL", + "JDBC_ENFORCE_SSL" ] }, "ConnectionType":{ @@ -2047,7 +2202,11 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example: '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'

" + }, + "CrawlerSecurityConfiguration":{ + "shape":"CrawlerSecurityConfiguration", + "documentation":"

The name of the SecurityConfiguration structure to be used by this Crawler.

" } }, "documentation":"

Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.

" @@ -2127,6 +2286,11 @@ "documentation":"

The operation cannot be performed because the crawler is already running.

", "exception":true }, + "CrawlerSecurityConfiguration":{ + "type":"string", + "max":128, + "min":0 + }, "CrawlerState":{ "type":"string", "enum":[ @@ -2252,7 +2416,11 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example: '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'

" + }, + "CrawlerSecurityConfiguration":{ + "shape":"CrawlerSecurityConfiguration", + "documentation":"

The name of the SecurityConfiguration structure to be used by this Crawler.

" } } }, @@ -2322,6 +2490,10 @@ "ExtraJarsS3Path":{ "shape":"GenericString", "documentation":"

Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint.

" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the SecurityConfiguration structure to be used with this DevEndpoint.

" } } }, @@ -2380,6 +2552,10 @@ "shape":"GenericString", "documentation":"

The reason for a current failure in this DevEndpoint.

" }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the SecurityConfiguration structure being used with this DevEndpoint.

" + }, "CreatedTimestamp":{ "shape":"TimestampValue", "documentation":"

The point in time at which this DevEndpoint was created.

" @@ -2463,11 +2639,15 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The job timeout in minutes. The default is 2880 minutes (48 hours).

" + "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

" }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job notification.

" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the SecurityConfiguration structure to be used with this job.

" } } }, @@ -2559,6 +2739,36 @@ } } }, + "CreateSecurityConfigurationRequest":{ + "type":"structure", + "required":[ + "Name", + "EncryptionConfiguration" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name for the new security configuration.

" + }, + "EncryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

The encryption configuration for the new security configuration.

" + } + } + }, + "CreateSecurityConfigurationResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name assigned to the new security configuration.

" + }, + "CreatedTimestamp":{ + "shape":"TimestampValue", + "documentation":"

The time at which the new security configuration was created.

" + } + } + }, "CreateTableRequest":{ "type":"structure", "required":[ @@ -2695,6 +2905,16 @@ "type":"list", "member":{"shape":"CodeGenNode"} }, + "DataCatalogEncryptionSettings":{ + "type":"structure", + "members":{ + "EncryptionAtRest":{ + "shape":"EncryptionAtRest", + "documentation":"

Specifies encryption-at-rest configuration for the Data Catalog.

" + } + }, + "documentation":"

Contains configuration information for maintaining Data Catalog security.

" + }, "Database":{ "type":"structure", "required":["Name"], @@ -2713,7 +2933,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

A list of key-value pairs that define parameters and properties of the database.

" + "documentation":"

These key-value pairs define parameters and properties of the database.

" }, "CreateTime":{ "shape":"Timestamp", @@ -2740,7 +2960,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

A list of key-value pairs that define parameters and properties of the database.

" + "documentation":"

These key-value pairs define parameters and properties of the database.

" } }, "documentation":"

The structure used to create or update a database.

" @@ -2897,6 +3117,21 @@ "members":{ } }, + "DeleteSecurityConfigurationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the security configuration to delete.

" + } + } + }, + "DeleteSecurityConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteTableRequest":{ "type":"structure", "required":[ @@ -3093,6 +3328,10 @@ "PublicKeys":{ "shape":"PublicKeysList", "documentation":"

A list of public keys to be used by the DevEndpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.

If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys: call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.

" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the SecurityConfiguration structure to be used with this DevEndpoint.

" } }, "documentation":"

A development endpoint where a developer can remotely debug ETL scripts.

" @@ -3129,6 +3368,39 @@ "type":"list", "member":{"shape":"DynamoDBTarget"} }, + "EncryptionAtRest":{ + "type":"structure", + "required":["CatalogEncryptionMode"], + "members":{ + "CatalogEncryptionMode":{ + "shape":"CatalogEncryptionMode", + "documentation":"

The encryption-at-rest mode for encrypting Data Catalog data.

" + }, + "SseAwsKmsKeyId":{ + "shape":"NameString", + "documentation":"

The ID of the AWS KMS key to use for encryption at rest.

" + } + }, + "documentation":"

Specifies encryption-at-rest configuration for the Data Catalog.

" + }, + "EncryptionConfiguration":{ + "type":"structure", + "members":{ + "S3Encryption":{ + "shape":"S3EncryptionList", + "documentation":"

The encryption configuration for S3 data.

" + }, + "CloudWatchEncryption":{ + "shape":"CloudWatchEncryption", + "documentation":"

The encryption configuration for CloudWatch.

" + }, + "JobBookmarksEncryption":{ + "shape":"JobBookmarksEncryption", + "documentation":"

The encryption configuration for Job Bookmarks.

" + } + }, + "documentation":"

Specifies an encryption configuration.

" + }, "EntityNotFoundException":{ "type":"structure", "members":{ @@ -3398,6 +3670,24 @@ } } }, + "GetDataCatalogEncryptionSettingsRequest":{ + "type":"structure", + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog for which to retrieve the security configuration. If none is supplied, the AWS account ID is used by default.

" + } + } + }, + "GetDataCatalogEncryptionSettingsResponse":{ + "type":"structure", + "members":{ + "DataCatalogEncryptionSettings":{ + "shape":"DataCatalogEncryptionSettings", + "documentation":"

The requested security configuration.

" + } + } + }, "GetDatabaseRequest":{ "type":"structure", "required":["Name"], @@ -3780,6 +4070,51 @@ } } }, + "GetSecurityConfigurationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the security configuration to retrieve.

" + } + } + }, + "GetSecurityConfigurationResponse":{ + "type":"structure", + "members":{ + "SecurityConfiguration":{ + "shape":"SecurityConfiguration", + "documentation":"

The requested security configuration.

" + } + } + }, + "GetSecurityConfigurationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of results to return.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if this is a continuation call.

" + } + } + }, + "GetSecurityConfigurationsResponse":{ + "type":"structure", + "members":{ + "SecurityConfigurations":{ + "shape":"SecurityConfigurationList", + "documentation":"

A list of security configurations.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if there are more security configurations to return.

" + } + } + }, "GetTableRequest":{ "type":"structure", "required":[ @@ -4050,6 +4385,17 @@ } } }, + "GlueEncryptionException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

An encryption operation failed.

", + "exception":true + }, "GrokClassifier":{ "type":"structure", "required":[ @@ -4231,11 +4577,15 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The job timeout in minutes.

" + "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

" }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job notification.

" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the SecurityConfiguration structure to be used with this job.

" } }, "documentation":"

Specifies a job definition.

" @@ -4266,6 +4616,27 @@ }, "documentation":"

Defines a point which a job can resume processing.

" }, + "JobBookmarksEncryption":{ + "type":"structure", + "members":{ + "JobBookmarksEncryptionMode":{ + "shape":"JobBookmarksEncryptionMode", + "documentation":"

The encryption mode to use for Job bookmarks data.

" + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The AWS ARN of the KMS key to be used to encrypt the data.

" + } + }, + "documentation":"

Specifies how Job bookmark data should be encrypted.

" + }, + "JobBookmarksEncryptionMode":{ + "type":"string", + "enum":[ + "DISABLED", + "CSE-KMS" + ] + }, "JobCommand":{ "type":"structure", "members":{ @@ -4346,11 +4717,19 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The job run timeout in minutes.

" + "documentation":"

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.

" }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job run notification.

" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the SecurityConfiguration structure to be used with this job run.

" + }, + "LogGroupName":{ + "shape":"GenericString", + "documentation":"

The name of the log group for secure logging, that can be server-side encrypted in CloudWatch using KMS. This name can be /aws-glue/jobs/, in which case the default encryption is NONE. If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then that security configuration will be used to encrypt the log group.

" } }, "documentation":"

Contains information about a job run.

" @@ -4412,11 +4791,15 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The job timeout in minutes. The default is 2880 minutes (48 hours).

" + "documentation":"

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).

" }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job notification.

" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the SecurityConfiguration structure to be used with this job.

" } }, "documentation":"

Specifies information used to update an existing job definition. Note that the previous job definition will be completely overwritten by this information.

" @@ -4459,6 +4842,10 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "KmsKeyArn":{ + "type":"string", + "pattern":"arn:aws:kms:.*" + }, "Language":{ "type":"string", "enum":[ @@ -4726,7 +5113,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

Partition parameters, in the form of a list of key-value pairs.

" + "documentation":"

These key-value pairs define partition parameters.

" }, "LastAnalyzedTime":{ "shape":"Timestamp", @@ -4770,7 +5157,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

Partition parameters, in the form of a list of key-value pairs.

" + "documentation":"

These key-value pairs define partition parameters.

" }, "LastAnalyzedTime":{ "shape":"Timestamp", @@ -4818,7 +5205,7 @@ }, "AvailabilityZone":{ "shape":"NameString", - "documentation":"

The connection's availability zone. This field is deprecated and has no effect.

" + "documentation":"

The connection's availability zone. This field is redundant, since the specified subnet implies the availability zone to be used. The field must be populated now, but will be deprecated in the future.

" } }, "documentation":"

Specifies the physical requirements for a connection.

" @@ -4874,6 +5261,25 @@ "member":{"shape":"GenericString"}, "max":5 }, + "PutDataCatalogEncryptionSettingsRequest":{ + "type":"structure", + "required":["DataCatalogEncryptionSettings"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog for which to set the security configuration. If none is supplied, the AWS account ID is used by default.

" + }, + "DataCatalogEncryptionSettings":{ + "shape":"DataCatalogEncryptionSettings", + "documentation":"

The security configuration to set.

" + } + } + }, + "PutDataCatalogEncryptionSettingsResponse":{ + "type":"structure", + "members":{ + } + }, "PythonScript":{"type":"string"}, "ResetJobBookmarkRequest":{ "type":"structure", @@ -4940,6 +5346,32 @@ }, "RoleString":{"type":"string"}, "RowTag":{"type":"string"}, + "S3Encryption":{ + "type":"structure", + "members":{ + "S3EncryptionMode":{ + "shape":"S3EncryptionMode", + "documentation":"

The encryption mode to use for S3 data.

" + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The AWS ARN of the KMS key to be used to encrypt the data.

" + } + }, + "documentation":"

Specifies how S3 data should be encrypted.

" + }, + "S3EncryptionList":{ + "type":"list", + "member":{"shape":"S3Encryption"} + }, + "S3EncryptionMode":{ + "type":"string", + "enum":[ + "DISABLED", + "SSE-KMS", + "SSE-S3" + ] + }, "S3Target":{ "type":"structure", "members":{ @@ -5030,6 +5462,28 @@ }, "SchemaPathString":{"type":"string"}, "ScriptLocationString":{"type":"string"}, + "SecurityConfiguration":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the security configuration.

" + }, + "CreatedTimeStamp":{ + "shape":"TimestampValue", + "documentation":"

The time at which this security configuration was created.

" + }, + "EncryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

The encryption configuration associated with this security configuration.

" + } + }, + "documentation":"

Specifies a security configuration.

" + }, + "SecurityConfigurationList":{ + "type":"list", + "member":{"shape":"SecurityConfiguration"} + }, "SecurityGroupIdList":{ "type":"list", "member":{"shape":"NameString"}, @@ -5067,7 +5521,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

A list of initialization parameters for the SerDe, in key-value form.

" + "documentation":"

These key-value pairs define initialization parameters for the SerDe.

" } }, "documentation":"

Information about a serialization/deserialization program (SerDe) which serves as an extractor and loader.

" @@ -5142,11 +5596,15 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The job run timeout in minutes. It overrides the timeout value of the job.

" + "documentation":"

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.

" }, "NotificationProperty":{ "shape":"NotificationProperty", "documentation":"

Specifies configuration properties of a job run notification.

" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

The name of the SecurityConfiguration structure to be used with this job run.

" } } }, @@ -5347,7 +5805,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

Properties associated with this table, as a list of key-value pairs.

" + "documentation":"

These key-value pairs define properties associated with the table.

" }, "CreatedBy":{ "shape":"NameString", @@ -5424,7 +5882,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

Properties associated with this table, as a list of key-value pairs.

" + "documentation":"

These key-value pairs define properties associated with the table.

" } }, "documentation":"

Structure used to create or update the table.

" @@ -5684,7 +6142,11 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example: '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'

" + }, + "CrawlerSecurityConfiguration":{ + "shape":"CrawlerSecurityConfiguration", + "documentation":"

The name of the SecurityConfiguration structure to be used by this Crawler.

" } } }, @@ -5756,7 +6218,7 @@ }, "DeletePublicKeys":{ "shape":"PublicKeysList", - "documentation":"

The list of public keys to be deleted from the DevEndpoint.

" + "documentation":"

The list of public keys to be deleted from the DevEndpoint.

" }, "CustomLibraries":{ "shape":"DevEndpointCustomLibraries", diff --git a/botocore/data/guardduty/2017-11-28/service-2.json b/botocore/data/guardduty/2017-11-28/service-2.json index b6197573..e5b5e645 100644 --- a/botocore/data/guardduty/2017-11-28/service-2.json +++ b/botocore/data/guardduty/2017-11-28/service-2.json @@ -1000,7 +1000,7 @@ } }, "documentation" : "AcceptInvitation request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "MasterId", "InvitationId" ] }, "AcceptInvitationResponse" : { "type" : "structure", @@ -1118,7 +1118,7 @@ } }, "documentation" : "ArchiveFindings request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "FindingIds" ] }, "ArchiveFindingsResponse" : { "type" : "structure", @@ -1249,13 +1249,25 @@ "CreateDetectorRequest" : { "type" : "structure", "members" : { + "ClientToken" : { + "shape" : "__stringMin0Max64", + "locationName" : "clientToken", + "documentation" : "The idempotency token for the create request.", + "idempotencyToken" : true + }, "Enable" : { "shape" : "Enable", "locationName" : "enable", "documentation" : "A boolean value that specifies whether the detector is to be enabled." + }, + "FindingPublishingFrequency" : { + "shape" : "FindingPublishingFrequency", + "locationName" : "findingPublishingFrequency", + "documentation" : "A enum value that specifies how frequently customer got Finding updates published." } }, - "documentation" : "CreateDetector request body." + "documentation" : "CreateDetector request body.", + "required" : [ "Enable" ] }, "CreateDetectorResponse" : { "type" : "structure", @@ -1309,7 +1321,7 @@ } }, "documentation" : "CreateFilterRequest request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "FindingCriteria", "Name" ] }, "CreateFilterResponse" : { "type" : "structure", @@ -1329,6 +1341,12 @@ "locationName" : "activate", "documentation" : "A boolean value that indicates whether GuardDuty is to start using the uploaded IPSet." 
}, + "ClientToken" : { + "shape" : "__stringMin0Max64", + "locationName" : "clientToken", + "documentation" : "The idempotency token for the create request.", + "idempotencyToken" : true + }, "DetectorId" : { "shape" : "__string", "location" : "uri", @@ -1352,7 +1370,7 @@ } }, "documentation" : "CreateIPSet request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "Format", "Activate", "Location", "Name" ] }, "CreateIPSetResponse" : { "type" : "structure", @@ -1379,7 +1397,7 @@ } }, "documentation" : "CreateMembers request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "AccountDetails" ] }, "CreateMembersResponse" : { "type" : "structure", @@ -1421,6 +1439,12 @@ "locationName" : "activate", "documentation" : "A boolean value that indicates whether GuardDuty is to start using the uploaded ThreatIntelSet." }, + "ClientToken" : { + "shape" : "__stringMin0Max64", + "locationName" : "clientToken", + "documentation" : "The idempotency token for the create request.", + "idempotencyToken" : true + }, "DetectorId" : { "shape" : "__string", "location" : "uri", @@ -1444,7 +1468,7 @@ } }, "documentation" : "CreateThreatIntelSet request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "Format", "Activate", "Location", "Name" ] }, "CreateThreatIntelSetResponse" : { "type" : "structure", @@ -1468,7 +1492,8 @@ "documentation" : "A list of account IDs of the AWS accounts that sent invitations to the current member account that you want to decline invitations from." } }, - "documentation" : "DeclineInvitations request body." + "documentation" : "DeclineInvitations request body.", + "required" : [ "AccountIds" ] }, "DeclineInvitationsResponse" : { "type" : "structure", @@ -1549,7 +1574,8 @@ "documentation" : "A list of account IDs of the AWS accounts that sent invitations to the current member account that you want to delete invitations from." } }, - "documentation" : "DeleteInvitations request body." 
+ "documentation" : "DeleteInvitations request body.", + "required" : [ "AccountIds" ] }, "DeleteInvitationsResponse" : { "type" : "structure", @@ -1577,7 +1603,7 @@ } }, "documentation" : "DeleteMembers request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "AccountIds" ] }, "DeleteMembersResponse" : { "type" : "structure", @@ -1659,7 +1685,7 @@ } }, "documentation" : "DisassociateMembers request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "AccountIds" ] }, "DisassociateMembersResponse" : { "type" : "structure", @@ -1855,6 +1881,11 @@ "shape" : "FindingId" } }, + "FindingPublishingFrequency" : { + "type" : "string", + "documentation" : "A enum value that specifies how frequently customer got Finding updates published.", + "enum" : [ "FIFTEEN_MINUTES", "ONE_HOUR", "SIX_HOURS" ] + }, "FindingStatisticType" : { "type" : "string", "documentation" : "The types of finding statistics.", @@ -1931,6 +1962,10 @@ "shape" : "CreatedAt", "locationName" : "createdAt" }, + "FindingPublishingFrequency" : { + "shape" : "FindingPublishingFrequency", + "locationName" : "findingPublishingFrequency" + }, "ServiceRole" : { "shape" : "ServiceRole", "locationName" : "serviceRole" @@ -2014,7 +2049,7 @@ } }, "documentation" : "GetFindings request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "FindingIds" ] }, "GetFindingsResponse" : { "type" : "structure", @@ -2046,7 +2081,7 @@ } }, "documentation" : "GetFindingsStatistics request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "FindingStatisticTypes" ] }, "GetFindingsStatisticsResponse" : { "type" : "structure", @@ -2152,7 +2187,7 @@ } }, "documentation" : "GetMembers request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "AccountIds" ] }, "GetMembersResponse" : { "type" : "structure", @@ -2375,7 +2410,7 @@ } }, "documentation" : "InviteMembers request body.", - "required" : [ "DetectorId" ] + "required" : [ 
"DetectorId", "AccountIds" ] }, "InviteMembersResponse" : { "type" : "structure", @@ -3197,7 +3232,7 @@ } }, "documentation" : "StartMonitoringMembers request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "AccountIds" ] }, "StartMonitoringMembersResponse" : { "type" : "structure", @@ -3225,7 +3260,7 @@ } }, "documentation" : "StopMonitoringMembers request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "AccountIds" ] }, "StopMonitoringMembersResponse" : { "type" : "structure", @@ -3297,7 +3332,7 @@ } }, "documentation" : "UnarchiveFindings request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "FindingIds" ] }, "UnarchiveFindingsResponse" : { "type" : "structure", @@ -3340,6 +3375,11 @@ "shape" : "Enable", "locationName" : "enable", "documentation" : "Updated boolean value for the detector that specifies whether the detector is enabled." + }, + "FindingPublishingFrequency" : { + "shape" : "FindingPublishingFrequency", + "locationName" : "findingPublishingFrequency", + "documentation" : "A enum value that specifies how frequently customer got Finding updates published." 
} }, "documentation" : "UpdateDetector request body.", @@ -3424,7 +3464,7 @@ } }, "documentation" : "UpdateFindingsFeedback request body.", - "required" : [ "DetectorId" ] + "required" : [ "DetectorId", "Feedback", "FindingIds" ] }, "UpdateFindingsFeedbackResponse" : { "type" : "structure", diff --git a/botocore/data/inspector/2015-08-18/service-2.json b/botocore/data/inspector/2015-08-18/service-2.json index 83b0bb1b..e0c5637d 100644 --- a/botocore/data/inspector/2015-08-18/service-2.json +++ b/botocore/data/inspector/2015-08-18/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Amazon Inspector", + "serviceId":"Inspector", "signatureVersion":"v4", "targetPrefix":"InspectorService" }, diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index ae7565de..369c4cdc 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -301,6 +301,7 @@ "output":{"shape":"CreateOTAUpdateResponse"}, "errors":[ {"shape":"InvalidRequestException"}, + {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ResourceAlreadyExistsException"}, {"shape":"ThrottlingException"}, @@ -410,6 +411,7 @@ "output":{"shape":"CreateStreamResponse"}, "errors":[ {"shape":"InvalidRequestException"}, + {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ResourceAlreadyExistsException"}, {"shape":"ThrottlingException"}, @@ -608,7 +610,8 @@ {"shape":"ThrottlingException"}, {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"VersionConflictException"} ], "documentation":"

Delete an OTA update.

" }, @@ -2757,7 +2760,7 @@ }, "AdditionalParameterMap":{ "type":"map", - "key":{"shape":"Key"}, + "key":{"shape":"AttributeKey"}, "value":{"shape":"Value"} }, "AlarmName":{"type":"string"}, @@ -2937,6 +2940,7 @@ }, "documentation":"

The output from the AttachThingPrincipal operation.

" }, + "AttributeKey":{"type":"string"}, "AttributeName":{ "type":"string", "max":128, @@ -2968,7 +2972,7 @@ }, "AttributesMap":{ "type":"map", - "key":{"shape":"Key"}, + "key":{"shape":"AttributeKey"}, "value":{"shape":"Value"} }, "AuditCheckConfiguration":{ @@ -3317,6 +3321,16 @@ "AwsIotJobArn":{"type":"string"}, "AwsIotJobId":{"type":"string"}, "AwsIotSqlVersion":{"type":"string"}, + "AwsJobExecutionsRolloutConfig":{ + "type":"structure", + "members":{ + "maximumPerMinute":{ + "shape":"MaximumPerMinute", + "documentation":"

The maximum number of OTA update job executions started per minute.

" + } + }, + "documentation":"

Configuration for the rollout of OTA updates.

" + }, "Behavior":{ "type":"structure", "required":["name"], @@ -3670,6 +3684,7 @@ "pattern":"(0x)?[a-fA-F0-9]+" }, "CertificateName":{"type":"string"}, + "CertificatePathOnDevice":{"type":"string"}, "CertificatePem":{ "type":"string", "documentation":"

The PEM of a certificate.

", @@ -3829,6 +3844,10 @@ "shape":"SigningJobId", "documentation":"

The ID of the AWSSignerJob which was created to sign the file.

" }, + "startSigningJobParameter":{ + "shape":"StartSigningJobParameter", + "documentation":"

Describes the code-signing job.

" + }, "customCodeSigning":{ "shape":"CustomCodeSigning", "documentation":"

A custom method for code signing a file.

" @@ -3839,10 +3858,6 @@ "CodeSigningCertificateChain":{ "type":"structure", "members":{ - "stream":{ - "shape":"Stream", - "documentation":"

A stream of the certificate chain files.

" - }, "certificateName":{ "shape":"CertificateName", "documentation":"

The name of the certificate.

" @@ -3857,10 +3872,6 @@ "CodeSigningSignature":{ "type":"structure", "members":{ - "stream":{ - "shape":"Stream", - "documentation":"

A stream of the code signing signature.

" - }, "inlineDocument":{ "shape":"Signature", "documentation":"

A base64 encoded binary representation of the code signing signature.

" @@ -4113,6 +4124,10 @@ "shape":"TargetSelection", "documentation":"

Specifies whether the update will continue to run (CONTINUOUS), or will be complete after all the things specified as targets have completed the update (SNAPSHOT). If continuous, the update may also be run on a thing when a change is detected in a target. For example, an update will run on a thing when the thing is added to a target group, even after the update was completed by all things originally in the group. Valid values: CONTINUOUS | SNAPSHOT.

" }, + "awsJobExecutionsRolloutConfig":{ + "shape":"AwsJobExecutionsRolloutConfig", + "documentation":"

Configuration for the rollout of OTA updates.

" + }, "files":{ "shape":"OTAUpdateFiles", "documentation":"

The files to be streamed by the OTA update.

" @@ -4734,6 +4749,18 @@ "documentation":"

The OTA update ID to delete.

", "location":"uri", "locationName":"otaUpdateId" + }, + "deleteStream":{ + "shape":"DeleteStream", + "documentation":"

Specifies if the stream associated with an OTA update should be deleted when the OTA update is deleted.

", + "location":"querystring", + "locationName":"deleteStream" + }, + "forceDeleteAWSJob":{ + "shape":"ForceDeleteAWSJob", + "documentation":"

Specifies if the AWS Job associated with the OTA update should be deleted with the OTA update is deleted.

", + "location":"querystring", + "locationName":"forceDeleteAWSJob" } } }, @@ -4847,6 +4874,7 @@ "members":{ } }, + "DeleteStream":{"type":"boolean"}, "DeleteStreamRequest":{ "type":"structure", "required":["streamId"], @@ -5157,7 +5185,7 @@ "members":{ "endpointType":{ "shape":"EndpointType", - "documentation":"

The endpoint type.

", + "documentation":"

The endpoint type (such as iot:Data, iot:CredentialProvider and iot:Jobs).

", "location":"querystring", "locationName":"endpointType" } @@ -5611,6 +5639,16 @@ "documentation":"

The output for the DescribeThingType operation.

" }, "Description":{"type":"string"}, + "Destination":{ + "type":"structure", + "members":{ + "s3Destination":{ + "shape":"S3Destination", + "documentation":"

Describes the location in S3 of the updated firmware.

" + } + }, + "documentation":"

Describes the location of the updated firmware.

" + }, "DetachPolicyRequest":{ "type":"structure", "required":[ @@ -5955,6 +5993,20 @@ "max":255, "min":0 }, + "FileLocation":{ + "type":"structure", + "members":{ + "stream":{ + "shape":"Stream", + "documentation":"

The stream that contains the OTA update.

" + }, + "s3Location":{ + "shape":"S3Location", + "documentation":"

The location of the updated firmware in S3.

" + } + }, + "documentation":"

The location of the OTA update.

" + }, "FileName":{"type":"string"}, "FirehoseAction":{ "type":"structure", @@ -5984,6 +6036,7 @@ }, "Flag":{"type":"boolean"}, "ForceDelete":{"type":"boolean"}, + "ForceDeleteAWSJob":{"type":"boolean"}, "ForceFlag":{"type":"boolean"}, "Forced":{"type":"boolean"}, "FunctionArn":{"type":"string"}, @@ -6032,6 +6085,10 @@ "thingIndexingConfiguration":{ "shape":"ThingIndexingConfiguration", "documentation":"

Thing indexing configuration.

" + }, + "thingGroupIndexingConfiguration":{ + "shape":"ThingGroupIndexingConfiguration", + "documentation":"

The index configuration.

" } } }, @@ -8409,6 +8466,11 @@ "max":250, "min":1 }, + "MaximumPerMinute":{ + "type":"integer", + "max":1000, + "min":1 + }, "Message":{ "type":"string", "max":128 @@ -8494,9 +8556,9 @@ "shape":"OTAUpdateFileVersion", "documentation":"

The file version.

" }, - "fileSource":{ - "shape":"Stream", - "documentation":"

The source of the file.

" + "fileLocation":{ + "shape":"FileLocation", + "documentation":"

The location of the updated firmware.

" }, "codeSigning":{ "shape":"CodeSigning", @@ -8513,7 +8575,7 @@ "OTAUpdateFiles":{ "type":"list", "member":{"shape":"OTAUpdateFile"}, - "max":10, + "max":50, "min":1 }, "OTAUpdateId":{ @@ -8549,6 +8611,10 @@ "shape":"Targets", "documentation":"

The targets of the OTA update.

" }, + "awsJobExecutionsRolloutConfig":{ + "shape":"AwsJobExecutionsRolloutConfig", + "documentation":"

Configuration for the rollout of OTA updates.

" + }, "targetSelection":{ "shape":"TargetSelection", "documentation":"

Specifies whether the OTA update will continue to run (CONTINUOUS), or will be complete after all those things specified as targets have completed the OTA update (SNAPSHOT). If continuous, the OTA update may also be run on a thing when a change is detected in a target. For example, an OTA update will run on a thing when the thing is added to a target group, even after the OTA update was completed by all things originally in the group.

" @@ -8664,6 +8730,7 @@ "max":100, "min":0 }, + "Platform":{"type":"string"}, "Policies":{ "type":"list", "member":{"shape":"Policy"} @@ -8752,6 +8819,7 @@ "type":"list", "member":{"shape":"Port"} }, + "Prefix":{"type":"string"}, "PresignedUrlConfig":{ "type":"structure", "members":{ @@ -8815,7 +8883,6 @@ }, "QueryString":{ "type":"string", - "max":1000, "min":1 }, "QueryVersion":{"type":"string"}, @@ -9291,6 +9358,20 @@ "type":"string", "min":1 }, + "S3Destination":{ + "type":"structure", + "members":{ + "bucket":{ + "shape":"S3Bucket", + "documentation":"

The S3 bucket that contains the updated firmware.

" + }, + "prefix":{ + "shape":"Prefix", + "documentation":"

The S3 prefix.

" + } + }, + "documentation":"

Describes the location of updated firmware in S3.

" + }, "S3FileUrl":{ "type":"string", "max":65535 @@ -9305,25 +9386,21 @@ }, "S3Location":{ "type":"structure", - "required":[ - "bucket", - "key" - ], "members":{ "bucket":{ "shape":"S3Bucket", - "documentation":"

The S3 bucket that contains the file to stream.

" + "documentation":"

The S3 bucket.

" }, "key":{ "shape":"S3Key", - "documentation":"

The name of the file within the S3 bucket to stream.

" + "documentation":"

The S3 key.

" }, "version":{ "shape":"S3Version", - "documentation":"

The file version.

" + "documentation":"

The S3 bucket version.

" } }, - "documentation":"

The location in S3 the contains the files to stream.

" + "documentation":"

The S3 location.

" }, "S3Version":{"type":"string"}, "SQL":{"type":"string"}, @@ -9427,6 +9504,10 @@ "things":{ "shape":"ThingDocumentList", "documentation":"

The things that match the search query.

" + }, + "thingGroups":{ + "shape":"ThingGroupDocumentList", + "documentation":"

The thing groups that match the search query.

" } } }, @@ -9613,6 +9694,25 @@ "Signature":{"type":"blob"}, "SignatureAlgorithm":{"type":"string"}, "SigningJobId":{"type":"string"}, + "SigningProfileName":{"type":"string"}, + "SigningProfileParameter":{ + "type":"structure", + "members":{ + "certificateArn":{ + "shape":"CertificateArn", + "documentation":"

Certificate ARN.

" + }, + "platform":{ + "shape":"Platform", + "documentation":"

The hardware platform of your device.

" + }, + "certificatePathOnDevice":{ + "shape":"CertificatePathOnDevice", + "documentation":"

The location of the code-signing certificate on your device.

" + } + }, + "documentation":"

Describes the code-signing profile.

" + }, "SkyfallMaxResults":{ "type":"integer", "max":250, @@ -9693,6 +9793,24 @@ } } }, + "StartSigningJobParameter":{ + "type":"structure", + "members":{ + "signingProfileParameter":{ + "shape":"SigningProfileParameter", + "documentation":"

Describes the code-signing profile.

" + }, + "signingProfileName":{ + "shape":"SigningProfileName", + "documentation":"

The code-signing profile name.

" + }, + "destination":{ + "shape":"Destination", + "documentation":"

The location to write the code-signed file.

" + } + }, + "documentation":"

Information required to start a signing job.

" + }, "StartThingRegistrationTaskRequest":{ "type":"structure", "required":[ @@ -9818,7 +9936,7 @@ "StreamFiles":{ "type":"list", "member":{"shape":"StreamFile"}, - "max":10, + "max":50, "min":1 }, "StreamId":{ @@ -10127,12 +10245,60 @@ "max":2028, "pattern":"[\\p{Graph}\\x20]*" }, + "ThingGroupDocument":{ + "type":"structure", + "members":{ + "thingGroupName":{ + "shape":"ThingGroupName", + "documentation":"

The thing group name.

" + }, + "thingGroupId":{ + "shape":"ThingGroupId", + "documentation":"

The thing group ID.

" + }, + "thingGroupDescription":{ + "shape":"ThingGroupDescription", + "documentation":"

The thing group description.

" + }, + "attributes":{ + "shape":"Attributes", + "documentation":"

The thing group attributes.

" + }, + "parentGroupNames":{ + "shape":"ThingGroupNameList", + "documentation":"

Parent group names.

" + } + }, + "documentation":"

The thing group search index document.

" + }, + "ThingGroupDocumentList":{ + "type":"list", + "member":{"shape":"ThingGroupDocument"} + }, "ThingGroupId":{ "type":"string", "max":128, "min":1, "pattern":"[a-zA-Z0-9\\-]+" }, + "ThingGroupIndexingConfiguration":{ + "type":"structure", + "required":["thingGroupIndexingMode"], + "members":{ + "thingGroupIndexingMode":{ + "shape":"ThingGroupIndexingMode", + "documentation":"

Thing group indexing mode.

" + } + }, + "documentation":"

Thing group indexing configuration.

" + }, + "ThingGroupIndexingMode":{ + "type":"string", + "enum":[ + "OFF", + "ON" + ] + }, "ThingGroupList":{ "type":"list", "member":{"shape":"ThingGroupName"} @@ -10186,6 +10352,7 @@ "ThingId":{"type":"string"}, "ThingIndexingConfiguration":{ "type":"structure", + "required":["thingIndexingMode"], "members":{ "thingIndexingMode":{ "shape":"ThingIndexingMode", @@ -10660,6 +10827,10 @@ "thingIndexingConfiguration":{ "shape":"ThingIndexingConfiguration", "documentation":"

Thing indexing configuration.

" + }, + "thingGroupIndexingConfiguration":{ + "shape":"ThingGroupIndexingConfiguration", + "documentation":"

Thing group indexing configuration.

" } } }, diff --git a/botocore/data/iotanalytics/2017-11-27/service-2.json b/botocore/data/iotanalytics/2017-11-27/service-2.json index a047fd12..c5eba84b 100644 --- a/botocore/data/iotanalytics/2017-11-27/service-2.json +++ b/botocore/data/iotanalytics/2017-11-27/service-2.json @@ -82,7 +82,7 @@ {"shape":"ThrottlingException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a data set. A data set stores data retrieved from a data store by applying an SQL action.

This operation creates the skeleton of a data set. To populate the data set, call \"CreateDatasetContent\".

" + "documentation":"

Creates a data set. A data set stores data retrieved from a data store by applying a \"queryAction\" (a SQL query) or a \"containerAction\" (executing a containerized application). This operation creates the skeleton of a data set. The data set can be populated manually by calling \"CreateDatasetContent\" or automatically according to a \"trigger\" you specify.

" }, "CreateDatasetContent":{ "name":"CreateDatasetContent", @@ -91,6 +91,7 @@ "requestUri":"/datasets/{datasetName}/content" }, "input":{"shape":"CreateDatasetContentRequest"}, + "output":{"shape":"CreateDatasetContentResponse"}, "errors":[ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"}, @@ -98,7 +99,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates the content of a data set by applying an SQL action.

" + "documentation":"

Creates the content of a data set by applying a SQL action.

" }, "CreateDatastore":{ "name":"CreateDatastore", @@ -341,6 +342,23 @@ ], "documentation":"

Retrieves a list of channels.

" }, + "ListDatasetContents":{ + "name":"ListDatasetContents", + "http":{ + "method":"GET", + "requestUri":"/datasets/{datasetName}/contents" + }, + "input":{"shape":"ListDatasetContentsRequest"}, + "output":{"shape":"ListDatasetContentsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists information about data set contents that have been created.

" + }, "ListDatasets":{ "name":"ListDatasets", "http":{ @@ -805,6 +823,40 @@ }, "documentation":"

A summary of information about a channel.

" }, + "ComputeType":{ + "type":"string", + "enum":[ + "ACU_1", + "ACU_2" + ] + }, + "ContainerDatasetAction":{ + "type":"structure", + "required":[ + "image", + "executionRoleArn", + "resourceConfiguration" + ], + "members":{ + "image":{ + "shape":"Image", + "documentation":"

The ARN of the Docker container stored in your account. The Docker container contains an application and needed support libraries and is used to generate data set contents.

" + }, + "executionRoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the role which gives permission to the system to access needed resources in order to run the \"containerAction\". This includes, at minimum, permission to retrieve the data set contents which are the input to the containerized application.

" + }, + "resourceConfiguration":{ + "shape":"ResourceConfiguration", + "documentation":"

Configuration of the resource which executes the \"containerAction\".

" + }, + "variables":{ + "shape":"Variables", + "documentation":"

The values of variables used within the context of the execution of the containerized application (basically, parameters passed to the application). Each variable must have a name and a value given by one of \"stringValue\", \"datasetContentVersionValue\", or \"outputFileUriValue\".

" + } + }, + "documentation":"

Information needed to run the \"containerAction\" to produce data set contents.

" + }, "CreateChannelRequest":{ "type":"structure", "required":["channelName"], @@ -852,6 +904,15 @@ } } }, + "CreateDatasetContentResponse":{ + "type":"structure", + "members":{ + "versionId":{ + "shape":"DatasetContentVersion", + "documentation":"

The version ID of the data set contents which are being created.

" + } + } + }, "CreateDatasetRequest":{ "type":"structure", "required":[ @@ -865,11 +926,15 @@ }, "actions":{ "shape":"DatasetActions", - "documentation":"

A list of actions that create the data set. Only one action is supported at this time.

" + "documentation":"

A list of actions that create the data set contents.

" }, "triggers":{ "shape":"DatasetTriggers", - "documentation":"

A list of triggers. A trigger causes data set content to be populated at a specified time or time interval. The list of triggers can be empty or contain up to five DataSetTrigger objects.

" + "documentation":"

A list of triggers. A trigger causes data set contents to be populated at a specified time interval or when another data set's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.

" + }, + "retentionPeriod":{ + "shape":"RetentionPeriod", + "documentation":"

[Optional] How long, in days, message data is kept for the data set. If not given or set to null, the latest version of the dataset content plus the latest succeeded version (if they are different) are retained for at most 90 days.

" }, "tags":{ "shape":"TagList", @@ -887,6 +952,10 @@ "datasetArn":{ "shape":"DatasetArn", "documentation":"

The ARN of the data set.

" + }, + "retentionPeriod":{ + "shape":"RetentionPeriod", + "documentation":"

How long, in days, message data is kept for the data set.

" } } }, @@ -972,7 +1041,7 @@ }, "actions":{ "shape":"DatasetActions", - "documentation":"

The \"DatasetAction\" objects that create the data set.

" + "documentation":"

The \"DatasetAction\" objects that automatically create the data set contents.

" }, "triggers":{ "shape":"DatasetTriggers", @@ -989,6 +1058,10 @@ "lastUpdateTime":{ "shape":"Timestamp", "documentation":"

The last time the data set was updated.

" + }, + "retentionPeriod":{ + "shape":"RetentionPeriod", + "documentation":"

[Optional] How long, in days, message data is kept for the data set.

" } }, "documentation":"

Information about a data set.

" @@ -998,11 +1071,15 @@ "members":{ "actionName":{ "shape":"DatasetActionName", - "documentation":"

The name of the data set action.

" + "documentation":"

The name of the data set action by which data set contents are automatically created.

" }, "queryAction":{ "shape":"SqlQueryDatasetAction", "documentation":"

An \"SqlQueryDatasetAction\" object that contains the SQL query to modify the message.

" + }, + "containerAction":{ + "shape":"ContainerDatasetAction", + "documentation":"

Information which allows the system to run a containerized application in order to create the data set contents. The application must be in a Docker container along with any needed support libraries.

" } }, "documentation":"

A \"DatasetAction\" object specifying the query that creates the data set content.

" @@ -1013,6 +1090,33 @@ "min":1, "pattern":"^[a-zA-Z0-9_]+$" }, + "DatasetActionSummaries":{ + "type":"list", + "member":{"shape":"DatasetActionSummary"}, + "max":1, + "min":1 + }, + "DatasetActionSummary":{ + "type":"structure", + "members":{ + "actionName":{ + "shape":"DatasetActionName", + "documentation":"

The name of the action which automatically creates the data set's contents.

" + }, + "actionType":{ + "shape":"DatasetActionType", + "documentation":"

The type of action by which the data set's contents are automatically created.

" + } + }, + "documentation":"

" + }, + "DatasetActionType":{ + "type":"string", + "enum":[ + "QUERY", + "CONTAINER" + ] + }, "DatasetActions":{ "type":"list", "member":{"shape":"DatasetAction"}, @@ -1033,16 +1137,57 @@ "members":{ "state":{ "shape":"DatasetContentState", - "documentation":"

The state of the data set. Can be one of \"CREATING\", \"SUCCEEDED\" or \"FAILED\".

" + "documentation":"

The state of the data set contents. Can be one of \"READY\", \"CREATING\", \"SUCCEEDED\" or \"FAILED\".

" }, "reason":{ "shape":"Reason", - "documentation":"

The reason the data set is in this state.

" + "documentation":"

The reason the data set contents are in this state.

" } }, - "documentation":"

The state of the data set and the reason it is in this state.

" + "documentation":"

The state of the data set contents and the reason they are in this state.

" + }, + "DatasetContentSummaries":{ + "type":"list", + "member":{"shape":"DatasetContentSummary"} + }, + "DatasetContentSummary":{ + "type":"structure", + "members":{ + "version":{ + "shape":"DatasetContentVersion", + "documentation":"

The version of the data set contents.

" + }, + "status":{ + "shape":"DatasetContentStatus", + "documentation":"

The status of the data set contents.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The actual time the creation of the data set contents was started.

" + }, + "scheduleTime":{ + "shape":"Timestamp", + "documentation":"

The time the creation of the data set contents was scheduled to start.

" + } + }, + "documentation":"

Summary information about data set contents.

" + }, + "DatasetContentVersion":{ + "type":"string", + "max":36, + "min":7 + }, + "DatasetContentVersionValue":{ + "type":"structure", + "required":["datasetName"], + "members":{ + "datasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the data set whose latest contents will be used as input to the notebook or application.

" + } + }, + "documentation":"

The data set whose latest contents will be used as input to the notebook or application.

" }, - "DatasetContentVersion":{"type":"string"}, "DatasetEntries":{ "type":"list", "member":{"shape":"DatasetEntry"} @@ -1097,6 +1242,14 @@ "lastUpdateTime":{ "shape":"Timestamp", "documentation":"

The last time the data set was updated.

" + }, + "triggers":{ + "shape":"DatasetTriggers", + "documentation":"

A list of triggers. A trigger causes data set content to be populated at a specified time interval or when another data set is populated. The list of triggers can be empty or contain up to five DataSetTrigger objects.

" + }, + "actions":{ + "shape":"DatasetActionSummaries", + "documentation":"

A list of \"DataActionSummary\" objects.

" } }, "documentation":"

A summary of information about a data set.

" @@ -1107,6 +1260,10 @@ "schedule":{ "shape":"Schedule", "documentation":"

The \"Schedule\" when the trigger is initiated.

" + }, + "dataset":{ + "shape":"TriggeringDataset", + "documentation":"

The data set whose content creation will trigger the creation of this data set's contents.

" } }, "documentation":"

The \"DatasetTrigger\" that specifies when the data set is automatically updated.

" @@ -1180,7 +1337,7 @@ "documentation":"

The estimated size of the data store.

" } }, - "documentation":"

Statistics information about the data store.

" + "documentation":"

Statistical information about the data store.

" }, "DatastoreStatus":{ "type":"string", @@ -1282,6 +1439,24 @@ } } }, + "DeltaTime":{ + "type":"structure", + "required":[ + "offsetSeconds", + "timeExpression" + ], + "members":{ + "offsetSeconds":{ + "shape":"OffsetSeconds", + "documentation":"

The number of seconds of estimated \"in flight\" lag time of message data.

" + }, + "timeExpression":{ + "shape":"TimeExpression", + "documentation":"

An expression by which the time of the message data may be determined. This may be the name of a timestamp field, or a SQL expression which is used to derive the time the message data was generated.

" + } + }, + "documentation":"

When you create data set contents using message data from a specified time frame, some message data may still be \"in flight\" when processing begins, and so will not arrive in time to be processed. Use this field to make allowances for the \"in flight\" time of your message data, so that data not processed from the previous time frame will be included with the next time frame. Without this, missed message data would be excluded from processing during the next time frame as well, because its timestamp places it within the previous time frame.

" + }, "DescribeChannelRequest":{ "type":"structure", "required":["channelName"], @@ -1294,7 +1469,7 @@ }, "includeStatistics":{ "shape":"IncludeStatisticsFlag", - "documentation":"

If true, include statistics about the channel in the response.

", + "documentation":"

If true, additional statistical information about the channel is included in the response.

", "location":"querystring", "locationName":"includeStatistics" } @@ -1346,7 +1521,7 @@ }, "includeStatistics":{ "shape":"IncludeStatisticsFlag", - "documentation":"

If true, include statistics about the data store in the response.

", + "documentation":"

If true, additional statistical information about the datastore is included in the response.

", "location":"querystring", "locationName":"includeStatistics" } @@ -1361,7 +1536,7 @@ }, "statistics":{ "shape":"DatastoreStatistics", - "documentation":"

Statistics about the data store. Included if the 'includeStatistics' parameter is set to true in the request.

" + "documentation":"

Additional statistical information about the data store. Included if the 'includeStatistics' parameter is set to true in the request.

" } } }, @@ -1464,6 +1639,7 @@ }, "documentation":"

An activity that adds information from the AWS IoT Device Shadows service to a message.

" }, + "DoubleValue":{"type":"double"}, "EndTime":{"type":"timestamp"}, "EntryName":{"type":"string"}, "ErrorCode":{"type":"string"}, @@ -1495,7 +1671,7 @@ }, "filter":{ "shape":"FilterExpression", - "documentation":"

An expression that looks like an SQL WHERE clause that must return a Boolean value.

" + "documentation":"

An expression that looks like a SQL WHERE clause that must return a Boolean value.

" }, "next":{ "shape":"ActivityName", @@ -1544,6 +1720,10 @@ } } }, + "Image":{ + "type":"string", + "max":255 + }, "IncludeStatisticsFlag":{"type":"boolean"}, "InternalFailureException":{ "type":"structure", @@ -1636,6 +1816,43 @@ } } }, + "ListDatasetContentsRequest":{ + "type":"structure", + "required":["datasetName"], + "members":{ + "datasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the data set whose contents information you want to list.

", + "location":"uri", + "locationName":"datasetName" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in this request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListDatasetContentsResponse":{ + "type":"structure", + "members":{ + "datasetContentSummaries":{ + "shape":"DatasetContentSummaries", + "documentation":"

Summary information about data set contents that have been created.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to retrieve the next set of results, or null if there are no more results.

" + } + } + }, "ListDatasetsRequest":{ "type":"structure", "members":{ @@ -1853,6 +2070,22 @@ "member":{"shape":"Message"} }, "NextToken":{"type":"string"}, + "OffsetSeconds":{"type":"integer"}, + "OutputFileName":{ + "type":"string", + "pattern":"[\\w\\.-]{1,255}" + }, + "OutputFileUriValue":{ + "type":"structure", + "required":["fileName"], + "members":{ + "fileName":{ + "shape":"OutputFileName", + "documentation":"

The URI of the location where data set contents are stored, usually the URI of a file in an S3 bucket.

" + } + }, + "documentation":"

The URI of the location where data set contents are stored, usually the URI of a file in an S3 bucket.

" + }, "Pipeline":{ "type":"structure", "members":{ @@ -1979,6 +2212,22 @@ } } }, + "QueryFilter":{ + "type":"structure", + "members":{ + "deltaTime":{ + "shape":"DeltaTime", + "documentation":"

Used to limit data to that which has arrived since the last execution of the action. When you create data set contents using message data from a specified time frame, some message data may still be \"in flight\" when processing begins, and so will not arrive in time to be processed. Use this field to make allowances for the \"in flight\" time of your message data, so that data not processed from a previous time frame will be included with the next time frame. Without this, missed message data would be excluded from processing during the next time frame as well, because its timestamp places it within the previous time frame.

" + } + }, + "documentation":"

Information which is used to filter message data, to segregate it according to the time frame in which it arrives.

" + }, + "QueryFilters":{ + "type":"list", + "member":{"shape":"QueryFilter"}, + "max":1, + "min":0 + }, "Reason":{"type":"string"}, "RemoveAttributesActivity":{ "type":"structure", @@ -2056,6 +2305,24 @@ "max":2048, "min":20 }, + "ResourceConfiguration":{ + "type":"structure", + "required":[ + "computeType", + "volumeSizeInGB" + ], + "members":{ + "computeType":{ + "shape":"ComputeType", + "documentation":"

The type of the compute resource used to execute the \"containerAction\". Possible values are: ACU_1 (vCPU=4, memory=16GiB) or ACU_2 (vCPU=8, memory=32GiB).

" + }, + "volumeSizeInGB":{ + "shape":"VolumeSizeInGB", + "documentation":"

The size (in GB) of the persistent storage available to the resource instance used to execute the \"containerAction\" (min: 1, max: 50).

" + } + }, + "documentation":"

The configuration of the resource used to execute the \"containerAction\".

" + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -2208,7 +2475,11 @@ "members":{ "sqlQuery":{ "shape":"SqlQuery", - "documentation":"

An SQL query string.

" + "documentation":"

A SQL query string.

" + }, + "filters":{ + "shape":"QueryFilters", + "documentation":"

Pre-filters applied to message data.

" } }, "documentation":"

The SQL query to modify the message.

" @@ -2243,6 +2514,11 @@ } }, "StartTime":{"type":"timestamp"}, + "StringValue":{ + "type":"string", + "max":1024, + "min":0 + }, "Tag":{ "type":"structure", "required":[ @@ -2316,7 +2592,19 @@ "error":{"httpStatusCode":429}, "exception":true }, + "TimeExpression":{"type":"string"}, "Timestamp":{"type":"timestamp"}, + "TriggeringDataset":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"DatasetName", + "documentation":"

The name of the data set whose content generation will trigger the new data set content generation.

" + } + }, + "documentation":"

Information about the data set whose content generation will trigger the new data set content generation.

" + }, "UnlimitedRetentionPeriod":{"type":"boolean"}, "UntagResourceRequest":{ "type":"structure", @@ -2375,11 +2663,15 @@ }, "actions":{ "shape":"DatasetActions", - "documentation":"

A list of \"DatasetAction\" objects. Only one action is supported at this time.

" + "documentation":"

A list of \"DatasetAction\" objects.

" }, "triggers":{ "shape":"DatasetTriggers", "documentation":"

A list of \"DatasetTrigger\" objects. The list can be empty or can contain up to five DataSetTrigger objects.

" + }, + "retentionPeriod":{ + "shape":"RetentionPeriod", + "documentation":"

How long, in days, message data is kept for the data set.

" } } }, @@ -2418,6 +2710,50 @@ } } }, + "Variable":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"VariableName", + "documentation":"

The name of the variable.

" + }, + "stringValue":{ + "shape":"StringValue", + "documentation":"

The value of the variable as a string.

" + }, + "doubleValue":{ + "shape":"DoubleValue", + "documentation":"

The value of the variable as a double (numeric).

", + "box":true + }, + "datasetContentVersionValue":{ + "shape":"DatasetContentVersionValue", + "documentation":"

The value of the variable as a structure that specifies a data set content version.

" + }, + "outputFileUriValue":{ + "shape":"OutputFileUriValue", + "documentation":"

The value of the variable as a structure that specifies an output file URI.

" + } + }, + "documentation":"

An instance of a variable to be passed to the \"containerAction\" execution. Each variable must have a name and a value given by one of \"stringValue\", \"datasetContentVersionValue\", or \"outputFileUriValue\".

" + }, + "VariableName":{ + "type":"string", + "max":256, + "min":1 + }, + "Variables":{ + "type":"list", + "member":{"shape":"Variable"}, + "max":50, + "min":0 + }, + "VolumeSizeInGB":{ + "type":"integer", + "max":50, + "min":1 + }, "errorMessage":{"type":"string"}, "resourceArn":{"type":"string"}, "resourceId":{"type":"string"} diff --git a/botocore/data/lambda/2014-11-11/service-2.json b/botocore/data/lambda/2014-11-11/service-2.json index 8351ac87..53e50010 100644 --- a/botocore/data/lambda/2014-11-11/service-2.json +++ b/botocore/data/lambda/2014-11-11/service-2.json @@ -3,6 +3,7 @@ "apiVersion":"2014-11-11", "endpointPrefix":"lambda", "serviceFullName":"AWS Lambda", + "serviceId":"Lambda", "signatureVersion":"v4", "protocol":"rest-json" }, diff --git a/botocore/data/lex-models/2017-04-19/service-2.json b/botocore/data/lex-models/2017-04-19/service-2.json index 21e445c4..63d4a087 100644 --- a/botocore/data/lex-models/2017-04-19/service-2.json +++ b/botocore/data/lex-models/2017-04-19/service-2.json @@ -3178,6 +3178,7 @@ "enum":[ "BUILDING", "READY", + "READY_BASIC_TESTING", "FAILED", "NOT_BUILT" ] diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index 4ff0125c..c7a38555 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -1132,11 +1132,15 @@ "members":{ "logGroupName":{ "shape":"LogGroupName", - "documentation":"

The name of the log group.

" + "documentation":"

The name of the log group to search.

" }, "logStreamNames":{ "shape":"InputLogStreamNames", - "documentation":"

Optional list of log stream names.

" + "documentation":"

Filters the results to only logs from the log streams in this list.

If you specify a value for both logStreamNamePrefix and logStreamNames, but the value for logStreamNamePrefix does not match any log stream names specified in logStreamNames, the action returns an InvalidParameterException error.

" + }, + "logStreamNamePrefix":{ + "shape":"LogStreamName", + "documentation":"

Filters the results to include only events from log streams that have names starting with this prefix.

If you specify a value for both logStreamNamePrefix and logStreamNames, but the value for logStreamNamePrefix does not match any log stream names specified in logStreamNames, the action returns an InvalidParameterException error.

" }, "startTime":{ "shape":"Timestamp", @@ -1148,7 +1152,7 @@ }, "filterPattern":{ "shape":"FilterPattern", - "documentation":"

The filter pattern to use. If not provided, all the events are matched.

" + "documentation":"

The filter pattern to use. For more information, see Filter and Pattern Syntax.

If not provided, all the events are matched.

" }, "nextToken":{ "shape":"NextToken", diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 454631b0..861f9e83 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -32,7 +32,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -40,7 +40,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -48,7 +48,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Permanently remove a job from a queue. Once you have canceled a job, you can't start it again. You can't delete a running job." @@ -73,7 +73,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -81,7 +81,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." 
}, { "shape": "TooManyRequestsException", @@ -89,7 +89,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Create a new transcoding job. For information about jobs and job settings, see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" @@ -114,7 +114,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -122,7 +122,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -130,7 +130,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Create a new job template. For information about job templates see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" @@ -155,7 +155,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -163,7 +163,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." 
+ "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -171,7 +171,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Create a new preset. For information about job templates see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" @@ -196,7 +196,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -204,7 +204,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -212,7 +212,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Create a new transcoding queue. For information about job templates see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" @@ -237,7 +237,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." 
}, { "shape": "ForbiddenException", @@ -245,7 +245,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -253,7 +253,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Permanently delete a job template you have created." @@ -278,7 +278,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -286,7 +286,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -294,7 +294,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Permanently delete a preset you have created." @@ -319,7 +319,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -327,7 +327,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." 
+ "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -335,7 +335,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Permanently delete a queue you have created." @@ -401,7 +401,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -409,7 +409,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -417,7 +417,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Retrieve the JSON for a specific completed transcoding job." @@ -442,7 +442,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -450,7 +450,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." 
}, { "shape": "TooManyRequestsException", @@ -458,7 +458,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Retrieve the JSON for a specific job template." @@ -483,7 +483,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -491,7 +491,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -499,7 +499,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Retrieve the JSON for a specific preset." @@ -524,7 +524,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -532,7 +532,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." 
}, { "shape": "TooManyRequestsException", @@ -540,7 +540,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Retrieve the JSON for a specific queue." @@ -565,7 +565,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -573,7 +573,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -581,7 +581,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Retrieve a JSON array of up to twenty of your job templates. This will return the templates themselves, not just a list of them. To retrieve the next twenty templates, use the nextToken string returned with the array" @@ -606,7 +606,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -614,7 +614,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." 
+ "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -622,7 +622,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Retrieve a JSON array of up to twenty of your most recently created jobs. This array includes in-process, completed, and errored jobs. This will return the jobs themselves, not just a list of the jobs. To retrieve the twenty next most recent jobs, use the nextToken string returned with the array." @@ -647,7 +647,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -655,7 +655,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -663,7 +663,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Retrieve a JSON array of up to twenty of your presets. This will return the presets themselves, not just a list of them. To retrieve the next twenty presets, use the nextToken string returned with the array." @@ -688,7 +688,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." 
+ "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -696,7 +696,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -704,7 +704,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Retrieve a JSON array of up to twenty of your queues. This will return the queues themselves, not just a list of them. To retrieve the next twenty queues, use the nextToken string returned with the array." @@ -729,7 +729,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -737,7 +737,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -745,7 +745,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Retrieve the tags for a MediaConvert resource." @@ -770,7 +770,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." 
+ "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -778,7 +778,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -786,16 +786,16 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], - "documentation": "Tag a MediaConvert queue, preset, or job template. For information about these resource types, see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" + "documentation": "Add tags to a MediaConvert queue, preset, or job template. For information about tagging, see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/tagging-resources.html" }, "UntagResource": { "name": "UntagResource", "http": { - "method": "DELETE", - "requestUri": "/2017-08-29/tags", + "method": "PUT", + "requestUri": "/2017-08-29/tags/{arn}", "responseCode": 200 }, "input": { @@ -811,7 +811,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -819,7 +819,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." 
}, { "shape": "TooManyRequestsException", @@ -827,10 +827,10 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], - "documentation": "Untag a MediaConvert queue, preset, or job template. For information about these resource types, see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" + "documentation": "Remove tags from a MediaConvert queue, preset, or job template. For information about tagging, see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/tagging-resources.html" }, "UpdateJobTemplate": { "name": "UpdateJobTemplate", @@ -852,7 +852,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -860,7 +860,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -868,7 +868,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Modify one of your existing job templates." @@ -893,7 +893,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." 
+ "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -901,7 +901,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -909,7 +909,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Modify one of your existing presets." @@ -934,7 +934,7 @@ }, { "shape": "InternalServerErrorException", - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, { "shape": "ForbiddenException", @@ -942,7 +942,7 @@ }, { "shape": "NotFoundException", - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, { "shape": "TooManyRequestsException", @@ -950,7 +950,7 @@ }, { "shape": "ConflictException", - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], "documentation": "Modify one of your existing queues." @@ -1043,11 +1043,7 @@ "locationName": "vbrQuality" } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. 
To select one of these modes, set the value of Bitrate control mode (rateControlMode) to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use the setting Bitrate (bitrate). Defaults and valid values depend on the rate control mode.", - "required": [ - "CodingMode", - "SampleRate" - ] + "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode (rateControlMode) to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use the setting Bitrate (bitrate). Defaults and valid values depend on the rate control mode." }, "AacSpecification": { "type": "string", @@ -1250,10 +1246,7 @@ "locationName": "wavSettings" } }, - "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value you choose for Audio codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings", - "required": [ - "Codec" - ] + "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value you choose for Audio codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. 
* AAC, AacSettings * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings" }, "AudioDefaultSelection": { "type": "string", @@ -1313,10 +1306,7 @@ "documentation": "Used for MS Smooth and Apple HLS outputs. Indicates the name displayed by the player (eg. English, or Director Commentary). Alphanumeric characters, spaces, and underscore are legal." } }, - "documentation": "Description of audio output", - "required": [ - "CodecSettings" - ] + "documentation": "Description of audio output" }, "AudioLanguageCodeControl": { "type": "string", @@ -1453,10 +1443,7 @@ "documentation": "Name of an Audio Selector within the same input to include in the group. Audio selector names are standardized, based on their order within the input (e.g., \"Audio Selector 1\"). The audio selector name parameter can be repeated to add any number of audio selectors to the group." } }, - "documentation": "Group of Audio Selectors", - "required": [ - "AudioSelectorNames" - ] + "documentation": "Group of Audio Selectors" }, "AudioSelectorType": { "type": "string", @@ -1500,6 +1487,15 @@ }, "documentation": "The service can't process your request because of a problem in the request. Please check your request form and syntax." }, + "BillingTagsSource": { + "type": "string", + "documentation": "Optional. Choose a tag type that AWS Billing and Cost Management will use to sort your AWS Elemental MediaConvert costs on any billing report that you set up. Any transcoding outputs that don't have an associated tag will appear in your billing report unsorted. If you don't choose a valid value for this field, your job outputs will appear on the billing report unsorted.", + "enum": [ + "QUEUE", + "PRESET", + "JOB_TEMPLATE" + ] + }, "BurninDestinationSettings": { "type": "structure", "members": { @@ -1578,13 +1574,7 @@ "documentation": "Specifies the vertical position of the caption relative to the top of the output in pixels. 
A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match." } }, - "documentation": "Burn-In Destination Settings.", - "required": [ - "OutlineColor", - "Alignment", - "OutlineSize", - "FontOpacity" - ] + "documentation": "Burn-In Destination Settings." }, "BurninSubtitleAlignment": { "type": "string", @@ -1691,11 +1681,7 @@ "documentation": "Human readable information to indicate captions available for players (eg. English, or Spanish). Alphanumeric characters, spaces, and underscore are legal." } }, - "documentation": "Description of Caption output", - "required": [ - "DestinationSettings", - "CaptionSelectorName" - ] + "documentation": "Description of Caption output" }, "CaptionDescriptionPreset": { "type": "structure", @@ -1720,10 +1706,7 @@ "documentation": "Human readable information to indicate captions available for players (eg. English, or Spanish). Alphanumeric characters, spaces, and underscore are legal." } }, - "documentation": "Caption Description for preset", - "required": [ - "DestinationSettings" - ] + "documentation": "Caption Description for preset" }, "CaptionDestinationSettings": { "type": "structure", @@ -1753,10 +1736,7 @@ "locationName": "ttmlDestinationSettings" } }, - "documentation": "Specific settings required by destination type. Note that burnin_destination_settings are not available if the source of the caption data is Embedded or Teletext.", - "required": [ - "DestinationType" - ] + "documentation": "Specific settings required by destination type. Note that burnin_destination_settings are not available if the source of the caption data is Embedded or Teletext." 
}, "CaptionDestinationType": { "type": "string", @@ -1790,10 +1770,7 @@ "locationName": "sourceSettings" } }, - "documentation": "Set up captions in your outputs by first selecting them from your input here.", - "required": [ - "SourceSettings" - ] + "documentation": "Set up captions in your outputs by first selecting them from your input here." }, "CaptionSourceSettings": { "type": "structure", @@ -1823,10 +1800,7 @@ "locationName": "teletextSourceSettings" } }, - "documentation": "Source settings (SourceSettings) contains the group of settings for captions in the input.", - "required": [ - "SourceType" - ] + "documentation": "Source settings (SourceSettings) contains the group of settings for captions in the input." }, "CaptionSourceType": { "type": "string", @@ -1852,10 +1826,7 @@ "documentation": "List of output channels" } }, - "documentation": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel. Units are in dB. Acceptable values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification).", - "required": [ - "OutputChannels" - ] + "documentation": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel. Units are in dB. Acceptable values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification)." }, "CmafClientCache": { "type": "string", @@ -1898,10 +1869,7 @@ "locationName": "type" } }, - "documentation": "Settings for CMAF encryption", - "required": [ - "Type" - ] + "documentation": "Settings for CMAF encryption" }, "CmafEncryptionType": { "type": "string", @@ -1981,11 +1949,7 @@ "locationName": "writeHlsManifest" } }, - "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to CMAF_GROUP_SETTINGS. 
Each output in a CMAF Output Group may only contain a single video, audio, or caption output.", - "required": [ - "FragmentLength", - "SegmentLength" - ] + "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to CMAF_GROUP_SETTINGS. Each output in a CMAF Output Group may only contain a single video, audio, or caption output." }, "CmafInitializationVectorInManifest": { "type": "string", @@ -2122,6 +2086,13 @@ "FALLBACK" ] }, + "Commitment": { + "type": "string", + "documentation": "The length of time that you commit to when you set up a pricing plan contract for a reserved queue.", + "enum": [ + "ONE_YEAR" + ] + }, "ConflictException": { "type": "structure", "members": { @@ -2134,7 +2105,7 @@ "error": { "httpStatusCode": 409 }, - "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." }, "ContainerSettings": { "type": "structure", @@ -2164,10 +2135,7 @@ "locationName": "mp4Settings" } }, - "documentation": "Container specific settings.", - "required": [ - "Container" - ] + "documentation": "Container specific settings." }, "ContainerType": { "type": "string", @@ -2188,6 +2156,10 @@ "CreateJobRequest": { "type": "structure", "members": { + "BillingTagsSource": { + "shape": "BillingTagsSource", + "locationName": "billingTagsSource" + }, "ClientRequestToken": { "shape": "__string", "locationName": "clientRequestToken", @@ -2328,12 +2300,22 @@ "Description": { "shape": "__string", "locationName": "description", - "documentation": "Optional. A description of the queue you are creating." + "documentation": "Optional. A description of the queue that you are creating." }, "Name": { "shape": "__string", "locationName": "name", - "documentation": "The name of the queue you are creating." 
+ "documentation": "The name of the queue that you are creating." + }, + "PricingPlan": { + "shape": "PricingPlan", + "locationName": "pricingPlan", + "documentation": "Optional; default is on-demand. Specifies whether the pricing plan for the queue is on-demand or reserved. The pricing plan for the queue determines whether you pay on-demand or reserved pricing for the transcoding jobs you run through the queue. For reserved queue pricing, you must set up a contract. You can create a reserved queue contract through the AWS Elemental MediaConvert console." + }, + "ReservationPlanSettings": { + "shape": "ReservationPlanSettings", + "locationName": "reservationPlanSettings", + "documentation": "Details about the pricing plan for your reserved queue. Required for reserved queues and not applicable to on-demand queues." }, "Tags": { "shape": "__mapOf__string", @@ -2362,10 +2344,7 @@ "locationName": "spekeKeyProvider" } }, - "documentation": "Specifies DRM settings for DASH outputs.", - "required": [ - "SpekeKeyProvider" - ] + "documentation": "Specifies DRM settings for DASH outputs." }, "DashIsoGroupSettings": { "type": "structure", @@ -2407,13 +2386,14 @@ "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", "documentation": "Length of mpd segments to create (in seconds). Note that segments will end on the next keyframe after this number of seconds, so actual segment length may be longer. When Emit Single File is checked, the segmentation is internal to a single output file and it does not cause the creation of many output files as in other output types." + }, + "WriteSegmentTimelineInRepresentation": { + "shape": "DashIsoWriteSegmentTimelineInRepresentation", + "locationName": "writeSegmentTimelineInRepresentation", + "documentation": "When ENABLED, segment durations are indicated in the manifest using SegmentTimeline and SegmentTimeline will be promoted down into Representation from AdaptationSet." 
} }, - "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to DASH_ISO_GROUP_SETTINGS.", - "required": [ - "SegmentLength", - "FragmentLength" - ] + "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to DASH_ISO_GROUP_SETTINGS." }, "DashIsoHbbtvCompliance": { "type": "string", @@ -2431,6 +2411,14 @@ "SEGMENTED_FILES" ] }, + "DashIsoWriteSegmentTimelineInRepresentation": { + "type": "string", + "documentation": "When ENABLED, segment durations are indicated in the manifest using SegmentTimeline and SegmentTimeline will be promoted down into Representation from AdaptationSet.", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, "DeinterlaceAlgorithm": { "type": "string", "documentation": "Only applies when you set Deinterlacer (DeinterlaceMode) to Deinterlace (DEINTERLACE) or Adaptive (ADAPTIVE). Motion adaptive interpolate (INTERPOLATE) produces sharper pictures, while blend (BLEND) produces smoother motion. Use (INTERPOLATE_TICKER) OR (BLEND_TICKER) if your source file includes a ticker, such as a scrolling headline at the bottom of the frame.", @@ -2533,6 +2521,14 @@ "members": { } }, + "DescribeEndpointsMode": { + "type": "string", + "documentation": "Optional field, defaults to DEFAULT. Specify DEFAULT for this operation to return your endpoints if any exist, or to create an endpoint for you and return it if one doesn't already exist. Specify GET_ONLY to return your endpoints if any exist, or an empty list if none exist.", + "enum": [ + "DEFAULT", + "GET_ONLY" + ] + }, "DescribeEndpointsRequest": { "type": "structure", "members": { @@ -2541,6 +2537,10 @@ "locationName": "maxResults", "documentation": "Optional. Max number of endpoints, up to twenty, that will be returned at one time." 
}, + "Mode": { + "shape": "DescribeEndpointsMode", + "locationName": "mode" + }, "NextToken": { "shape": "__string", "locationName": "nextToken", @@ -2591,12 +2591,7 @@ "documentation": "The number of milliseconds between instances of this table in the output transport stream." } }, - "documentation": "Inserts DVB Network Information Table (NIT) at the specified table repetition interval.", - "required": [ - "NetworkName", - "NitInterval", - "NetworkId" - ] + "documentation": "Inserts DVB Network Information Table (NIT) at the specified table repetition interval." }, "DvbSdtSettings": { "type": "structure", @@ -2701,13 +2696,7 @@ "documentation": "Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match." } }, - "documentation": "DVB-Sub Destination Settings", - "required": [ - "OutlineColor", - "Alignment", - "OutlineSize", - "FontOpacity" - ] + "documentation": "DVB-Sub Destination Settings" }, "DvbSubSourceSettings": { "type": "structure", @@ -2787,10 +2776,7 @@ "documentation": "The number of milliseconds between instances of this table in the output transport stream." } }, - "documentation": "Inserts DVB Time and Date Table (TDT) at the specified table repetition interval.", - "required": [ - "TdtInterval" - ] + "documentation": "Inserts DVB Time and Date Table (TDT) at the specified table repetition interval." 
}, "Eac3AttenuationControl": { "type": "string", @@ -3054,7 +3040,7 @@ "documentation": "URL of endpoint" } }, - "documentation": "Describes account specific API endpoint" + "documentation": "Describes an account-specific API endpoint." }, "ExceptionBody": { "type": "structure", @@ -3120,10 +3106,7 @@ "documentation": "Specifies a time delta in seconds to offset the captions from the source file." } }, - "documentation": "Settings for File-based Captions in Source", - "required": [ - "SourceFile" - ] + "documentation": "Settings for File-based Captions in Source" }, "ForbiddenException": { "type": "structure", @@ -3240,7 +3223,7 @@ "Name": { "shape": "__string", "locationName": "name", - "documentation": "The name of the queue.", + "documentation": "The name of the queue that you want information about.", "location": "uri" } }, @@ -3410,10 +3393,7 @@ "documentation": "Required when you use QVBR rate control mode. That is, when you specify qvbrSettings within h264Settings. Specify the target quality level for this output, from 1 to 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9." } }, - "documentation": "Settings for quality-defined variable bitrate encoding with the H.264 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode.", - "required": [ - "QvbrQualityLevel" - ] + "documentation": "Settings for quality-defined variable bitrate encoding with the H.264 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode." }, "H264RateControlMode": { "type": "string", @@ -3812,10 +3792,7 @@ "documentation": "Required when you use QVBR rate control mode. That is, when you specify qvbrSettings within h265Settings. 
Specify the target quality level for this output, from 1 to 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9." } }, - "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode.", - "required": [ - "QvbrQualityLevel" - ] + "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode." }, "H265RateControlMode": { "type": "string", @@ -4150,11 +4127,7 @@ "documentation": "HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction." } }, - "documentation": "Use the HDR master display (Hdr10Metadata) settings to correct HDR metadata or to provide missing metadata. These values vary depending on the input video and must be provided by a color grader. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that these settings are not color correction. Note that if you are creating HDR outputs inside of an HLS CMAF package, to comply with the Apple specification, you must use the HVC1 for H.265 setting.", - "required": [ - "MaxContentLightLevel", - "MaxFrameAverageLightLevel" - ] + "documentation": "Use the HDR master display (Hdr10Metadata) settings to correct HDR metadata or to provide missing metadata. These values vary depending on the input video and must be provided by a color grader. 
Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that these settings are not color correction. Note that if you are creating HDR outputs inside of an HLS CMAF package, to comply with the Apple specification, you must use the HVC1 for H.265 setting." }, "HlsAdMarkers": { "type": "string", @@ -4260,10 +4233,7 @@ "locationName": "type" } }, - "documentation": "Settings for HLS encryption", - "required": [ - "Type" - ] + "documentation": "Settings for HLS encryption" }, "HlsEncryptionType": { "type": "string", @@ -4381,11 +4351,7 @@ "documentation": "Provides an extra millisecond delta offset to fine tune the timestamps." } }, - "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to HLS_GROUP_SETTINGS.", - "required": [ - "MinSegmentLength", - "SegmentLength" - ] + "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to HLS_GROUP_SETTINGS." }, "HlsIFrameOnlyManifest": { "type": "string", @@ -4511,11 +4477,7 @@ "documentation": "Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format." } }, - "documentation": "To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) to specify the base 64 encoded string and use Timecode (TimeCode) to specify the time when the tag should be inserted. To insert multiple ID3 tags in your output, create multiple instances of ID3 insertion (Id3Insertion).", - "required": [ - "Timecode", - "Id3" - ] + "documentation": "To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) to specify the base 64 encoded string and use Timecode (TimeCode) to specify the time when the tag should be inserted. To insert multiple ID3 tags in your output, create multiple instances of ID3 insertion (Id3Insertion)." }, "ImageInserter": { "type": "structure", @@ -4526,10 +4488,7 @@ "documentation": "Image to insert. Must be 32 bit windows BMP, PNG, or TGA file. Must not be larger than the output frames." 
} }, - "documentation": "Enable the Image inserter (ImageInserter) feature to include a graphic overlay on your video. Enable or disable this feature for each output individually. This setting is disabled by default.", - "required": [ - "InsertableImages" - ] + "documentation": "Enable the Image inserter (ImageInserter) feature to include a graphic overlay on your video. Enable or disable this feature for each output individually. This setting is disabled by default." }, "Input": { "type": "structure", @@ -4594,10 +4553,7 @@ "locationName": "videoSelector" } }, - "documentation": "Specifies media input", - "required": [ - "FileInput" - ] + "documentation": "Specifies media input" }, "InputClipping": { "type": "structure", @@ -4776,14 +4732,7 @@ "documentation": "Specify the Width (Width) of the inserted image. Use a value that is less than or equal to the video resolution width. Leave this setting blank to use the native width of the image." } }, - "documentation": "Settings for Insertable Image", - "required": [ - "ImageY", - "ImageX", - "ImageInserterInput", - "Opacity", - "Layer" - ] + "documentation": "Settings for Insertable Image" }, "InternalServerErrorException": { "type": "structure", @@ -4797,7 +4746,7 @@ "error": { "httpStatusCode": 500 }, - "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + "documentation": "The service encountered an unexpected condition and can't fulfill your request." }, "Job": { "type": "structure", @@ -4807,6 +4756,10 @@ "locationName": "arn", "documentation": "An identifier for this resource that is unique within all of AWS." 
}, + "BillingTagsSource": { + "shape": "BillingTagsSource", + "locationName": "billingTagsSource" + }, "CreatedAt": { "shape": "__timestampUnix", "locationName": "createdAt", @@ -4908,11 +4861,7 @@ "locationName": "timedMetadataInsertion" } }, - "documentation": "JobSettings contains all the transcode settings for a job.", - "required": [ - "OutputGroups", - "Inputs" - ] + "documentation": "JobSettings contains all the transcode settings for a job." }, "JobStatus": { "type": "string", @@ -5025,10 +4974,7 @@ "locationName": "timedMetadataInsertion" } }, - "documentation": "JobTemplateSettings contains all the transcode settings saved in the template that will be applied to jobs created from it.", - "required": [ - "OutputGroups" - ] + "documentation": "JobTemplateSettings contains all the transcode settings saved in the template that will be applied to jobs created from it." }, "LanguageCode": { "type": "string", @@ -5409,7 +5355,7 @@ "Queues": { "shape": "__listOfQueue", "locationName": "queues", - "documentation": "List of queues" + "documentation": "List of queues." } } }, @@ -6246,10 +6192,7 @@ "locationName": "spekeKeyProvider" } }, - "documentation": "If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider.", - "required": [ - "SpekeKeyProvider" - ] + "documentation": "If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider." }, "MsSmoothGroupSettings": { "type": "structure", @@ -6277,10 +6220,7 @@ "locationName": "manifestEncoding" } }, - "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to MS_SMOOTH_GROUP_SETTINGS.", - "required": [ - "FragmentLength" - ] + "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to MS_SMOOTH_GROUP_SETTINGS." 
}, "MsSmoothManifestEncoding": { "type": "string", @@ -6322,10 +6262,7 @@ "locationName": "spatialFilterSettings" } }, - "documentation": "Enable the Noise reducer (NoiseReducer) feature to remove noise from your video output if necessary. Enable or disable this feature for each output individually. This setting is disabled by default. When you enable Noise reducer (NoiseReducer), you must also select a value for Noise reducer filter (NoiseReducerFilter).", - "required": [ - "Filter" - ] + "documentation": "Enable the Noise reducer (NoiseReducer) feature to remove noise from your video output if necessary. Enable or disable this feature for each output individually. This setting is disabled by default. When you enable Noise reducer (NoiseReducer), you must also select a value for Noise reducer filter (NoiseReducerFilter)." }, "NoiseReducerFilter": { "type": "string", @@ -6384,7 +6321,7 @@ "error": { "httpStatusCode": 404 }, - "documentation": "The resource you requested does not exist." + "documentation": "The resource you requested doesn't exist." }, "Order": { "type": "string", @@ -6447,10 +6384,7 @@ "documentation": "List of input channels" } }, - "documentation": "OutputChannel mapping settings.", - "required": [ - "InputChannels" - ] + "documentation": "OutputChannel mapping settings." }, "OutputDetail": { "type": "structure", @@ -6490,11 +6424,7 @@ "documentation": "This object holds groups of encoding settings, one group of settings per output." 
} }, - "documentation": "Group of outputs", - "required": [ - "Outputs", - "OutputGroupSettings" - ] + "documentation": "Group of outputs" }, "OutputGroupDetail": { "type": "structure", @@ -6535,10 +6465,7 @@ "locationName": "type" } }, - "documentation": "Output Group settings, including type", - "required": [ - "Type" - ] + "documentation": "Output Group settings, including type" }, "OutputGroupType": { "type": "string", @@ -6654,6 +6581,14 @@ }, "documentation": "Settings for preset" }, + "PricingPlan": { + "type": "string", + "documentation": "Specifies whether the pricing plan for the queue is On-demand or Reserved. The pricing plan for the queue determines whether you pay On-demand or Reserved pricing for the transcoding jobs that you run through the queue. For Reserved queue pricing, you must set up a contract. You can create a Reserved queue contract through the AWS Elemental MediaConvert console.", + "enum": [ + "ON_DEMAND", + "RESERVED" + ] + }, "ProresCodecProfile": { "type": "string", "documentation": "Use Profile (ProResCodecProfile) to specifiy the type of Apple ProRes codec to use for this output.", @@ -6780,44 +6715,55 @@ "CreatedAt": { "shape": "__timestampUnix", "locationName": "createdAt", - "documentation": "The timestamp in epoch seconds for queue creation." + "documentation": "The time stamp in epoch seconds for queue creation." }, "Description": { "shape": "__string", "locationName": "description", - "documentation": "An optional description you create for each queue." + "documentation": "An optional description that you create for each queue." }, "LastUpdated": { "shape": "__timestampUnix", "locationName": "lastUpdated", - "documentation": "The timestamp in epoch seconds when the queue was last updated." + "documentation": "The time stamp in epoch seconds when the queue was last updated." }, "Name": { "shape": "__string", "locationName": "name", - "documentation": "A name you create for each queue. 
Each name must be unique within your account." + "documentation": "A name that you create for each queue. Each name must be unique within your account." + }, + "PricingPlan": { + "shape": "PricingPlan", + "locationName": "pricingPlan", + "documentation": "Specifies whether the pricing plan for the queue is On-demand or Reserved. The pricing plan for the queue determines whether you pay On-demand or Reserved pricing for the transcoding jobs that you run through the queue. For Reserved queue pricing, you must set up a contract. You can create a Reserved queue contract through the AWS Elemental MediaConvert console." }, "ProgressingJobsCount": { "shape": "__integer", "locationName": "progressingJobsCount", - "documentation": "Estimated number of jobs in PROGRESSING status." + "documentation": "The estimated number of jobs with a PROGRESSING status." + }, + "ReservationPlan": { + "shape": "ReservationPlan", + "locationName": "reservationPlan", + "documentation": "Details about the pricing plan for your reserved queue. Required for reserved queues and not applicable to on-demand queues." }, "Status": { "shape": "QueueStatus", - "locationName": "status" + "locationName": "status", + "documentation": "Queues can be ACTIVE or PAUSED. If you pause a queue, the service won't begin processing jobs in that queue. Jobs that are running when you pause the queue continue to run until they finish or result in an error." }, "SubmittedJobsCount": { "shape": "__integer", "locationName": "submittedJobsCount", - "documentation": "Estimated number of jobs in SUBMITTED status." + "documentation": "The estimated number of jobs with a SUBMITTED status." }, "Type": { "shape": "Type", "locationName": "type", - "documentation": "A queue can be of two types: system or custom. System or built-in queues can't be modified or deleted by the user." + "documentation": "Specifies whether this queue is system or custom. System queues are built in. You can't modify or delete system queues. 
You can create and modify custom queues." } }, - "documentation": "MediaConvert jobs are submitted to a queue. Unless specified otherwise jobs are submitted to a built-in default queue. User can create additional queues to separate the jobs of different categories or priority.", + "documentation": "You can use queues to manage the resources that are available to your AWS account for running multiple transcoding jobs at the same time. If you don't specify a queue, the service sends all jobs through the default queue. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/about-resource-allocation-and-job-prioritization.html.", "required": [ "Name" ] @@ -6832,7 +6778,7 @@ }, "QueueStatus": { "type": "string", - "documentation": "Queues can be ACTIVE or PAUSED. If you pause a queue, jobs in that queue will not begin. Jobs running when a queue is paused continue to run until they finish or error out.", + "documentation": "Queues can be ACTIVE or PAUSED. If you pause a queue, jobs in that queue won't begin. Jobs that are running when you pause a queue continue to run until they finish or result in an error.", "enum": [ "ACTIVE", "PAUSED" @@ -6862,13 +6808,7 @@ "documentation": "The distance, in pixels, between the rectangle and the top edge of the video frame. Specify only even numbers." } }, - "documentation": "Use Rectangle to identify a specific area of the video frame.", - "required": [ - "X", - "Y", - "Height", - "Width" - ] + "documentation": "Use Rectangle to identify a specific area of the video frame." }, "RemixSettings": { "type": "structure", @@ -6888,11 +6828,84 @@ "documentation": "Specify the number of channels in this output after remixing. Valid values: 1, 2, 4, 6, 8" } }, - "documentation": "Use Manual audio remixing (RemixSettings) to adjust audio levels for each audio channel in each output of your job. 
With audio remixing, you can output more or fewer audio channels than your input audio source provides.", + "documentation": "Use Manual audio remixing (RemixSettings) to adjust audio levels for each audio channel in each output of your job. With audio remixing, you can output more or fewer audio channels than your input audio source provides." + }, + "RenewalType": { + "type": "string", + "documentation": "Specifies whether the pricing plan contract for your reserved queue automatically renews (AUTO_RENEW) or expires (EXPIRE) at the end of the contract period.", + "enum": [ + "AUTO_RENEW", + "EXPIRE" + ] + }, + "ReservationPlan": { + "type": "structure", + "members": { + "Commitment": { + "shape": "Commitment", + "locationName": "commitment", + "documentation": "The length of time that you commit to when you set up a pricing plan contract for a reserved queue." + }, + "ExpiresAt": { + "shape": "__timestampUnix", + "locationName": "expiresAt", + "documentation": "The time stamp, in epoch seconds, for when the pricing plan for this reserved queue expires." + }, + "PurchasedAt": { + "shape": "__timestampUnix", + "locationName": "purchasedAt", + "documentation": "The time stamp in epoch seconds when the reserved queue's reservation plan was created." + }, + "RenewalType": { + "shape": "RenewalType", + "locationName": "renewalType", + "documentation": "Specifies whether the pricing plan contract for your reserved queue automatically renews (AUTO_RENEW) or expires (EXPIRE) at the end of the contract period." + }, + "ReservedSlots": { + "shape": "__integer", + "locationName": "reservedSlots", + "documentation": "Specifies the number of reserved transcode slots (RTSs) for this queue. The number of RTS determines how many jobs the queue can process in parallel; each RTS can process one job at a time. To increase this number, create a replacement contract through the AWS Elemental MediaConvert console." 
+ }, + "Status": { + "shape": "ReservationPlanStatus", + "locationName": "status", + "documentation": "Specifies whether the pricing plan for your reserved queue is ACTIVE or EXPIRED." + } + }, + "documentation": "Details about the pricing plan for your reserved queue. Required for reserved queues and not applicable to on-demand queues." + }, + "ReservationPlanSettings": { + "type": "structure", + "members": { + "Commitment": { + "shape": "Commitment", + "locationName": "commitment", + "documentation": "The length of time that you commit to when you set up a pricing plan contract for a reserved queue." + }, + "RenewalType": { + "shape": "RenewalType", + "locationName": "renewalType", + "documentation": "Specifies whether the pricing plan contract for your reserved queue automatically renews (AUTO_RENEW) or expires (EXPIRE) at the end of the contract period." + }, + "ReservedSlots": { + "shape": "__integer", + "locationName": "reservedSlots", + "documentation": "Specifies the number of reserved transcode slots (RTSs) for this queue. The number of RTS determines how many jobs the queue can process in parallel; each RTS can process one job at a time. To increase this number, create a replacement contract through the AWS Elemental MediaConvert console." + } + }, + "documentation": "Details about the pricing plan for your reserved queue. Required for reserved queues and not applicable to on-demand queues.", "required": [ - "ChannelsOut", - "ChannelMapping", - "ChannelsIn" + "ReservedSlots", + "Commitment", + "RenewalType" + ] + }, + "ReservationPlanStatus": { + "type": "string", + "documentation": "Specifies whether the pricing plan for your reserved queue is ACTIVE or EXPIRED.", + "enum": [ + "ACTIVE", + "EXPIRED" ] }, "ResourceTags": { @@ -6967,12 +6980,7 @@ "documentation": "Use URL (Url) to specify the SPEKE-compliant server that will provide keys for content." 
} }, - "documentation": "Settings for use with a SPEKE key provider", - "required": [ - "ResourceId", - "SystemIds", - "Url" - ] + "documentation": "Settings for use with a SPEKE key provider" }, "StaticKeyProvider": { "type": "structure", @@ -6998,11 +7006,7 @@ "documentation": "Relates to DRM implementation. The location of the license server used for protecting content." } }, - "documentation": "Settings for use with a SPEKE key provider.", - "required": [ - "Url", - "StaticKeyValue" - ] + "documentation": "Settings for use with a SPEKE key provider." }, "TagResourceRequest": { "type": "structure", @@ -7136,10 +7140,7 @@ "documentation": "Id3Insertions contains the array of Id3Insertion instances." } }, - "documentation": "Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in your job. To include timed metadata, you must enable it here, enable it in each output container, and specify tags and timecodes in ID3 insertion (Id3Insertion) objects.", - "required": [ - "Id3Insertions" - ] + "documentation": "Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in your job. To include timed metadata, you must enable it here, enable it in each output container, and specify tags and timecodes in ID3 insertion (Id3Insertion) objects." }, "Timing": { "type": "structure", @@ -7207,14 +7208,18 @@ "Arn": { "shape": "__string", "locationName": "arn", - "documentation": "The Amazon Resource Name (ARN) of the resource that you want to remove tags from. To get the ARN, send a GET request with the resource name." + "documentation": "The Amazon Resource Name (ARN) of the resource that you want to remove tags from. To get the ARN, send a GET request with the resource name.", + "location": "uri" }, "TagKeys": { "shape": "__listOf__string", "locationName": "tagKeys", "documentation": "The keys of the tags that you want to remove from the resource." 
} - } + }, + "required": [ + "Arn" + ] }, "UntagResourceResponse": { "type": "structure", @@ -7311,12 +7316,18 @@ "Name": { "shape": "__string", "locationName": "name", - "documentation": "The name of the queue you are modifying.", + "documentation": "The name of the queue that you are modifying.", "location": "uri" }, + "ReservationPlanSettings": { + "shape": "ReservationPlanSettings", + "locationName": "reservationPlanSettings", + "documentation": "Details about the pricing plan for your reserved queue. Required for reserved queues and not applicable to on-demand queues." + }, "Status": { "shape": "QueueStatus", - "locationName": "status" + "locationName": "status", + "documentation": "Pause or activate a queue by changing its status between ACTIVE and PAUSED. If you pause a queue, jobs in that queue won't begin. Jobs that are running when you pause the queue continue to run until they finish or result in an error." } }, "required": [ @@ -7371,10 +7382,7 @@ "locationName": "proresSettings" } }, - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value you choose for Video codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, FrameCaptureSettings", - "required": [ - "Codec" - ] + "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value you choose for Video codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. 
* H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, FrameCaptureSettings" }, "VideoDescription": { "type": "structure", @@ -7447,10 +7455,7 @@ "documentation": "Use Width (Width) to define the video resolution width, in pixels, for this output. If you don't provide a value here, the service will use the input width." } }, - "documentation": "Settings for video outputs", - "required": [ - "CodecSettings" - ] + "documentation": "Settings for video outputs" }, "VideoDetail": { "type": "structure", diff --git a/botocore/data/medialive/2017-10-14/paginators-1.json b/botocore/data/medialive/2017-10-14/paginators-1.json index c9bc58df..e0a675bc 100644 --- a/botocore/data/medialive/2017-10-14/paginators-1.json +++ b/botocore/data/medialive/2017-10-14/paginators-1.json @@ -29,6 +29,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Reservations" + }, + "DescribeSchedule": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ScheduleActions" } } } diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index a7a31279..2b006b85 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -12,6 +12,56 @@ "serviceAbbreviation": "MediaLive" }, "operations": { + "BatchUpdateSchedule": { + "name": "BatchUpdateSchedule", + "http": { + "method": "PUT", + "requestUri": "/prod/channels/{channelId}/schedule", + "responseCode": 200 + }, + "input": { + "shape": "BatchUpdateScheduleRequest" + }, + "output": { + "shape": "BatchUpdateScheduleResponse", + "documentation": "Successful update of the schedule." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid." + }, + { + "shape": "UnprocessableEntityException", + "documentation": "The update schedule request failed validation." 
+ }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to update the channel schedule." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad Gateway Error" + }, + { + "shape": "NotFoundException", + "documentation": "The specified channel id does not exist." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout Error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on update schedule calls." + } + ], + "documentation": "Update a channel schedule" + }, "CreateChannel": { "name": "CreateChannel", "http": { @@ -572,6 +622,52 @@ ], "documentation": "Get details for a reservation." }, + "DescribeSchedule": { + "name": "DescribeSchedule", + "http": { + "method": "GET", + "requestUri": "/prod/channels/{channelId}/schedule", + "responseCode": 200 + }, + "input": { + "shape": "DescribeScheduleRequest" + }, + "output": { + "shape": "DescribeScheduleResponse", + "documentation": "An array of channel schedule actions." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to describe the channel schedule." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad Gateway Error" + }, + { + "shape": "NotFoundException", + "documentation": "The channel you're requesting a schedule describe for does not exist." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout Error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on describe schedule calls." 
+ } + ], + "documentation": "Get a channel schedule" + }, "ListChannels": { "name": "ListChannels", "http": { @@ -1716,6 +1812,119 @@ }, "documentation": "Placeholder documentation for BadRequestException" }, + "BatchScheduleActionCreateRequest": { + "type": "structure", + "members": { + "ScheduleActions": { + "shape": "__listOfScheduleAction", + "locationName": "scheduleActions", + "documentation": "A list of schedule actions to create." + } + }, + "documentation": "A list of schedule actions to create.", + "required": [ + "ScheduleActions" + ] + }, + "BatchScheduleActionCreateResult": { + "type": "structure", + "members": { + "ScheduleActions": { + "shape": "__listOfScheduleAction", + "locationName": "scheduleActions", + "documentation": "Returned list of created schedule actions." + } + }, + "documentation": "Returned list of created schedule actions.", + "required": [ + "ScheduleActions" + ] + }, + "BatchScheduleActionDeleteRequest": { + "type": "structure", + "members": { + "ActionNames": { + "shape": "__listOf__string", + "locationName": "actionNames", + "documentation": "A list of schedule actions to delete, identified by unique name." + } + }, + "documentation": "A list of schedule actions to delete.", + "required": [ + "ActionNames" + ] + }, + "BatchScheduleActionDeleteResult": { + "type": "structure", + "members": { + "ScheduleActions": { + "shape": "__listOfScheduleAction", + "locationName": "scheduleActions", + "documentation": "Returned list of deleted schedule actions." + } + }, + "documentation": "Returned list of deleted schedule actions.", + "required": [ + "ScheduleActions" + ] + }, + "BatchUpdateScheduleRequest": { + "type": "structure", + "members": { + "ChannelId": { + "shape": "__string", + "location": "uri", + "locationName": "channelId", + "documentation": "Id of the channel whose schedule is being updated." 
+ }, + "Creates": { + "shape": "BatchScheduleActionCreateRequest", + "locationName": "creates", + "documentation": "Schedule actions to create in the schedule." + }, + "Deletes": { + "shape": "BatchScheduleActionDeleteRequest", + "locationName": "deletes", + "documentation": "Schedule actions to delete from the schedule." + } + }, + "documentation": "List of actions to create and list of actions to delete.", + "required": [ + "ChannelId" + ] + }, + "BatchUpdateScheduleResponse": { + "type": "structure", + "members": { + "Creates": { + "shape": "BatchScheduleActionCreateResult", + "locationName": "creates", + "documentation": "Schedule actions created in the schedule." + }, + "Deletes": { + "shape": "BatchScheduleActionDeleteResult", + "locationName": "deletes", + "documentation": "Schedule actions deleted from the schedule." + } + }, + "documentation": "Response to a batch update schedule call." + }, + "BatchUpdateScheduleResult": { + "type": "structure", + "members": { + "Creates": { + "shape": "BatchScheduleActionCreateResult", + "locationName": "creates", + "documentation": "Schedule actions created in the schedule." + }, + "Deletes": { + "shape": "BatchScheduleActionDeleteResult", + "locationName": "deletes", + "documentation": "Schedule actions deleted from the schedule." + } + }, + "documentation": "Results of a batch schedule update." + }, "BlackoutSlate": { "type": "structure", "members": { @@ -2155,7 +2364,7 @@ "ValidationErrors": { "shape": "__listOfValidationError", "locationName": "validationErrors", - "documentation": "A collection of validation error responses from attempting to create a channel with a bouquet of settings." + "documentation": "A collection of validation error responses." 
} }, "documentation": "Placeholder documentation for ChannelConfigurationValidationError" @@ -3100,6 +3309,47 @@ }, "documentation": "Placeholder documentation for DescribeReservationResponse" }, + "DescribeScheduleRequest": { + "type": "structure", + "members": { + "ChannelId": { + "shape": "__string", + "location": "uri", + "locationName": "channelId", + "documentation": "Id of the channel whose schedule is being updated." + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + } + }, + "required": [ + "ChannelId" + ], + "documentation": "Request for a describe schedule call." + }, + "DescribeScheduleResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "The next token; for use in pagination." + }, + "ScheduleActions": { + "shape": "__listOfScheduleAction", + "locationName": "scheduleActions", + "documentation": "The list of schedule actions." + } + }, + "documentation": "Response for a describe schedule call." + }, "DvbNitSettings": { "type": "structure", "members": { @@ -3727,6 +3977,17 @@ ], "documentation": "Placeholder documentation for FixedAfd" }, + "FixedModeScheduleActionStartSettings": { + "type": "structure", + "members": { + "Time": { + "shape": "__string", + "locationName": "time", + "documentation": "Fixed timestamp action start. Conforms to ISO-8601." + } + }, + "documentation": "Fixed mode schedule action start settings" + }, "ForbiddenException": { "type": "structure", "members": { @@ -6917,6 +7178,93 @@ ], "documentation": "Placeholder documentation for RtmpOutputSettings" }, + "ScheduleAction": { + "type": "structure", + "members": { + "ActionName": { + "shape": "__string", + "locationName": "actionName", + "documentation": "The name of the action, must be unique within the schedule." 
+ }, + "ScheduleActionSettings": { + "shape": "ScheduleActionSettings", + "locationName": "scheduleActionSettings", + "documentation": "Settings for this schedule action." + }, + "ScheduleActionStartSettings": { + "shape": "ScheduleActionStartSettings", + "locationName": "scheduleActionStartSettings", + "documentation": "When the action takes effect." + } + }, + "documentation": "A single schedule action.", + "required": [ + "ActionName", + "ScheduleActionStartSettings", + "ScheduleActionSettings" + ] + }, + "ScheduleActionSettings": { + "type": "structure", + "members": { + "Scte35ReturnToNetworkSettings": { + "shape": "Scte35ReturnToNetworkScheduleActionSettings", + "locationName": "scte35ReturnToNetworkSettings", + "documentation": "SCTE-35 Return to Network Settings" + }, + "Scte35SpliceInsertSettings": { + "shape": "Scte35SpliceInsertScheduleActionSettings", + "locationName": "scte35SpliceInsertSettings", + "documentation": "SCTE-35 Splice Insert Settings" + }, + "Scte35TimeSignalSettings": { + "shape": "Scte35TimeSignalScheduleActionSettings", + "locationName": "scte35TimeSignalSettings", + "documentation": "SCTE-35 Time Signal Settings" + }, + "StaticImageActivateSettings": { + "shape": "StaticImageActivateScheduleActionSettings", + "locationName": "staticImageActivateSettings", + "documentation": "Static Image Activate" + }, + "StaticImageDeactivateSettings": { + "shape": "StaticImageDeactivateScheduleActionSettings", + "locationName": "staticImageDeactivateSettings", + "documentation": "Static Image Deactivate" + } + }, + "documentation": "Settings for a single schedule action." + }, + "ScheduleActionStartSettings": { + "type": "structure", + "members": { + "FixedModeScheduleActionStartSettings": { + "shape": "FixedModeScheduleActionStartSettings", + "locationName": "fixedModeScheduleActionStartSettings", + "documentation": "Fixed timestamp action start. Conforms to ISO-8601." + } + }, + "documentation": "When the schedule action starts." 
+ }, + "ScheduleDescribeResultModel": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "The next token; for use in pagination." + }, + "ScheduleActions": { + "shape": "__listOfScheduleAction", + "locationName": "scheduleActions", + "documentation": "The list of schedule actions." + } + }, + "documentation": "A complete schedule description.", + "required": [ + "ScheduleActions" + ] + }, "Scte20Convert608To708": { "type": "string", "enum": [ @@ -6980,6 +7328,179 @@ ], "documentation": "Placeholder documentation for Scte35AposWebDeliveryAllowedBehavior" }, + "Scte35ArchiveAllowedFlag": { + "type": "string", + "documentation": "SCTE-35 segmentation_descriptor archive_allowed_flag.", + "enum": [ + "ARCHIVE_NOT_ALLOWED", + "ARCHIVE_ALLOWED" + ] + }, + "Scte35DeliveryRestrictions": { + "type": "structure", + "members": { + "ArchiveAllowedFlag": { + "shape": "Scte35ArchiveAllowedFlag", + "locationName": "archiveAllowedFlag", + "documentation": "SCTE-35 segmentation_descriptor archive_allowed_flag." + }, + "DeviceRestrictions": { + "shape": "Scte35DeviceRestrictions", + "locationName": "deviceRestrictions", + "documentation": "SCTE-35 segmentation_descriptor web_delivery_allowed_flag." + }, + "NoRegionalBlackoutFlag": { + "shape": "Scte35NoRegionalBlackoutFlag", + "locationName": "noRegionalBlackoutFlag", + "documentation": "SCTE-35 segmentation_descriptor no_regional_blackout_flag." + }, + "WebDeliveryAllowedFlag": { + "shape": "Scte35WebDeliveryAllowedFlag", + "locationName": "webDeliveryAllowedFlag", + "documentation": "SCTE-35 segmentation_descriptor web_delivery_allowed_flag." 
+ } + }, + "documentation": "SCTE-35 Delivery Restrictions.", + "required": [ + "DeviceRestrictions", + "ArchiveAllowedFlag", + "WebDeliveryAllowedFlag", + "NoRegionalBlackoutFlag" + ] + }, + "Scte35Descriptor": { + "type": "structure", + "members": { + "Scte35DescriptorSettings": { + "shape": "Scte35DescriptorSettings", + "locationName": "scte35DescriptorSettings", + "documentation": "SCTE-35 Descriptor Settings." + } + }, + "documentation": "SCTE-35 Descriptor.", + "required": [ + "Scte35DescriptorSettings" + ] + }, + "Scte35DescriptorSettings": { + "type": "structure", + "members": { + "SegmentationDescriptorScte35DescriptorSettings": { + "shape": "Scte35SegmentationDescriptor", + "locationName": "segmentationDescriptorScte35DescriptorSettings", + "documentation": "SCTE-35 Segmentation Descriptor." + } + }, + "documentation": "SCTE-35 Descriptor settings.", + "required": [ + "SegmentationDescriptorScte35DescriptorSettings" + ] + }, + "Scte35DeviceRestrictions": { + "type": "string", + "documentation": "SCTE-35 Device Restrictions.", + "enum": [ + "NONE", + "RESTRICT_GROUP0", + "RESTRICT_GROUP1", + "RESTRICT_GROUP2" + ] + }, + "Scte35NoRegionalBlackoutFlag": { + "type": "string", + "documentation": "SCTE-35 segmentation_descriptor no_regional_blackout_flag.", + "enum": [ + "REGIONAL_BLACKOUT", + "NO_REGIONAL_BLACKOUT" + ] + }, + "Scte35ReturnToNetworkScheduleActionSettings": { + "type": "structure", + "members": { + "SpliceEventId": { + "shape": "__integerMin0Max4294967295", + "locationName": "spliceEventId", + "documentation": "The splice_event_id for the SCTE-35 splice_insert, as defined in SCTE-35." 
+ } + }, + "documentation": "SCTE-35 Return to Network Settings.", + "required": [ + "SpliceEventId" + ] + }, + "Scte35SegmentationCancelIndicator": { + "type": "string", + "documentation": "SCTE-35 segmentation_descriptor segmentation_event_cancel_indicator.", + "enum": [ + "SEGMENTATION_EVENT_NOT_CANCELED", + "SEGMENTATION_EVENT_CANCELED" + ] + }, + "Scte35SegmentationDescriptor": { + "type": "structure", + "members": { + "DeliveryRestrictions": { + "shape": "Scte35DeliveryRestrictions", + "locationName": "deliveryRestrictions", + "documentation": "SCTE-35 delivery restrictions." + }, + "SegmentNum": { + "shape": "__integerMin0Max255", + "locationName": "segmentNum", + "documentation": "SCTE-35 segmentation_descriptor segment_num." + }, + "SegmentationCancelIndicator": { + "shape": "Scte35SegmentationCancelIndicator", + "locationName": "segmentationCancelIndicator", + "documentation": "SCTE-35 segmentation_descriptor segmentation_event_cancel_indicator." + }, + "SegmentationDuration": { + "shape": "__integerMin0Max1099511627775", + "locationName": "segmentationDuration", + "documentation": "SCTE-35 segmentation_descriptor segmentation_duration specified in 90 KHz clock ticks." + }, + "SegmentationEventId": { + "shape": "__integerMin0Max4294967295", + "locationName": "segmentationEventId", + "documentation": "SCTE-35 segmentation_descriptor segmentation_event_id." + }, + "SegmentationTypeId": { + "shape": "__integerMin0Max255", + "locationName": "segmentationTypeId", + "documentation": "SCTE-35 segmentation_descriptor segmentation_type_id." + }, + "SegmentationUpid": { + "shape": "__string", + "locationName": "segmentationUpid", + "documentation": "SCTE-35 segmentation_descriptor segmentation_upid as a hex string." + }, + "SegmentationUpidType": { + "shape": "__integerMin0Max255", + "locationName": "segmentationUpidType", + "documentation": "SCTE-35 segmentation_descriptor segmentation_upid_type." 
+ }, + "SegmentsExpected": { + "shape": "__integerMin0Max255", + "locationName": "segmentsExpected", + "documentation": "SCTE-35 segmentation_descriptor segments_expected." + }, + "SubSegmentNum": { + "shape": "__integerMin0Max255", + "locationName": "subSegmentNum", + "documentation": "SCTE-35 segmentation_descriptor sub_segment_num." + }, + "SubSegmentsExpected": { + "shape": "__integerMin0Max255", + "locationName": "subSegmentsExpected", + "documentation": "SCTE-35 segmentation_descriptor sub_segments_expected." + } + }, + "documentation": "SCTE-35 Segmentation Descriptor.", + "required": [ + "SegmentationEventId", + "SegmentationCancelIndicator" + ] + }, "Scte35SpliceInsert": { "type": "structure", "members": { @@ -7009,6 +7530,25 @@ ], "documentation": "Placeholder documentation for Scte35SpliceInsertNoRegionalBlackoutBehavior" }, + "Scte35SpliceInsertScheduleActionSettings": { + "type": "structure", + "members": { + "Duration": { + "shape": "__integerMin0Max8589934591", + "locationName": "duration", + "documentation": "The duration for the SCTE-35 splice_insert specified in 90KHz clock ticks. When duration is not specified the expectation is that a Scte35ReturnToNetwork action will be scheduled." + }, + "SpliceEventId": { + "shape": "__integerMin0Max4294967295", + "locationName": "spliceEventId", + "documentation": "The splice_event_id for the SCTE-35 splice_insert, as defined in SCTE-35." + } + }, + "documentation": "SCTE-35 Splice Insert Settings.", + "required": [ + "SpliceEventId" + ] + }, "Scte35SpliceInsertWebDeliveryAllowedBehavior": { "type": "string", "enum": [ @@ -7038,6 +7578,28 @@ }, "documentation": "Placeholder documentation for Scte35TimeSignalApos" }, + "Scte35TimeSignalScheduleActionSettings": { + "type": "structure", + "members": { + "Scte35Descriptors": { + "shape": "__listOfScte35Descriptor", + "locationName": "scte35Descriptors", + "documentation": "The list of SCTE-35 descriptors accompanying the SCTE-35 time_signal." 
+ } + }, + "documentation": "SCTE-35 Time Signal Settings.", + "required": [ + "Scte35Descriptors" + ] + }, + "Scte35WebDeliveryAllowedFlag": { + "type": "string", + "documentation": "SCTE-35 segmentation_descriptor web_delivery_allowed_flag.", + "enum": [ + "WEB_DELIVERY_NOT_ALLOWED", + "WEB_DELIVERY_ALLOWED" + ] + }, "SmoothGroupAudioOnlyTimecodeControl": { "type": "string", "enum": [ @@ -7205,6 +7767,81 @@ }, "documentation": "Placeholder documentation for StartChannelResponse" }, + "StaticImageActivateScheduleActionSettings": { + "type": "structure", + "members": { + "Duration": { + "shape": "__integerMin0", + "locationName": "duration", + "documentation": "The duration in milliseconds for the image to remain in the video. If omitted or set to 0, duration is infinite and image will remain until explicitly deactivated." + }, + "FadeIn": { + "shape": "__integerMin0", + "locationName": "fadeIn", + "documentation": "The time in milliseconds for the image to fade in. Defaults to 0." + }, + "FadeOut": { + "shape": "__integerMin0", + "locationName": "fadeOut", + "documentation": "The time in milliseconds for the image to fade out. Defaults to 0." + }, + "Height": { + "shape": "__integerMin1", + "locationName": "height", + "documentation": "The height of the image when inserted into the video. Defaults to the native height of the image." + }, + "Image": { + "shape": "InputLocation", + "locationName": "image", + "documentation": "The image to overlay on the video. Must be a 32 bit BMP, PNG, or TGA file. Must not be larger than the input video." + }, + "ImageX": { + "shape": "__integerMin0", + "locationName": "imageX", + "documentation": "Placement of the left edge of the image on the horizontal axis in pixels. 0 is the left edge of the frame. Defaults to 0." + }, + "ImageY": { + "shape": "__integerMin0", + "locationName": "imageY", + "documentation": "Placement of the top edge of the image on the vertical axis in pixels. 0 is the top edge of the frame. Defaults to 0." 
+ }, + "Layer": { + "shape": "__integerMin0Max7", + "locationName": "layer", + "documentation": "The Z order of the inserted image. Images with higher layer values will be inserted on top of images with lower layer values. Permitted values are 0-7 inclusive. Defaults to 0." + }, + "Opacity": { + "shape": "__integerMin0Max100", + "locationName": "opacity", + "documentation": "Opacity of image where 0 is transparent and 100 is fully opaque. Defaults to 100." + }, + "Width": { + "shape": "__integerMin1", + "locationName": "width", + "documentation": "The width of the image when inserted into the video. Defaults to the native width of the image." + } + }, + "documentation": "Static image activate.", + "required": [ + "Image" + ] + }, + "StaticImageDeactivateScheduleActionSettings": { + "type": "structure", + "members": { + "FadeOut": { + "shape": "__integerMin0", + "locationName": "fadeOut", + "documentation": "The time in milliseconds for the image to fade out. Defaults to 0." + }, + "Layer": { + "shape": "__integerMin0Max7", + "locationName": "layer", + "documentation": "The Z order of the inserted image. Images with higher layer values will be inserted on top of images with lower layer values. Permitted values are 0-7 inclusive. Defaults to 0." + } + }, + "documentation": "Static image deactivate." + }, "StaticKeySettings": { "type": "structure", "members": { @@ -7459,7 +8096,7 @@ "ValidationErrors": { "shape": "__listOfValidationError", "locationName": "validationErrors", - "documentation": "A collection of validation error responses from attempting to create a channel with a bouquet of settings." + "documentation": "A collection of validation error responses." } }, "exception": true, @@ -7922,6 +8559,12 @@ "max": 1000000, "documentation": "Placeholder documentation for __integerMin0Max1000000" }, + "__integerMin0Max1099511627775": { + "type": "long", + "min": 0, + "max": 1099511627775, + "documentation": "Unsigned 40-bit integer." 
+ }, "__integerMin0Max128": { "type": "integer", "min": 0, @@ -7952,6 +8595,12 @@ "max": 3600, "documentation": "Placeholder documentation for __integerMin0Max3600" }, + "__integerMin0Max4294967295": { + "type": "long", + "min": 0, + "max": 4294967295, + "documentation": "Unsigned 32-bit integer." + }, "__integerMin0Max500": { "type": "integer", "min": 0, @@ -7988,6 +8637,12 @@ "max": 8191, "documentation": "Placeholder documentation for __integerMin0Max8191" }, + "__integerMin0Max8589934591": { + "type": "long", + "min": 0, + "max": 8589934591, + "documentation": "Unsigned 33-bit integer." + }, "__integerMin1": { "type": "integer", "min": 1, @@ -8285,6 +8940,20 @@ }, "documentation": "Placeholder documentation for __listOfReservation" }, + "__listOfScheduleAction": { + "type": "list", + "member": { + "shape": "ScheduleAction" + }, + "documentation": "The list of schedule actions." + }, + "__listOfScte35Descriptor": { + "type": "list", + "member": { + "shape": "Scte35Descriptor" + }, + "documentation": "List of Scte35 descriptors" + }, "__listOfValidationError": { "type": "list", "member": { diff --git a/botocore/data/mediapackage/2017-10-12/service-2.json b/botocore/data/mediapackage/2017-10-12/service-2.json index 84093d0d..263ad560 100644 --- a/botocore/data/mediapackage/2017-10-12/service-2.json +++ b/botocore/data/mediapackage/2017-10-12/service-2.json @@ -1,1903 +1,1993 @@ { - "documentation": "AWS Elemental MediaPackage", + "documentation": "AWS Elemental MediaPackage", "metadata": { - "apiVersion": "2017-10-12", - "endpointPrefix": "mediapackage", - "jsonVersion": "1.1", - "protocol": "rest-json", - "serviceAbbreviation": "MediaPackage", - "serviceFullName": "AWS Elemental MediaPackage", - "serviceId": "MediaPackage", - "signatureVersion": "v4", - "signingName": "mediapackage", + "apiVersion": "2017-10-12", + "endpointPrefix": "mediapackage", + "jsonVersion": "1.1", + "protocol": "rest-json", + "serviceAbbreviation": "MediaPackage", + "serviceFullName": 
"AWS Elemental MediaPackage", + "serviceId": "MediaPackage", + "signatureVersion": "v4", + "signingName": "mediapackage", "uid": "mediapackage-2017-10-12" - }, + }, "operations": { "CreateChannel": { - "documentation": "Creates a new Channel.", + "documentation": "Creates a new Channel.", "errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": "ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "POST", - "requestUri": "/channels", + "method": "POST", + "requestUri": "/channels", "responseCode": 200 - }, + }, "input": { "shape": "CreateChannelRequest" - }, - "name": "CreateChannel", + }, + "name": "CreateChannel", "output": { - "documentation": "The new Channel record.", + "documentation": "The new Channel record.", "shape": "CreateChannelResponse" } - }, + }, "CreateOriginEndpoint": { - "documentation": "Creates a new OriginEndpoint record.", + "documentation": "Creates a new OriginEndpoint record.", "errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": "ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "POST", - "requestUri": "/origin_endpoints", + "method": "POST", + "requestUri": "/origin_endpoints", "responseCode": 200 - }, + }, "input": { "shape": "CreateOriginEndpointRequest" - }, - "name": "CreateOriginEndpoint", + }, + "name": "CreateOriginEndpoint", "output": { - "documentation": "A new OriginEndpoint record.", + "documentation": "A new OriginEndpoint record.", "shape": "CreateOriginEndpointResponse" } - }, + }, "DeleteChannel": { - "documentation": "Deletes an existing Channel.", + "documentation": "Deletes an existing Channel.", 
"errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": "ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "DELETE", - "requestUri": "/channels/{id}", + "method": "DELETE", + "requestUri": "/channels/{id}", "responseCode": 202 - }, + }, "input": { "shape": "DeleteChannelRequest" - }, - "name": "DeleteChannel", + }, + "name": "DeleteChannel", "output": { - "documentation": "The Channel has been deleted.", + "documentation": "The Channel has been deleted.", "shape": "DeleteChannelResponse" } - }, + }, "DeleteOriginEndpoint": { - "documentation": "Deletes an existing OriginEndpoint.", + "documentation": "Deletes an existing OriginEndpoint.", "errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": "ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "DELETE", - "requestUri": "/origin_endpoints/{id}", + "method": "DELETE", + "requestUri": "/origin_endpoints/{id}", "responseCode": 202 - }, + }, "input": { "shape": "DeleteOriginEndpointRequest" - }, - "name": "DeleteOriginEndpoint", + }, + "name": "DeleteOriginEndpoint", "output": { - "documentation": "The OriginEndpoint has been deleted.", + "documentation": "The OriginEndpoint has been deleted.", "shape": "DeleteOriginEndpointResponse" } - }, + }, "DescribeChannel": { - "documentation": "Gets details about a Channel.", + "documentation": "Gets details about a Channel.", "errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": 
"ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "GET", - "requestUri": "/channels/{id}", + "method": "GET", + "requestUri": "/channels/{id}", "responseCode": 200 - }, + }, "input": { "shape": "DescribeChannelRequest" - }, - "name": "DescribeChannel", + }, + "name": "DescribeChannel", "output": { - "documentation": "A Channel record.", + "documentation": "A Channel record.", "shape": "DescribeChannelResponse" } - }, + }, "DescribeOriginEndpoint": { - "documentation": "Gets details about an existing OriginEndpoint.", + "documentation": "Gets details about an existing OriginEndpoint.", "errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": "ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "GET", - "requestUri": "/origin_endpoints/{id}", + "method": "GET", + "requestUri": "/origin_endpoints/{id}", "responseCode": 200 - }, + }, "input": { "shape": "DescribeOriginEndpointRequest" - }, - "name": "DescribeOriginEndpoint", + }, + "name": "DescribeOriginEndpoint", "output": { - "documentation": "An OriginEndpoint record.", + "documentation": "An OriginEndpoint record.", "shape": "DescribeOriginEndpointResponse" } - }, + }, "ListChannels": { - "documentation": "Returns a collection of Channels.", + "documentation": "Returns a collection of Channels.", "errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": "ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "GET", - "requestUri": "/channels", + "method": "GET", + "requestUri": "/channels", "responseCode": 200 - }, + }, "input": { "shape": 
"ListChannelsRequest" - }, - "name": "ListChannels", + }, + "name": "ListChannels", "output": { - "documentation": "A collection of Channel records.", + "documentation": "A collection of Channel records.", "shape": "ListChannelsResponse" } - }, + }, "ListOriginEndpoints": { - "documentation": "Returns a collection of OriginEndpoint records.", + "documentation": "Returns a collection of OriginEndpoint records.", "errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": "ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "GET", - "requestUri": "/origin_endpoints", + "method": "GET", + "requestUri": "/origin_endpoints", "responseCode": 200 - }, + }, "input": { "shape": "ListOriginEndpointsRequest" - }, - "name": "ListOriginEndpoints", + }, + "name": "ListOriginEndpoints", "output": { - "documentation": "A collection of OriginEndpoint records.", + "documentation": "A collection of OriginEndpoint records.", "shape": "ListOriginEndpointsResponse" } - }, + }, "RotateChannelCredentials": { - "documentation": "Changes the Channel ingest username and password.", + "deprecated": true, + "deprecatedMessage": "This API is deprecated. Please use RotateIngestEndpointCredentials instead", + "documentation": "Changes the Channel's first IngestEndpoint's username and password. WARNING - This API is deprecated. 
Please use RotateIngestEndpointCredentials instead", "errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": "ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "PUT", - "requestUri": "/channels/{id}/credentials", + "method": "PUT", + "requestUri": "/channels/{id}/credentials", "responseCode": 200 - }, + }, "input": { "shape": "RotateChannelCredentialsRequest" - }, - "name": "RotateChannelCredentials", + }, + "name": "RotateChannelCredentials", "output": { - "documentation": "The updated Channel record.", + "documentation": "The updated Channel record.", "shape": "RotateChannelCredentialsResponse" } - }, - "UpdateChannel": { - "documentation": "Updates an existing Channel.", + }, + "RotateIngestEndpointCredentials": { + "documentation": "Rotate the IngestEndpoint's username and password, as specified by the IngestEndpoint's id.", "errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": "ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "PUT", - "requestUri": "/channels/{id}", + "method": "PUT", + "requestUri": "/channels/{id}/ingest_endpoints/{ingest_endpoint_id}/credentials", "responseCode": 200 - }, + }, + "input": { + "shape": "RotateIngestEndpointCredentialsRequest" + }, + "name": "RotateIngestEndpointCredentials", + "output": { + "documentation": "The updated Channel record.", + "shape": "RotateIngestEndpointCredentialsResponse" + } + }, + "UpdateChannel": { + "documentation": "Updates an existing Channel.", + "errors": [ + { + "shape": "UnprocessableEntityException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": 
"ForbiddenException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "http": { + "method": "PUT", + "requestUri": "/channels/{id}", + "responseCode": 200 + }, "input": { "shape": "UpdateChannelRequest" - }, - "name": "UpdateChannel", + }, + "name": "UpdateChannel", "output": { - "documentation": "The updated Channel record.", + "documentation": "The updated Channel record.", "shape": "UpdateChannelResponse" } - }, + }, "UpdateOriginEndpoint": { - "documentation": "Updates an existing OriginEndpoint.", + "documentation": "Updates an existing OriginEndpoint.", "errors": [ { "shape": "UnprocessableEntityException" - }, + }, { "shape": "InternalServerErrorException" - }, + }, { "shape": "ForbiddenException" - }, + }, { "shape": "NotFoundException" - }, + }, { "shape": "ServiceUnavailableException" - }, + }, { "shape": "TooManyRequestsException" } - ], + ], "http": { - "method": "PUT", - "requestUri": "/origin_endpoints/{id}", + "method": "PUT", + "requestUri": "/origin_endpoints/{id}", "responseCode": 200 - }, + }, "input": { "shape": "UpdateOriginEndpointRequest" - }, - "name": "UpdateOriginEndpoint", + }, + "name": "UpdateOriginEndpoint", "output": { - "documentation": "An updated OriginEndpoint record.", + "documentation": "An updated OriginEndpoint record.", "shape": "UpdateOriginEndpointResponse" } } - }, + }, "shapes": { "AdMarkers": { "enum": [ - "NONE", - "SCTE35_ENHANCED", + "NONE", + "SCTE35_ENHANCED", "PASSTHROUGH" - ], + ], "type": "string" - }, + }, "Channel": { - "documentation": "A Channel resource configuration.", + "documentation": "A Channel resource configuration.", "members": { "Arn": { - "documentation": "The Amazon Resource Name (ARN) assigned to the Channel.", - "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) assigned to the Channel.", + "locationName": "arn", "shape": "__string" - }, + }, "Description": { - 
"documentation": "A short text description of the Channel.", - "locationName": "description", + "documentation": "A short text description of the Channel.", + "locationName": "description", "shape": "__string" - }, + }, "HlsIngest": { - "locationName": "hlsIngest", + "locationName": "hlsIngest", "shape": "HlsIngest" - }, + }, "Id": { - "documentation": "The ID of the Channel.", - "locationName": "id", + "documentation": "The ID of the Channel.", + "locationName": "id", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "ChannelCreateParameters": { - "documentation": "Configuration parameters for a new Channel.", + "documentation": "Configuration parameters for a new Channel.", "members": { "Description": { - "documentation": "A short text description of the Channel.", - "locationName": "description", + "documentation": "A short text description of the Channel.", + "locationName": "description", "shape": "__string" - }, + }, "Id": { - "documentation": "The ID of the Channel. The ID must be unique within the region and it\ncannot be changed after a Channel is created.\n", - "locationName": "id", + "documentation": "The ID of the Channel. 
The ID must be unique within the region and it\ncannot be changed after a Channel is created.\n", + "locationName": "id", "shape": "__string" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "ChannelList": { - "documentation": "A collection of Channel records.", + "documentation": "A collection of Channel records.", "members": { "Channels": { - "documentation": "A list of Channel records.", - "locationName": "channels", + "documentation": "A list of Channel records.", + "locationName": "channels", "shape": "__listOfChannel" - }, + }, "NextToken": { - "documentation": "A token that can be used to resume pagination from the end of the collection.", - "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection.", + "locationName": "nextToken", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "ChannelUpdateParameters": { - "documentation": "Configuration parameters for updating an existing Channel.", + "documentation": "Configuration parameters for updating an existing Channel.", "members": { "Description": { - "documentation": "A short text description of the Channel.", - "locationName": "description", + "documentation": "A short text description of the Channel.", + "locationName": "description", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "CmafEncryption": { - "documentation": "A Common Media Application Format (CMAF) encryption configuration.", + "documentation": "A Common Media Application Format (CMAF) encryption configuration.", "members": { "KeyRotationIntervalSeconds": { - "documentation": "Time (in seconds) between each encryption key rotation.", - "locationName": "keyRotationIntervalSeconds", + "documentation": "Time (in seconds) between each encryption key rotation.", + "locationName": "keyRotationIntervalSeconds", "shape": "__integer" - }, + }, "SpekeKeyProvider": { - "locationName": "spekeKeyProvider", + "locationName": "spekeKeyProvider", 
"shape": "SpekeKeyProvider" } - }, + }, "required": [ "SpekeKeyProvider" - ], + ], "type": "structure" - }, + }, "CmafPackage": { - "documentation": "A Common Media Application Format (CMAF) packaging configuration.", + "documentation": "A Common Media Application Format (CMAF) packaging configuration.", "members": { "Encryption": { - "locationName": "encryption", + "locationName": "encryption", "shape": "CmafEncryption" - }, + }, "HlsManifests": { - "documentation": "A list of HLS manifest configurations", - "locationName": "hlsManifests", + "documentation": "A list of HLS manifest configurations", + "locationName": "hlsManifests", "shape": "__listOfHlsManifest" - }, + }, "SegmentDurationSeconds": { - "documentation": "Duration (in seconds) of each segment. Actual segments will be\nrounded to the nearest multiple of the source segment duration.\n", - "locationName": "segmentDurationSeconds", + "documentation": "Duration (in seconds) of each segment. Actual segments will be\nrounded to the nearest multiple of the source segment duration.\n", + "locationName": "segmentDurationSeconds", "shape": "__integer" - }, + }, "SegmentPrefix": { - "documentation": "An optional custom string that is prepended to the name of each segment. If not specified, it defaults to the ChannelId.", - "locationName": "segmentPrefix", + "documentation": "An optional custom string that is prepended to the name of each segment. 
If not specified, it defaults to the ChannelId.", + "locationName": "segmentPrefix", "shape": "__string" - }, + }, "StreamSelection": { - "locationName": "streamSelection", + "locationName": "streamSelection", "shape": "StreamSelection" } - }, + }, "type": "structure" - }, + }, "CmafPackageCreateOrUpdateParameters": { - "documentation": "A Common Media Application Format (CMAF) packaging configuration.", + "documentation": "A Common Media Application Format (CMAF) packaging configuration.", "members": { "Encryption": { - "locationName": "encryption", + "locationName": "encryption", "shape": "CmafEncryption" - }, + }, "HlsManifests": { - "documentation": "A list of HLS manifest configurations", - "locationName": "hlsManifests", + "documentation": "A list of HLS manifest configurations", + "locationName": "hlsManifests", "shape": "__listOfHlsManifestCreateOrUpdateParameters" - }, + }, "SegmentDurationSeconds": { - "documentation": "Duration (in seconds) of each segment. Actual segments will be\nrounded to the nearest multiple of the source segment duration.\n", - "locationName": "segmentDurationSeconds", + "documentation": "Duration (in seconds) of each segment. Actual segments will be\nrounded to the nearest multiple of the source segment duration.\n", + "locationName": "segmentDurationSeconds", "shape": "__integer" - }, + }, "SegmentPrefix": { - "documentation": "An optional custom string that is prepended to the name of each segment. If not specified, it defaults to the ChannelId.", - "locationName": "segmentPrefix", + "documentation": "An optional custom string that is prepended to the name of each segment. 
If not specified, it defaults to the ChannelId.", + "locationName": "segmentPrefix", "shape": "__string" - }, + }, "StreamSelection": { - "locationName": "streamSelection", + "locationName": "streamSelection", "shape": "StreamSelection" } - }, + }, "type": "structure" - }, + }, "CreateChannelRequest": { - "documentation": "A new Channel configuration.", + "documentation": "A new Channel configuration.", "members": { "Description": { - "documentation": "A short text description of the Channel.", - "locationName": "description", + "documentation": "A short text description of the Channel.", + "locationName": "description", "shape": "__string" - }, + }, "Id": { - "documentation": "The ID of the Channel. The ID must be unique within the region and it\ncannot be changed after a Channel is created.\n", - "locationName": "id", + "documentation": "The ID of the Channel. The ID must be unique within the region and it\ncannot be changed after a Channel is created.\n", + "locationName": "id", "shape": "__string" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "CreateChannelResponse": { "members": { "Arn": { - "documentation": "The Amazon Resource Name (ARN) assigned to the Channel.", - "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) assigned to the Channel.", + "locationName": "arn", "shape": "__string" - }, + }, "Description": { - "documentation": "A short text description of the Channel.", - "locationName": "description", + "documentation": "A short text description of the Channel.", + "locationName": "description", "shape": "__string" - }, + }, "HlsIngest": { - "locationName": "hlsIngest", + "locationName": "hlsIngest", "shape": "HlsIngest" - }, + }, "Id": { - "documentation": "The ID of the Channel.", - "locationName": "id", + "documentation": "The ID of the Channel.", + "locationName": "id", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "CreateOriginEndpointRequest": { - "documentation": "Configuration 
parameters used to create a new OriginEndpoint.", + "documentation": "Configuration parameters used to create a new OriginEndpoint.", "members": { "ChannelId": { - "documentation": "The ID of the Channel that the OriginEndpoint will be associated with.\nThis cannot be changed after the OriginEndpoint is created.\n", - "locationName": "channelId", + "documentation": "The ID of the Channel that the OriginEndpoint will be associated with.\nThis cannot be changed after the OriginEndpoint is created.\n", + "locationName": "channelId", "shape": "__string" - }, + }, "CmafPackage": { - "locationName": "cmafPackage", + "locationName": "cmafPackage", "shape": "CmafPackageCreateOrUpdateParameters" - }, + }, "DashPackage": { - "locationName": "dashPackage", + "locationName": "dashPackage", "shape": "DashPackage" - }, + }, "Description": { - "documentation": "A short text description of the OriginEndpoint.", - "locationName": "description", + "documentation": "A short text description of the OriginEndpoint.", + "locationName": "description", "shape": "__string" - }, + }, "HlsPackage": { - "locationName": "hlsPackage", + "locationName": "hlsPackage", "shape": "HlsPackage" - }, + }, "Id": { - "documentation": "The ID of the OriginEndpoint. The ID must be unique within the region\nand it cannot be changed after the OriginEndpoint is created.\n", - "locationName": "id", + "documentation": "The ID of the OriginEndpoint. 
The ID must be unique within the region\nand it cannot be changed after the OriginEndpoint is created.\n", + "locationName": "id", "shape": "__string" - }, + }, "ManifestName": { - "documentation": "A short string that will be used as the filename of the OriginEndpoint URL (defaults to \"index\").", - "locationName": "manifestName", + "documentation": "A short string that will be used as the filename of the OriginEndpoint URL (defaults to \"index\").", + "locationName": "manifestName", "shape": "__string" - }, + }, "MssPackage": { - "locationName": "mssPackage", + "locationName": "mssPackage", "shape": "MssPackage" - }, + }, "StartoverWindowSeconds": { - "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", - "locationName": "startoverWindowSeconds", + "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", + "locationName": "startoverWindowSeconds", "shape": "__integer" - }, + }, "TimeDelaySeconds": { - "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", - "locationName": "timeDelaySeconds", + "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", + "locationName": "timeDelaySeconds", "shape": "__integer" - }, + }, "Whitelist": { - "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", - "locationName": "whitelist", + "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", + "locationName": "whitelist", "shape": "__listOf__string" } - }, + }, "required": [ - "ChannelId", + "ChannelId", "Id" - ], + ], 
"type": "structure" - }, + }, "CreateOriginEndpointResponse": { "members": { "Arn": { - "documentation": "The Amazon Resource Name (ARN) assigned to the OriginEndpoint.", - "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) assigned to the OriginEndpoint.", + "locationName": "arn", "shape": "__string" - }, + }, "ChannelId": { - "documentation": "The ID of the Channel the OriginEndpoint is associated with.", - "locationName": "channelId", + "documentation": "The ID of the Channel the OriginEndpoint is associated with.", + "locationName": "channelId", "shape": "__string" - }, + }, "CmafPackage": { - "locationName": "cmafPackage", + "locationName": "cmafPackage", "shape": "CmafPackage" - }, + }, "DashPackage": { - "locationName": "dashPackage", + "locationName": "dashPackage", "shape": "DashPackage" - }, + }, "Description": { - "documentation": "A short text description of the OriginEndpoint.", - "locationName": "description", + "documentation": "A short text description of the OriginEndpoint.", + "locationName": "description", "shape": "__string" - }, + }, "HlsPackage": { - "locationName": "hlsPackage", + "locationName": "hlsPackage", "shape": "HlsPackage" - }, + }, "Id": { - "documentation": "The ID of the OriginEndpoint.", - "locationName": "id", + "documentation": "The ID of the OriginEndpoint.", + "locationName": "id", "shape": "__string" - }, + }, "ManifestName": { - "documentation": "A short string appended to the end of the OriginEndpoint URL.", - "locationName": "manifestName", + "documentation": "A short string appended to the end of the OriginEndpoint URL.", + "locationName": "manifestName", "shape": "__string" - }, + }, "MssPackage": { - "locationName": "mssPackage", + "locationName": "mssPackage", "shape": "MssPackage" - }, + }, "StartoverWindowSeconds": { - "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", - 
"locationName": "startoverWindowSeconds", + "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", + "locationName": "startoverWindowSeconds", "shape": "__integer" - }, + }, "TimeDelaySeconds": { - "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", - "locationName": "timeDelaySeconds", + "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", + "locationName": "timeDelaySeconds", "shape": "__integer" - }, + }, "Url": { - "documentation": "The URL of the packaged OriginEndpoint for consumption.", - "locationName": "url", + "documentation": "The URL of the packaged OriginEndpoint for consumption.", + "locationName": "url", "shape": "__string" - }, + }, "Whitelist": { - "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", - "locationName": "whitelist", + "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", + "locationName": "whitelist", "shape": "__listOf__string" } - }, + }, "type": "structure" - }, + }, "DashEncryption": { - "documentation": "A Dynamic Adaptive Streaming over HTTP (DASH) encryption configuration.", + "documentation": "A Dynamic Adaptive Streaming over HTTP (DASH) encryption configuration.", "members": { "KeyRotationIntervalSeconds": { - "documentation": "Time (in seconds) between each encryption key rotation.", - "locationName": "keyRotationIntervalSeconds", + "documentation": "Time (in seconds) between each encryption key rotation.", + "locationName": "keyRotationIntervalSeconds", "shape": "__integer" - }, + }, "SpekeKeyProvider": { - "locationName": "spekeKeyProvider", + "locationName": 
"spekeKeyProvider", "shape": "SpekeKeyProvider" } - }, + }, "required": [ "SpekeKeyProvider" - ], + ], "type": "structure" - }, + }, "DashPackage": { - "documentation": "A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.", + "documentation": "A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.", "members": { "Encryption": { - "locationName": "encryption", + "locationName": "encryption", "shape": "DashEncryption" - }, + }, "ManifestWindowSeconds": { - "documentation": "Time window (in seconds) contained in each manifest.", - "locationName": "manifestWindowSeconds", + "documentation": "Time window (in seconds) contained in each manifest.", + "locationName": "manifestWindowSeconds", "shape": "__integer" - }, + }, "MinBufferTimeSeconds": { - "documentation": "Minimum duration (in seconds) that a player will buffer media before starting the presentation.", - "locationName": "minBufferTimeSeconds", + "documentation": "Minimum duration (in seconds) that a player will buffer media before starting the presentation.", + "locationName": "minBufferTimeSeconds", "shape": "__integer" - }, + }, "MinUpdatePeriodSeconds": { - "documentation": "Minimum duration (in seconds) between potential changes to the Dynamic Adaptive Streaming over HTTP (DASH) Media Presentation Description (MPD).", - "locationName": "minUpdatePeriodSeconds", + "documentation": "Minimum duration (in seconds) between potential changes to the Dynamic Adaptive Streaming over HTTP (DASH) Media Presentation Description (MPD).", + "locationName": "minUpdatePeriodSeconds", "shape": "__integer" - }, + }, "PeriodTriggers": { - "documentation": "A list of triggers that controls when the outgoing Dynamic Adaptive Streaming over HTTP (DASH)\nMedia Presentation Description (MPD) will be partitioned into multiple periods. If empty, the content will not\nbe partitioned into more than one period. 
If the list contains \"ADS\", new periods will be created where\nthe Channel source contains SCTE-35 ad markers.\n", - "locationName": "periodTriggers", + "documentation": "A list of triggers that controls when the outgoing Dynamic Adaptive Streaming over HTTP (DASH)\nMedia Presentation Description (MPD) will be partitioned into multiple periods. If empty, the content will not\nbe partitioned into more than one period. If the list contains \"ADS\", new periods will be created where\nthe Channel source contains SCTE-35 ad markers.\n", + "locationName": "periodTriggers", "shape": "__listOf__PeriodTriggersElement" - }, + }, "Profile": { - "documentation": "The Dynamic Adaptive Streaming over HTTP (DASH) profile type. When set to \"HBBTV_1_5\", HbbTV 1.5 compliant output is enabled.", - "locationName": "profile", + "documentation": "The Dynamic Adaptive Streaming over HTTP (DASH) profile type. When set to \"HBBTV_1_5\", HbbTV 1.5 compliant output is enabled.", + "locationName": "profile", "shape": "Profile" - }, + }, "SegmentDurationSeconds": { - "documentation": "Duration (in seconds) of each segment. Actual segments will be\nrounded to the nearest multiple of the source segment duration.\n", - "locationName": "segmentDurationSeconds", + "documentation": "Duration (in seconds) of each segment. 
Actual segments will be\nrounded to the nearest multiple of the source segment duration.\n", + "locationName": "segmentDurationSeconds", "shape": "__integer" - }, + }, "StreamSelection": { - "locationName": "streamSelection", + "locationName": "streamSelection", "shape": "StreamSelection" - }, + }, "SuggestedPresentationDelaySeconds": { - "documentation": "Duration (in seconds) to delay live content before presentation.", - "locationName": "suggestedPresentationDelaySeconds", + "documentation": "Duration (in seconds) to delay live content before presentation.", + "locationName": "suggestedPresentationDelaySeconds", "shape": "__integer" } - }, + }, "type": "structure" - }, + }, "DeleteChannelRequest": { "members": { "Id": { - "documentation": "The ID of the Channel to delete.", - "location": "uri", - "locationName": "id", + "documentation": "The ID of the Channel to delete.", + "location": "uri", + "locationName": "id", "shape": "__string" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "DeleteChannelResponse": { - "members": {}, + "members": {}, "type": "structure" - }, + }, "DeleteOriginEndpointRequest": { "members": { "Id": { - "documentation": "The ID of the OriginEndpoint to delete.", - "location": "uri", - "locationName": "id", + "documentation": "The ID of the OriginEndpoint to delete.", + "location": "uri", + "locationName": "id", "shape": "__string" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "DeleteOriginEndpointResponse": { - "members": {}, + "members": {}, "type": "structure" - }, + }, "DescribeChannelRequest": { "members": { "Id": { - "documentation": "The ID of a Channel.", - "location": "uri", - "locationName": "id", + "documentation": "The ID of a Channel.", + "location": "uri", + "locationName": "id", "shape": "__string" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "DescribeChannelResponse": { "members": { "Arn": { - "documentation": "The Amazon Resource Name (ARN) assigned 
to the Channel.", - "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) assigned to the Channel.", + "locationName": "arn", "shape": "__string" - }, + }, "Description": { - "documentation": "A short text description of the Channel.", - "locationName": "description", + "documentation": "A short text description of the Channel.", + "locationName": "description", "shape": "__string" - }, + }, "HlsIngest": { - "locationName": "hlsIngest", + "locationName": "hlsIngest", "shape": "HlsIngest" - }, + }, "Id": { - "documentation": "The ID of the Channel.", - "locationName": "id", + "documentation": "The ID of the Channel.", + "locationName": "id", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "DescribeOriginEndpointRequest": { "members": { "Id": { - "documentation": "The ID of the OriginEndpoint.", - "location": "uri", - "locationName": "id", + "documentation": "The ID of the OriginEndpoint.", + "location": "uri", + "locationName": "id", "shape": "__string" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "DescribeOriginEndpointResponse": { "members": { "Arn": { - "documentation": "The Amazon Resource Name (ARN) assigned to the OriginEndpoint.", - "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) assigned to the OriginEndpoint.", + "locationName": "arn", "shape": "__string" - }, + }, "ChannelId": { - "documentation": "The ID of the Channel the OriginEndpoint is associated with.", - "locationName": "channelId", + "documentation": "The ID of the Channel the OriginEndpoint is associated with.", + "locationName": "channelId", "shape": "__string" - }, + }, "CmafPackage": { - "locationName": "cmafPackage", + "locationName": "cmafPackage", "shape": "CmafPackage" - }, + }, "DashPackage": { - "locationName": "dashPackage", + "locationName": "dashPackage", "shape": "DashPackage" - }, + }, "Description": { - "documentation": "A short text description of the OriginEndpoint.", - "locationName": 
"description", + "documentation": "A short text description of the OriginEndpoint.", + "locationName": "description", "shape": "__string" - }, + }, "HlsPackage": { - "locationName": "hlsPackage", + "locationName": "hlsPackage", "shape": "HlsPackage" - }, + }, "Id": { - "documentation": "The ID of the OriginEndpoint.", - "locationName": "id", + "documentation": "The ID of the OriginEndpoint.", + "locationName": "id", "shape": "__string" - }, + }, "ManifestName": { - "documentation": "A short string appended to the end of the OriginEndpoint URL.", - "locationName": "manifestName", + "documentation": "A short string appended to the end of the OriginEndpoint URL.", + "locationName": "manifestName", "shape": "__string" - }, + }, "MssPackage": { - "locationName": "mssPackage", + "locationName": "mssPackage", "shape": "MssPackage" - }, + }, "StartoverWindowSeconds": { - "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", - "locationName": "startoverWindowSeconds", + "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", + "locationName": "startoverWindowSeconds", "shape": "__integer" - }, + }, "TimeDelaySeconds": { - "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", - "locationName": "timeDelaySeconds", + "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", + "locationName": "timeDelaySeconds", "shape": "__integer" - }, + }, "Url": { - "documentation": "The URL of the packaged OriginEndpoint for consumption.", - "locationName": "url", + "documentation": "The URL of the packaged OriginEndpoint for 
consumption.", + "locationName": "url", "shape": "__string" - }, + }, "Whitelist": { - "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", - "locationName": "whitelist", + "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", + "locationName": "whitelist", "shape": "__listOf__string" } - }, + }, "type": "structure" - }, + }, "EncryptionMethod": { "enum": [ - "AES_128", + "AES_128", "SAMPLE_AES" - ], + ], "type": "string" - }, + }, "ForbiddenException": { - "documentation": "The client is not authorized to access the requested resource.", + "documentation": "The client is not authorized to access the requested resource.", "error": { "httpStatusCode": 403 - }, - "exception": true, + }, + "exception": true, "members": { "Message": { - "locationName": "message", + "locationName": "message", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "HlsEncryption": { - "documentation": "An HTTP Live Streaming (HLS) encryption configuration.", + "documentation": "An HTTP Live Streaming (HLS) encryption configuration.", "members": { "ConstantInitializationVector": { - "documentation": "A constant initialization vector for encryption (optional).\nWhen not specified the initialization vector will be periodically rotated.\n", - "locationName": "constantInitializationVector", + "documentation": "A constant initialization vector for encryption (optional).\nWhen not specified the initialization vector will be periodically rotated.\n", + "locationName": "constantInitializationVector", "shape": "__string" - }, + }, "EncryptionMethod": { - "documentation": "The encryption method to use.", - "locationName": "encryptionMethod", + "documentation": "The encryption method to use.", + "locationName": "encryptionMethod", "shape": "EncryptionMethod" - }, + }, "KeyRotationIntervalSeconds": { - "documentation": "Interval (in seconds) between each encryption key rotation.", - 
"locationName": "keyRotationIntervalSeconds", + "documentation": "Interval (in seconds) between each encryption key rotation.", + "locationName": "keyRotationIntervalSeconds", "shape": "__integer" - }, + }, "RepeatExtXKey": { - "documentation": "When enabled, the EXT-X-KEY tag will be repeated in output manifests.", - "locationName": "repeatExtXKey", + "documentation": "When enabled, the EXT-X-KEY tag will be repeated in output manifests.", + "locationName": "repeatExtXKey", "shape": "__boolean" - }, + }, "SpekeKeyProvider": { - "locationName": "spekeKeyProvider", + "locationName": "spekeKeyProvider", "shape": "SpekeKeyProvider" } - }, + }, "required": [ "SpekeKeyProvider" - ], + ], "type": "structure" - }, + }, "HlsIngest": { - "documentation": "An HTTP Live Streaming (HLS) ingest resource configuration.", + "documentation": "An HTTP Live Streaming (HLS) ingest resource configuration.", "members": { "IngestEndpoints": { - "documentation": "A list of endpoints to which the source stream should be sent.", - "locationName": "ingestEndpoints", + "documentation": "A list of endpoints to which the source stream should be sent.", + "locationName": "ingestEndpoints", "shape": "__listOfIngestEndpoint" } - }, + }, "type": "structure" - }, + }, "HlsManifest": { - "documentation": "A HTTP Live Streaming (HLS) manifest configuration.", + "documentation": "A HTTP Live Streaming (HLS) manifest configuration.", "members": { "AdMarkers": { - "documentation": "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n", - "locationName": "adMarkers", + "documentation": "This setting controls how ad markers are included in the packaged 
OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n", + "locationName": "adMarkers", "shape": "AdMarkers" - }, + }, "Id": { - "documentation": "The ID of the manifest. The ID must be unique within the OriginEndpoint and it cannot be changed after it is created.", - "locationName": "id", + "documentation": "The ID of the manifest. The ID must be unique within the OriginEndpoint and it cannot be changed after it is created.", + "locationName": "id", "shape": "__string" - }, + }, "IncludeIframeOnlyStream": { - "documentation": "When enabled, an I-Frame only stream will be included in the output.", - "locationName": "includeIframeOnlyStream", + "documentation": "When enabled, an I-Frame only stream will be included in the output.", + "locationName": "includeIframeOnlyStream", "shape": "__boolean" - }, + }, "ManifestName": { - "documentation": "An optional short string appended to the end of the OriginEndpoint URL. If not specified, defaults to the manifestName for the OriginEndpoint.", - "locationName": "manifestName", + "documentation": "An optional short string appended to the end of the OriginEndpoint URL. 
If not specified, defaults to the manifestName for the OriginEndpoint.", + "locationName": "manifestName", "shape": "__string" - }, + }, "PlaylistType": { - "documentation": "The HTTP Live Streaming (HLS) playlist type.\nWhen either \"EVENT\" or \"VOD\" is specified, a corresponding EXT-X-PLAYLIST-TYPE\nentry will be included in the media playlist.\n", - "locationName": "playlistType", + "documentation": "The HTTP Live Streaming (HLS) playlist type.\nWhen either \"EVENT\" or \"VOD\" is specified, a corresponding EXT-X-PLAYLIST-TYPE\nentry will be included in the media playlist.\n", + "locationName": "playlistType", "shape": "PlaylistType" - }, + }, "PlaylistWindowSeconds": { - "documentation": "Time window (in seconds) contained in each parent manifest.", - "locationName": "playlistWindowSeconds", + "documentation": "Time window (in seconds) contained in each parent manifest.", + "locationName": "playlistWindowSeconds", "shape": "__integer" - }, + }, "ProgramDateTimeIntervalSeconds": { - "documentation": "The interval (in seconds) between each EXT-X-PROGRAM-DATE-TIME tag\ninserted into manifests. Additionally, when an interval is specified\nID3Timed Metadata messages will be generated every 5 seconds using the\ningest time of the content.\nIf the interval is not specified, or set to 0, then\nno EXT-X-PROGRAM-DATE-TIME tags will be inserted into manifests and no\nID3Timed Metadata messages will be generated. Note that irrespective\nof this parameter, if any ID3 Timed Metadata is found in HTTP Live Streaming (HLS) input,\nit will be passed through to HLS output.\n", - "locationName": "programDateTimeIntervalSeconds", + "documentation": "The interval (in seconds) between each EXT-X-PROGRAM-DATE-TIME tag\ninserted into manifests. 
Additionally, when an interval is specified\nID3Timed Metadata messages will be generated every 5 seconds using the\ningest time of the content.\nIf the interval is not specified, or set to 0, then\nno EXT-X-PROGRAM-DATE-TIME tags will be inserted into manifests and no\nID3Timed Metadata messages will be generated. Note that irrespective\nof this parameter, if any ID3 Timed Metadata is found in HTTP Live Streaming (HLS) input,\nit will be passed through to HLS output.\n", + "locationName": "programDateTimeIntervalSeconds", "shape": "__integer" - }, + }, "Url": { - "documentation": "The URL of the packaged OriginEndpoint for consumption.", - "locationName": "url", + "documentation": "The URL of the packaged OriginEndpoint for consumption.", + "locationName": "url", "shape": "__string" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "HlsManifestCreateOrUpdateParameters": { - "documentation": "A HTTP Live Streaming (HLS) manifest configuration.", + "documentation": "A HTTP Live Streaming (HLS) manifest configuration.", "members": { "AdMarkers": { - "documentation": "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n", - "locationName": "adMarkers", + "documentation": "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n", + "locationName": 
"adMarkers", "shape": "AdMarkers" - }, + }, "Id": { - "documentation": "The ID of the manifest. The ID must be unique within the OriginEndpoint and it cannot be changed after it is created.", - "locationName": "id", + "documentation": "The ID of the manifest. The ID must be unique within the OriginEndpoint and it cannot be changed after it is created.", + "locationName": "id", "shape": "__string" - }, + }, "IncludeIframeOnlyStream": { - "documentation": "When enabled, an I-Frame only stream will be included in the output.", - "locationName": "includeIframeOnlyStream", + "documentation": "When enabled, an I-Frame only stream will be included in the output.", + "locationName": "includeIframeOnlyStream", "shape": "__boolean" - }, + }, "ManifestName": { - "documentation": "An optional short string appended to the end of the OriginEndpoint URL. If not specified, defaults to the manifestName for the OriginEndpoint.", - "locationName": "manifestName", + "documentation": "An optional short string appended to the end of the OriginEndpoint URL. 
If not specified, defaults to the manifestName for the OriginEndpoint.", + "locationName": "manifestName", "shape": "__string" - }, + }, "PlaylistType": { - "documentation": "The HTTP Live Streaming (HLS) playlist type.\nWhen either \"EVENT\" or \"VOD\" is specified, a corresponding EXT-X-PLAYLIST-TYPE\nentry will be included in the media playlist.\n", - "locationName": "playlistType", + "documentation": "The HTTP Live Streaming (HLS) playlist type.\nWhen either \"EVENT\" or \"VOD\" is specified, a corresponding EXT-X-PLAYLIST-TYPE\nentry will be included in the media playlist.\n", + "locationName": "playlistType", "shape": "PlaylistType" - }, + }, "PlaylistWindowSeconds": { - "documentation": "Time window (in seconds) contained in each parent manifest.", - "locationName": "playlistWindowSeconds", + "documentation": "Time window (in seconds) contained in each parent manifest.", + "locationName": "playlistWindowSeconds", "shape": "__integer" - }, + }, "ProgramDateTimeIntervalSeconds": { - "documentation": "The interval (in seconds) between each EXT-X-PROGRAM-DATE-TIME tag\ninserted into manifests. Additionally, when an interval is specified\nID3Timed Metadata messages will be generated every 5 seconds using the\ningest time of the content.\nIf the interval is not specified, or set to 0, then\nno EXT-X-PROGRAM-DATE-TIME tags will be inserted into manifests and no\nID3Timed Metadata messages will be generated. Note that irrespective\nof this parameter, if any ID3 Timed Metadata is found in HTTP Live Streaming (HLS) input,\nit will be passed through to HLS output.\n", - "locationName": "programDateTimeIntervalSeconds", + "documentation": "The interval (in seconds) between each EXT-X-PROGRAM-DATE-TIME tag\ninserted into manifests. 
Additionally, when an interval is specified\nID3Timed Metadata messages will be generated every 5 seconds using the\ningest time of the content.\nIf the interval is not specified, or set to 0, then\nno EXT-X-PROGRAM-DATE-TIME tags will be inserted into manifests and no\nID3Timed Metadata messages will be generated. Note that irrespective\nof this parameter, if any ID3 Timed Metadata is found in HTTP Live Streaming (HLS) input,\nit will be passed through to HLS output.\n", + "locationName": "programDateTimeIntervalSeconds", "shape": "__integer" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "HlsPackage": { - "documentation": "An HTTP Live Streaming (HLS) packaging configuration.", + "documentation": "An HTTP Live Streaming (HLS) packaging configuration.", "members": { "AdMarkers": { - "documentation": "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n", - "locationName": "adMarkers", + "documentation": "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n", + "locationName": "adMarkers", "shape": "AdMarkers" - }, + }, "Encryption": { - "locationName": "encryption", + "locationName": "encryption", "shape": "HlsEncryption" - }, + }, "IncludeIframeOnlyStream": { - "documentation": "When enabled, an I-Frame only stream will be included in 
the output.", - "locationName": "includeIframeOnlyStream", + "documentation": "When enabled, an I-Frame only stream will be included in the output.", + "locationName": "includeIframeOnlyStream", "shape": "__boolean" - }, + }, "PlaylistType": { - "documentation": "The HTTP Live Streaming (HLS) playlist type.\nWhen either \"EVENT\" or \"VOD\" is specified, a corresponding EXT-X-PLAYLIST-TYPE\nentry will be included in the media playlist.\n", - "locationName": "playlistType", + "documentation": "The HTTP Live Streaming (HLS) playlist type.\nWhen either \"EVENT\" or \"VOD\" is specified, a corresponding EXT-X-PLAYLIST-TYPE\nentry will be included in the media playlist.\n", + "locationName": "playlistType", "shape": "PlaylistType" - }, + }, "PlaylistWindowSeconds": { - "documentation": "Time window (in seconds) contained in each parent manifest.", - "locationName": "playlistWindowSeconds", + "documentation": "Time window (in seconds) contained in each parent manifest.", + "locationName": "playlistWindowSeconds", "shape": "__integer" - }, + }, "ProgramDateTimeIntervalSeconds": { - "documentation": "The interval (in seconds) between each EXT-X-PROGRAM-DATE-TIME tag\ninserted into manifests. Additionally, when an interval is specified\nID3Timed Metadata messages will be generated every 5 seconds using the\ningest time of the content.\nIf the interval is not specified, or set to 0, then\nno EXT-X-PROGRAM-DATE-TIME tags will be inserted into manifests and no\nID3Timed Metadata messages will be generated. Note that irrespective\nof this parameter, if any ID3 Timed Metadata is found in HTTP Live Streaming (HLS) input,\nit will be passed through to HLS output.\n", - "locationName": "programDateTimeIntervalSeconds", + "documentation": "The interval (in seconds) between each EXT-X-PROGRAM-DATE-TIME tag\ninserted into manifests. 
Additionally, when an interval is specified\nID3Timed Metadata messages will be generated every 5 seconds using the\ningest time of the content.\nIf the interval is not specified, or set to 0, then\nno EXT-X-PROGRAM-DATE-TIME tags will be inserted into manifests and no\nID3Timed Metadata messages will be generated. Note that irrespective\nof this parameter, if any ID3 Timed Metadata is found in HTTP Live Streaming (HLS) input,\nit will be passed through to HLS output.\n", + "locationName": "programDateTimeIntervalSeconds", "shape": "__integer" - }, + }, "SegmentDurationSeconds": { - "documentation": "Duration (in seconds) of each fragment. Actual fragments will be\nrounded to the nearest multiple of the source fragment duration.\n", - "locationName": "segmentDurationSeconds", + "documentation": "Duration (in seconds) of each fragment. Actual fragments will be\nrounded to the nearest multiple of the source fragment duration.\n", + "locationName": "segmentDurationSeconds", "shape": "__integer" - }, + }, "StreamSelection": { - "locationName": "streamSelection", + "locationName": "streamSelection", "shape": "StreamSelection" - }, + }, "UseAudioRenditionGroup": { - "documentation": "When enabled, audio streams will be placed in rendition groups in the output.", - "locationName": "useAudioRenditionGroup", + "documentation": "When enabled, audio streams will be placed in rendition groups in the output.", + "locationName": "useAudioRenditionGroup", "shape": "__boolean" } - }, + }, "type": "structure" - }, + }, "IngestEndpoint": { - "documentation": "An endpoint for ingesting source content for a Channel.", + "documentation": "An endpoint for ingesting source content for a Channel.", "members": { + "Id": { + "documentation": "The system generated unique identifier for the IngestEndpoint", + "locationName": "id", + "shape": "__string" + }, "Password": { - "documentation": "The system generated password for ingest authentication.", - "locationName": "password", + 
"documentation": "The system generated password for ingest authentication.", + "locationName": "password", "shape": "__string" - }, + }, "Url": { - "documentation": "The ingest URL to which the source stream should be sent.", - "locationName": "url", + "documentation": "The ingest URL to which the source stream should be sent.", + "locationName": "url", "shape": "__string" - }, + }, "Username": { - "documentation": "The system generated username for ingest authentication.", - "locationName": "username", + "documentation": "The system generated username for ingest authentication.", + "locationName": "username", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "InternalServerErrorException": { - "documentation": "An unexpected error occurred.", + "documentation": "An unexpected error occurred.", "error": { "httpStatusCode": 500 - }, - "exception": true, + }, + "exception": true, "members": { "Message": { - "locationName": "message", + "locationName": "message", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "ListChannelsRequest": { "members": { "MaxResults": { - "documentation": "Upper bound on number of records to return.", - "location": "querystring", - "locationName": "maxResults", + "documentation": "Upper bound on number of records to return.", + "location": "querystring", + "locationName": "maxResults", "shape": "MaxResults" - }, + }, "NextToken": { - "documentation": "A token used to resume pagination from the end of a previous request.", - "location": "querystring", - "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request.", + "location": "querystring", + "locationName": "nextToken", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "ListChannelsResponse": { "members": { "Channels": { - "documentation": "A list of Channel records.", - "locationName": "channels", + "documentation": "A list of Channel records.", + "locationName": "channels", "shape": 
"__listOfChannel" - }, + }, "NextToken": { - "documentation": "A token that can be used to resume pagination from the end of the collection.", - "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection.", + "locationName": "nextToken", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "ListOriginEndpointsRequest": { "members": { "ChannelId": { - "documentation": "When specified, the request will return only OriginEndpoints associated with the given Channel ID.", - "location": "querystring", - "locationName": "channelId", + "documentation": "When specified, the request will return only OriginEndpoints associated with the given Channel ID.", + "location": "querystring", + "locationName": "channelId", "shape": "__string" - }, + }, "MaxResults": { - "documentation": "The upper bound on the number of records to return.", - "location": "querystring", - "locationName": "maxResults", + "documentation": "The upper bound on the number of records to return.", + "location": "querystring", + "locationName": "maxResults", "shape": "MaxResults" - }, + }, "NextToken": { - "documentation": "A token used to resume pagination from the end of a previous request.", - "location": "querystring", - "locationName": "nextToken", + "documentation": "A token used to resume pagination from the end of a previous request.", + "location": "querystring", + "locationName": "nextToken", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "ListOriginEndpointsResponse": { "members": { "NextToken": { - "documentation": "A token that can be used to resume pagination from the end of the collection.", - "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection.", + "locationName": "nextToken", "shape": "__string" - }, + }, "OriginEndpoints": { - "documentation": "A list of OriginEndpoint records.", - "locationName": "originEndpoints", + 
"documentation": "A list of OriginEndpoint records.", + "locationName": "originEndpoints", "shape": "__listOfOriginEndpoint" } - }, + }, "type": "structure" - }, + }, "MaxResults": { - "max": 1000, - "min": 1, + "max": 1000, + "min": 1, "type": "integer" - }, + }, "MssEncryption": { - "documentation": "A Microsoft Smooth Streaming (MSS) encryption configuration.", + "documentation": "A Microsoft Smooth Streaming (MSS) encryption configuration.", "members": { "SpekeKeyProvider": { - "locationName": "spekeKeyProvider", + "locationName": "spekeKeyProvider", "shape": "SpekeKeyProvider" } - }, + }, "required": [ "SpekeKeyProvider" - ], + ], "type": "structure" - }, + }, "MssPackage": { - "documentation": "A Microsoft Smooth Streaming (MSS) packaging configuration.", + "documentation": "A Microsoft Smooth Streaming (MSS) packaging configuration.", "members": { "Encryption": { - "locationName": "encryption", + "locationName": "encryption", "shape": "MssEncryption" - }, + }, "ManifestWindowSeconds": { - "documentation": "The time window (in seconds) contained in each manifest.", - "locationName": "manifestWindowSeconds", + "documentation": "The time window (in seconds) contained in each manifest.", + "locationName": "manifestWindowSeconds", "shape": "__integer" - }, + }, "SegmentDurationSeconds": { - "documentation": "The duration (in seconds) of each segment.", - "locationName": "segmentDurationSeconds", + "documentation": "The duration (in seconds) of each segment.", + "locationName": "segmentDurationSeconds", "shape": "__integer" - }, + }, "StreamSelection": { - "locationName": "streamSelection", + "locationName": "streamSelection", "shape": "StreamSelection" } - }, + }, "type": "structure" - }, + }, "NotFoundException": { - "documentation": "The requested resource does not exist.", + "documentation": "The requested resource does not exist.", "error": { "httpStatusCode": 404 - }, - "exception": true, + }, + "exception": true, "members": { "Message": { - "locationName": 
"message", + "locationName": "message", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "OriginEndpoint": { - "documentation": "An OriginEndpoint resource configuration.", + "documentation": "An OriginEndpoint resource configuration.", "members": { "Arn": { - "documentation": "The Amazon Resource Name (ARN) assigned to the OriginEndpoint.", - "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) assigned to the OriginEndpoint.", + "locationName": "arn", "shape": "__string" - }, + }, "ChannelId": { - "documentation": "The ID of the Channel the OriginEndpoint is associated with.", - "locationName": "channelId", + "documentation": "The ID of the Channel the OriginEndpoint is associated with.", + "locationName": "channelId", "shape": "__string" - }, + }, "CmafPackage": { - "locationName": "cmafPackage", + "locationName": "cmafPackage", "shape": "CmafPackage" - }, + }, "DashPackage": { - "locationName": "dashPackage", + "locationName": "dashPackage", "shape": "DashPackage" - }, + }, "Description": { - "documentation": "A short text description of the OriginEndpoint.", - "locationName": "description", + "documentation": "A short text description of the OriginEndpoint.", + "locationName": "description", "shape": "__string" - }, + }, "HlsPackage": { - "locationName": "hlsPackage", + "locationName": "hlsPackage", "shape": "HlsPackage" - }, + }, "Id": { - "documentation": "The ID of the OriginEndpoint.", - "locationName": "id", + "documentation": "The ID of the OriginEndpoint.", + "locationName": "id", "shape": "__string" - }, + }, "ManifestName": { - "documentation": "A short string appended to the end of the OriginEndpoint URL.", - "locationName": "manifestName", + "documentation": "A short string appended to the end of the OriginEndpoint URL.", + "locationName": "manifestName", "shape": "__string" - }, + }, "MssPackage": { - "locationName": "mssPackage", + "locationName": "mssPackage", "shape": "MssPackage" - }, + }, 
"StartoverWindowSeconds": { - "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", - "locationName": "startoverWindowSeconds", + "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", + "locationName": "startoverWindowSeconds", "shape": "__integer" - }, + }, "TimeDelaySeconds": { - "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", - "locationName": "timeDelaySeconds", + "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", + "locationName": "timeDelaySeconds", "shape": "__integer" - }, + }, "Url": { - "documentation": "The URL of the packaged OriginEndpoint for consumption.", - "locationName": "url", + "documentation": "The URL of the packaged OriginEndpoint for consumption.", + "locationName": "url", "shape": "__string" - }, + }, "Whitelist": { - "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", - "locationName": "whitelist", + "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", + "locationName": "whitelist", "shape": "__listOf__string" } - }, + }, "type": "structure" - }, + }, "OriginEndpointCreateParameters": { - "documentation": "Configuration parameters for a new OriginEndpoint.", + "documentation": "Configuration parameters for a new OriginEndpoint.", "members": { "ChannelId": { - "documentation": "The ID of the Channel that the OriginEndpoint will be associated with.\nThis cannot be changed after the OriginEndpoint is created.\n", - "locationName": "channelId", + "documentation": "The 
ID of the Channel that the OriginEndpoint will be associated with.\nThis cannot be changed after the OriginEndpoint is created.\n", + "locationName": "channelId", "shape": "__string" - }, + }, "CmafPackage": { - "locationName": "cmafPackage", + "locationName": "cmafPackage", "shape": "CmafPackageCreateOrUpdateParameters" - }, + }, "DashPackage": { - "locationName": "dashPackage", + "locationName": "dashPackage", "shape": "DashPackage" - }, + }, "Description": { - "documentation": "A short text description of the OriginEndpoint.", - "locationName": "description", + "documentation": "A short text description of the OriginEndpoint.", + "locationName": "description", "shape": "__string" - }, + }, "HlsPackage": { - "locationName": "hlsPackage", + "locationName": "hlsPackage", "shape": "HlsPackage" - }, + }, "Id": { - "documentation": "The ID of the OriginEndpoint. The ID must be unique within the region\nand it cannot be changed after the OriginEndpoint is created.\n", - "locationName": "id", + "documentation": "The ID of the OriginEndpoint. 
The ID must be unique within the region\nand it cannot be changed after the OriginEndpoint is created.\n", + "locationName": "id", "shape": "__string" - }, + }, "ManifestName": { - "documentation": "A short string that will be used as the filename of the OriginEndpoint URL (defaults to \"index\").", - "locationName": "manifestName", + "documentation": "A short string that will be used as the filename of the OriginEndpoint URL (defaults to \"index\").", + "locationName": "manifestName", "shape": "__string" - }, + }, "MssPackage": { - "locationName": "mssPackage", + "locationName": "mssPackage", "shape": "MssPackage" - }, + }, "StartoverWindowSeconds": { - "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", - "locationName": "startoverWindowSeconds", + "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", + "locationName": "startoverWindowSeconds", "shape": "__integer" - }, + }, "TimeDelaySeconds": { - "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", - "locationName": "timeDelaySeconds", + "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", + "locationName": "timeDelaySeconds", "shape": "__integer" - }, + }, "Whitelist": { - "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", - "locationName": "whitelist", + "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", + "locationName": "whitelist", "shape": "__listOf__string" } - }, + }, "required": [ - "Id", + "Id", "ChannelId" - ], + ], "type": 
"structure" - }, + }, "OriginEndpointList": { - "documentation": "A collection of OriginEndpoint records.", + "documentation": "A collection of OriginEndpoint records.", "members": { "NextToken": { - "documentation": "A token that can be used to resume pagination from the end of the collection.", - "locationName": "nextToken", + "documentation": "A token that can be used to resume pagination from the end of the collection.", + "locationName": "nextToken", "shape": "__string" - }, + }, "OriginEndpoints": { - "documentation": "A list of OriginEndpoint records.", - "locationName": "originEndpoints", + "documentation": "A list of OriginEndpoint records.", + "locationName": "originEndpoints", "shape": "__listOfOriginEndpoint" } - }, + }, "type": "structure" - }, + }, "OriginEndpointUpdateParameters": { - "documentation": "Configuration parameters for updating an existing OriginEndpoint.", + "documentation": "Configuration parameters for updating an existing OriginEndpoint.", "members": { "CmafPackage": { - "locationName": "cmafPackage", + "locationName": "cmafPackage", "shape": "CmafPackageCreateOrUpdateParameters" - }, + }, "DashPackage": { - "locationName": "dashPackage", + "locationName": "dashPackage", "shape": "DashPackage" - }, + }, "Description": { - "documentation": "A short text description of the OriginEndpoint.", - "locationName": "description", + "documentation": "A short text description of the OriginEndpoint.", + "locationName": "description", "shape": "__string" - }, + }, "HlsPackage": { - "locationName": "hlsPackage", + "locationName": "hlsPackage", "shape": "HlsPackage" - }, + }, "ManifestName": { - "documentation": "A short string that will be appended to the end of the Endpoint URL.", - "locationName": "manifestName", + "documentation": "A short string that will be appended to the end of the Endpoint URL.", + "locationName": "manifestName", "shape": "__string" - }, + }, "MssPackage": { - "locationName": "mssPackage", + "locationName": "mssPackage", 
"shape": "MssPackage" - }, + }, "StartoverWindowSeconds": { - "documentation": "Maximum duration (in seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", - "locationName": "startoverWindowSeconds", + "documentation": "Maximum duration (in seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", + "locationName": "startoverWindowSeconds", "shape": "__integer" - }, + }, "TimeDelaySeconds": { - "documentation": "Amount of delay (in seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", - "locationName": "timeDelaySeconds", + "documentation": "Amount of delay (in seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", + "locationName": "timeDelaySeconds", "shape": "__integer" - }, + }, "Whitelist": { - "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", - "locationName": "whitelist", + "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", + "locationName": "whitelist", "shape": "__listOf__string" } - }, + }, "type": "structure" - }, + }, "PlaylistType": { "enum": [ - "NONE", - "EVENT", + "NONE", + "EVENT", "VOD" - ], + ], "type": "string" - }, + }, "Profile": { "enum": [ - "NONE", + "NONE", "HBBTV_1_5" - ], + ], "type": "string" - }, + }, "RotateChannelCredentialsRequest": { + "deprecated": true, "members": { "Id": { - "documentation": "The ID of the channel to update.", - "location": "uri", - "locationName": "id", + "documentation": "The ID of the channel to update.", + "location": "uri", + "locationName": "id", "shape": "__string" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "RotateChannelCredentialsResponse": { + 
"deprecated": true, "members": { "Arn": { - "documentation": "The Amazon Resource Name (ARN) assigned to the Channel.", - "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) assigned to the Channel.", + "locationName": "arn", "shape": "__string" - }, + }, "Description": { - "documentation": "A short text description of the Channel.", - "locationName": "description", + "documentation": "A short text description of the Channel.", + "locationName": "description", "shape": "__string" - }, + }, "HlsIngest": { - "locationName": "hlsIngest", + "locationName": "hlsIngest", "shape": "HlsIngest" - }, + }, "Id": { - "documentation": "The ID of the Channel.", - "locationName": "id", + "documentation": "The ID of the Channel.", + "locationName": "id", "shape": "__string" } - }, + }, "type": "structure" - }, + }, + "RotateIngestEndpointCredentialsRequest": { + "members": { + "Id": { + "documentation": "The ID of the channel the IngestEndpoint is on.", + "location": "uri", + "locationName": "id", + "shape": "__string" + }, + "IngestEndpointId": { + "documentation": "The id of the IngestEndpoint whose credentials should be rotated", + "location": "uri", + "locationName": "ingest_endpoint_id", + "shape": "__string" + } + }, + "required": [ + "IngestEndpointId", + "Id" + ], + "type": "structure" + }, + "RotateIngestEndpointCredentialsResponse": { + "members": { + "Arn": { + "documentation": "The Amazon Resource Name (ARN) assigned to the Channel.", + "locationName": "arn", + "shape": "__string" + }, + "Description": { + "documentation": "A short text description of the Channel.", + "locationName": "description", + "shape": "__string" + }, + "HlsIngest": { + "locationName": "hlsIngest", + "shape": "HlsIngest" + }, + "Id": { + "documentation": "The ID of the Channel.", + "locationName": "id", + "shape": "__string" + } + }, + "type": "structure" + }, "ServiceUnavailableException": { - "documentation": "An unexpected error occurred.", + "documentation": "An 
unexpected error occurred.", "error": { "httpStatusCode": 503 - }, - "exception": true, + }, + "exception": true, "members": { "Message": { - "locationName": "message", + "locationName": "message", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "SpekeKeyProvider": { - "documentation": "A configuration for accessing an external Secure Packager and Encoder Key Exchange (SPEKE) service that will provide encryption keys.", + "documentation": "A configuration for accessing an external Secure Packager and Encoder Key Exchange (SPEKE) service that will provide encryption keys.", "members": { "ResourceId": { - "documentation": "The resource ID to include in key requests.", - "locationName": "resourceId", + "documentation": "The resource ID to include in key requests.", + "locationName": "resourceId", "shape": "__string" - }, + }, "RoleArn": { - "documentation": "An Amazon Resource Name (ARN) of an IAM role that AWS Elemental\nMediaPackage will assume when accessing the key provider service.\n", - "locationName": "roleArn", + "documentation": "An Amazon Resource Name (ARN) of an IAM role that AWS Elemental\nMediaPackage will assume when accessing the key provider service.\n", + "locationName": "roleArn", "shape": "__string" - }, + }, "SystemIds": { - "documentation": "The system IDs to include in key requests.", - "locationName": "systemIds", + "documentation": "The system IDs to include in key requests.", + "locationName": "systemIds", "shape": "__listOf__string" - }, + }, "Url": { - "documentation": "The URL of the external key provider service.", - "locationName": "url", + "documentation": "The URL of the external key provider service.", + "locationName": "url", "shape": "__string" } - }, + }, "required": [ - "Url", - "ResourceId", - "RoleArn", + "Url", + "ResourceId", + "RoleArn", "SystemIds" - ], + ], "type": "structure" - }, + }, "StreamOrder": { "enum": [ - "ORIGINAL", - "VIDEO_BITRATE_ASCENDING", + "ORIGINAL", + "VIDEO_BITRATE_ASCENDING", 
"VIDEO_BITRATE_DESCENDING" - ], + ], "type": "string" - }, + }, "StreamSelection": { - "documentation": "A StreamSelection configuration.", + "documentation": "A StreamSelection configuration.", "members": { "MaxVideoBitsPerSecond": { - "documentation": "The maximum video bitrate (bps) to include in output.", - "locationName": "maxVideoBitsPerSecond", + "documentation": "The maximum video bitrate (bps) to include in output.", + "locationName": "maxVideoBitsPerSecond", "shape": "__integer" - }, + }, "MinVideoBitsPerSecond": { - "documentation": "The minimum video bitrate (bps) to include in output.", - "locationName": "minVideoBitsPerSecond", + "documentation": "The minimum video bitrate (bps) to include in output.", + "locationName": "minVideoBitsPerSecond", "shape": "__integer" - }, + }, "StreamOrder": { - "documentation": "A directive that determines the order of streams in the output.", - "locationName": "streamOrder", + "documentation": "A directive that determines the order of streams in the output.", + "locationName": "streamOrder", "shape": "StreamOrder" } - }, + }, "type": "structure" - }, + }, "TooManyRequestsException": { - "documentation": "The client has exceeded their resource or throttling limits.", + "documentation": "The client has exceeded their resource or throttling limits.", "error": { "httpStatusCode": 429 - }, - "exception": true, + }, + "exception": true, "members": { "Message": { - "locationName": "message", + "locationName": "message", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "UnprocessableEntityException": { - "documentation": "The parameters sent in the request are not valid.", + "documentation": "The parameters sent in the request are not valid.", "error": { "httpStatusCode": 422 - }, - "exception": true, + }, + "exception": true, "members": { "Message": { - "locationName": "message", + "locationName": "message", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "UpdateChannelRequest": { - 
"documentation": "Configuration parameters used to update the Channel.", + "documentation": "Configuration parameters used to update the Channel.", "members": { "Description": { - "documentation": "A short text description of the Channel.", - "locationName": "description", + "documentation": "A short text description of the Channel.", + "locationName": "description", "shape": "__string" - }, + }, "Id": { - "documentation": "The ID of the Channel to update.", - "location": "uri", - "locationName": "id", + "documentation": "The ID of the Channel to update.", + "location": "uri", + "locationName": "id", "shape": "__string" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "UpdateChannelResponse": { "members": { "Arn": { - "documentation": "The Amazon Resource Name (ARN) assigned to the Channel.", - "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) assigned to the Channel.", + "locationName": "arn", "shape": "__string" - }, + }, "Description": { - "documentation": "A short text description of the Channel.", - "locationName": "description", + "documentation": "A short text description of the Channel.", + "locationName": "description", "shape": "__string" - }, + }, "HlsIngest": { - "locationName": "hlsIngest", + "locationName": "hlsIngest", "shape": "HlsIngest" - }, + }, "Id": { - "documentation": "The ID of the Channel.", - "locationName": "id", + "documentation": "The ID of the Channel.", + "locationName": "id", "shape": "__string" } - }, + }, "type": "structure" - }, + }, "UpdateOriginEndpointRequest": { - "documentation": "Configuration parameters used to update an existing OriginEndpoint.", + "documentation": "Configuration parameters used to update an existing OriginEndpoint.", "members": { "CmafPackage": { - "locationName": "cmafPackage", + "locationName": "cmafPackage", "shape": "CmafPackageCreateOrUpdateParameters" - }, + }, "DashPackage": { - "locationName": "dashPackage", + "locationName": "dashPackage", "shape": 
"DashPackage" - }, + }, "Description": { - "documentation": "A short text description of the OriginEndpoint.", - "locationName": "description", + "documentation": "A short text description of the OriginEndpoint.", + "locationName": "description", "shape": "__string" - }, + }, "HlsPackage": { - "locationName": "hlsPackage", + "locationName": "hlsPackage", "shape": "HlsPackage" - }, + }, "Id": { - "documentation": "The ID of the OriginEndpoint to update.", - "location": "uri", - "locationName": "id", + "documentation": "The ID of the OriginEndpoint to update.", + "location": "uri", + "locationName": "id", "shape": "__string" - }, + }, "ManifestName": { - "documentation": "A short string that will be appended to the end of the Endpoint URL.", - "locationName": "manifestName", + "documentation": "A short string that will be appended to the end of the Endpoint URL.", + "locationName": "manifestName", "shape": "__string" - }, + }, "MssPackage": { - "locationName": "mssPackage", + "locationName": "mssPackage", "shape": "MssPackage" - }, + }, "StartoverWindowSeconds": { - "documentation": "Maximum duration (in seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", - "locationName": "startoverWindowSeconds", + "documentation": "Maximum duration (in seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", + "locationName": "startoverWindowSeconds", "shape": "__integer" - }, + }, "TimeDelaySeconds": { - "documentation": "Amount of delay (in seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", - "locationName": "timeDelaySeconds", + "documentation": "Amount of delay (in seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", + "locationName": 
"timeDelaySeconds", "shape": "__integer" - }, + }, "Whitelist": { - "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", - "locationName": "whitelist", + "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", + "locationName": "whitelist", "shape": "__listOf__string" } - }, + }, "required": [ "Id" - ], + ], "type": "structure" - }, + }, "UpdateOriginEndpointResponse": { "members": { "Arn": { - "documentation": "The Amazon Resource Name (ARN) assigned to the OriginEndpoint.", - "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) assigned to the OriginEndpoint.", + "locationName": "arn", "shape": "__string" - }, + }, "ChannelId": { - "documentation": "The ID of the Channel the OriginEndpoint is associated with.", - "locationName": "channelId", + "documentation": "The ID of the Channel the OriginEndpoint is associated with.", + "locationName": "channelId", "shape": "__string" - }, + }, "CmafPackage": { - "locationName": "cmafPackage", + "locationName": "cmafPackage", "shape": "CmafPackage" - }, + }, "DashPackage": { - "locationName": "dashPackage", + "locationName": "dashPackage", "shape": "DashPackage" - }, + }, "Description": { - "documentation": "A short text description of the OriginEndpoint.", - "locationName": "description", + "documentation": "A short text description of the OriginEndpoint.", + "locationName": "description", "shape": "__string" - }, + }, "HlsPackage": { - "locationName": "hlsPackage", + "locationName": "hlsPackage", "shape": "HlsPackage" - }, + }, "Id": { - "documentation": "The ID of the OriginEndpoint.", - "locationName": "id", + "documentation": "The ID of the OriginEndpoint.", + "locationName": "id", "shape": "__string" - }, + }, "ManifestName": { - "documentation": "A short string appended to the end of the OriginEndpoint URL.", - "locationName": "manifestName", + "documentation": "A short string appended to the end of 
the OriginEndpoint URL.", + "locationName": "manifestName", "shape": "__string" - }, + }, "MssPackage": { - "locationName": "mssPackage", + "locationName": "mssPackage", "shape": "MssPackage" - }, + }, "StartoverWindowSeconds": { - "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", - "locationName": "startoverWindowSeconds", + "documentation": "Maximum duration (seconds) of content to retain for startover playback.\nIf not specified, startover playback will be disabled for the OriginEndpoint.\n", + "locationName": "startoverWindowSeconds", "shape": "__integer" - }, + }, "TimeDelaySeconds": { - "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", - "locationName": "timeDelaySeconds", + "documentation": "Amount of delay (seconds) to enforce on the playback of live content.\nIf not specified, there will be no time delay in effect for the OriginEndpoint.\n", + "locationName": "timeDelaySeconds", "shape": "__integer" - }, + }, "Url": { - "documentation": "The URL of the packaged OriginEndpoint for consumption.", - "locationName": "url", + "documentation": "The URL of the packaged OriginEndpoint for consumption.", + "locationName": "url", "shape": "__string" - }, + }, "Whitelist": { - "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", - "locationName": "whitelist", + "documentation": "A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.", + "locationName": "whitelist", "shape": "__listOf__string" } - }, + }, "type": "structure" - }, + }, "__PeriodTriggersElement": { "enum": [ "ADS" - ], + ], "type": "string" - }, + }, "__boolean": { "type": "boolean" - }, + }, "__double": { "type": "double" - }, + }, "__integer": { "type": "integer" - }, + }, 
"__listOfChannel": { "member": { "shape": "Channel" - }, + }, "type": "list" - }, + }, "__listOfHlsManifest": { "member": { "shape": "HlsManifest" - }, + }, "type": "list" - }, + }, "__listOfHlsManifestCreateOrUpdateParameters": { "member": { "shape": "HlsManifestCreateOrUpdateParameters" - }, + }, "type": "list" - }, + }, "__listOfIngestEndpoint": { "member": { "shape": "IngestEndpoint" - }, + }, "type": "list" - }, + }, "__listOfOriginEndpoint": { "member": { "shape": "OriginEndpoint" - }, + }, "type": "list" - }, + }, "__listOf__PeriodTriggersElement": { "member": { "shape": "__PeriodTriggersElement" - }, + }, "type": "list" - }, + }, "__listOf__string": { "member": { "shape": "__string" - }, + }, "type": "list" - }, + }, "__long": { "type": "long" - }, + }, "__string": { "type": "string" } diff --git a/botocore/data/mq/2017-11-27/service-2.json b/botocore/data/mq/2017-11-27/service-2.json index 75c419ac..8598252e 100644 --- a/botocore/data/mq/2017-11-27/service-2.json +++ b/botocore/data/mq/2017-11-27/service-2.json @@ -36,7 +36,7 @@ "documentation" : "HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue." }, { "shape" : "ConflictException", - "documentation" : "HTTP Status Code 409: Conflict. This Broker name already exists. Retry your request with another name." + "documentation" : "HTTP Status Code 409: Conflict. This broker name already exists. Retry your request with another name." }, { "shape" : "ForbiddenException", "documentation" : "HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request." @@ -97,7 +97,7 @@ "documentation" : "HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue." }, { "shape" : "ConflictException", - "documentation" : "HTTP Status Code 409: Conflict. Retry your request." + "documentation" : "HTTP Status Code 409: Conflict. Retrying your request might resolve the issue." 
}, { "shape" : "ForbiddenException", "documentation" : "HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request." @@ -440,6 +440,9 @@ }, { "shape" : "InternalServerErrorException", "documentation" : "HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue." + }, { + "shape" : "ConflictException", + "documentation" : "HTTP Status Code 409: Conflict. Concurrent broker update detected. Retrying your request might resolve the issue." }, { "shape" : "ForbiddenException", "documentation" : "HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request." @@ -471,7 +474,7 @@ "documentation" : "HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue." }, { "shape" : "ConflictException", - "documentation" : "HTTP Status Code 409: Conflict. This configuration name already exists. Retry your request with another configuration name." + "documentation" : "HTTP Status Code 409: Conflict. Concurrent update to configuration. Retry to create a new revision." }, { "shape" : "ForbiddenException", "documentation" : "HTTP Status Code 403: Access forbidden. Correct your input and then retry your request." @@ -503,7 +506,7 @@ "documentation" : "HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue." }, { "shape" : "ConflictException", - "documentation" : "HTTP Status Code 409: Conflict. Retry your request." + "documentation" : "HTTP Status Code 409: Conflict. Retrying your request might resolve the issue." }, { "shape" : "ForbiddenException", "documentation" : "HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request." @@ -548,7 +551,7 @@ "IpAddress" : { "shape" : "__string", "locationName" : "ipAddress", - "documentation" : "The IP address of the ENI attached to the broker." 
+ "documentation" : "The IP address of the Elastic Network Interface (ENI) attached to the broker." } }, "documentation" : "Returns information about all brokers." @@ -765,7 +768,7 @@ "EngineVersion" : { "shape" : "__string", "locationName" : "engineVersion", - "documentation" : "Required. The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.0." + "documentation" : "Required. The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.6 and 5.15.0." }, "HostInstanceType" : { "shape" : "__string", @@ -858,7 +861,7 @@ "EngineVersion" : { "shape" : "__string", "locationName" : "engineVersion", - "documentation" : "Required. The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.0." + "documentation" : "Required. The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.6 and 5.15.0." }, "HostInstanceType" : { "shape" : "__string", @@ -924,7 +927,7 @@ "EngineVersion" : { "shape" : "__string", "locationName" : "engineVersion", - "documentation" : "Required. The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.0." + "documentation" : "Required. The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.6 and 5.15.0." }, "Name" : { "shape" : "__string", @@ -976,7 +979,7 @@ "EngineVersion" : { "shape" : "__string", "locationName" : "engineVersion", - "documentation" : "Required. The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.0." + "documentation" : "Required. The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.6 and 5.15.0." }, "Name" : { "shape" : "__string", @@ -1195,7 +1198,7 @@ "EngineVersion" : { "shape" : "__string", "locationName" : "engineVersion", - "documentation" : "The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.0." + "documentation" : "The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.6 and 5.15.0." 
}, "HostInstanceType" : { "shape" : "__string", @@ -1212,6 +1215,11 @@ "locationName" : "maintenanceWindowStartTime", "documentation" : "The parameters that determine the WeeklyStartTime." }, + "PendingEngineVersion" : { + "shape" : "__string", + "locationName" : "pendingEngineVersion", + "documentation" : "The version of the broker engine to upgrade to." + }, "PubliclyAccessible" : { "shape" : "__boolean", "locationName" : "publiclyAccessible", @@ -1233,7 +1241,7 @@ "documentation" : "The list of all ActiveMQ usernames for the specified broker." } }, - "documentation" : "The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.0." + "documentation" : "The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.6 and 5.15.0." }, "DescribeBrokerRequest" : { "type" : "structure", @@ -1303,7 +1311,7 @@ "EngineVersion" : { "shape" : "__string", "locationName" : "engineVersion", - "documentation" : "The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.0." + "documentation" : "The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.6 and 5.15.0." }, "HostInstanceType" : { "shape" : "__string", @@ -1320,6 +1328,11 @@ "locationName" : "maintenanceWindowStartTime", "documentation" : "The parameters that determine the WeeklyStartTime." }, + "PendingEngineVersion" : { + "shape" : "__string", + "locationName" : "pendingEngineVersion", + "documentation" : "The version of the broker engine to upgrade to." + }, "PubliclyAccessible" : { "shape" : "__boolean", "locationName" : "publiclyAccessible", @@ -1891,7 +1904,7 @@ "AuditLogGroup" : { "shape" : "__string", "locationName" : "auditLogGroup", - "documentation" : "Location of CloudWatch Log group where audit logs will be sent." + "documentation" : "The location of the CloudWatch Logs log group where audit logs are sent." 
}, "General" : { "shape" : "__boolean", @@ -1901,7 +1914,7 @@ "GeneralLogGroup" : { "shape" : "__string", "locationName" : "generalLogGroup", - "documentation" : "Location of CloudWatch Log group where general logs will be sent." + "documentation" : "The location of the CloudWatch Logs log group where general logs are sent." }, "Pending" : { "shape" : "PendingLogs", @@ -2017,11 +2030,21 @@ "UpdateBrokerInput" : { "type" : "structure", "members" : { + "AutoMinorVersionUpgrade" : { + "shape" : "__boolean", + "locationName" : "autoMinorVersionUpgrade", + "documentation" : "Enables automatic upgrades to new minor versions for brokers, as Apache releases the versions. The automatic upgrades occur during the maintenance window of the broker or after a manual broker reboot." + }, "Configuration" : { "shape" : "ConfigurationId", "locationName" : "configuration", "documentation" : "A list of information about the configuration." }, + "EngineVersion" : { + "shape" : "__string", + "locationName" : "engineVersion", + "documentation" : "The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.6 and 5.15.0." + }, "Logs" : { "shape" : "Logs", "locationName" : "logs", @@ -2033,6 +2056,11 @@ "UpdateBrokerOutput" : { "type" : "structure", "members" : { + "AutoMinorVersionUpgrade" : { + "shape" : "__boolean", + "locationName" : "autoMinorVersionUpgrade", + "documentation" : "The new value of automatic upgrades to new minor version for brokers." + }, "BrokerId" : { "shape" : "__string", "locationName" : "brokerId", @@ -2043,6 +2071,11 @@ "locationName" : "configuration", "documentation" : "The ID of the updated configuration." }, + "EngineVersion" : { + "shape" : "__string", + "locationName" : "engineVersion", + "documentation" : "The version of the broker engine to upgrade to." 
+ }, "Logs" : { "shape" : "Logs", "locationName" : "logs", @@ -2054,6 +2087,11 @@ "UpdateBrokerRequest" : { "type" : "structure", "members" : { + "AutoMinorVersionUpgrade" : { + "shape" : "__boolean", + "locationName" : "autoMinorVersionUpgrade", + "documentation" : "Enables automatic upgrades to new minor versions for brokers, as Apache releases the versions. The automatic upgrades occur during the maintenance window of the broker or after a manual broker reboot." + }, "BrokerId" : { "shape" : "__string", "location" : "uri", @@ -2065,6 +2103,11 @@ "locationName" : "configuration", "documentation" : "A list of information about the configuration." }, + "EngineVersion" : { + "shape" : "__string", + "locationName" : "engineVersion", + "documentation" : "The version of the broker engine. Note: Currently, Amazon MQ supports only 5.15.6 and 5.15.0." + }, "Logs" : { "shape" : "Logs", "locationName" : "logs", @@ -2077,6 +2120,11 @@ "UpdateBrokerResponse" : { "type" : "structure", "members" : { + "AutoMinorVersionUpgrade" : { + "shape" : "__boolean", + "locationName" : "autoMinorVersionUpgrade", + "documentation" : "The new value of automatic upgrades to new minor version for brokers." + }, "BrokerId" : { "shape" : "__string", "locationName" : "brokerId", @@ -2087,6 +2135,11 @@ "locationName" : "configuration", "documentation" : "The ID of the updated configuration." }, + "EngineVersion" : { + "shape" : "__string", + "locationName" : "engineVersion", + "documentation" : "The version of the broker engine to upgrade to." + }, "Logs" : { "shape" : "Logs", "locationName" : "logs", @@ -2431,4 +2484,4 @@ } }, "documentation" : "Amazon MQ is a managed message broker service for Apache ActiveMQ that makes it easy to set up and operate message brokers in the cloud. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols." 
-} \ No newline at end of file +} diff --git a/botocore/data/neptune/2014-10-31/service-2.sdk-extras.json b/botocore/data/neptune/2014-10-31/service-2.sdk-extras.json new file mode 100644 index 00000000..85e8a104 --- /dev/null +++ b/botocore/data/neptune/2014-10-31/service-2.sdk-extras.json @@ -0,0 +1,23 @@ + { + "version": 1.0, + "merge": { + "shapes": { + "CopyDBClusterSnapshotMessage": { + "members": { + "SourceRegion": { + "shape": "String", + "documentation": "

The ID of the region that contains the snapshot to be copied.

" + } + } + }, + "CreateDBClusterMessage": { + "members": { + "SourceRegion": { + "shape": "String", + "documentation": "

The ID of the region that contains the source for the db cluster.

" + } + } + } + } + } +} diff --git a/botocore/data/opsworkscm/2016-11-01/service-2.json b/botocore/data/opsworkscm/2016-11-01/service-2.json index cbeee334..c4b4ba9d 100644 --- a/botocore/data/opsworkscm/2016-11-01/service-2.json +++ b/botocore/data/opsworkscm/2016-11-01/service-2.json @@ -175,6 +175,21 @@ ], "documentation":"

Disassociates a node from an AWS OpsWorks CM server, and removes the node from the server's managed nodes. After a node is disassociated, the node key pair is no longer valid for accessing the configuration manager's API. For more information about how to associate a node, see AssociateNode.

A node can can only be disassociated from a server that is in a HEALTHY state. Otherwise, an InvalidStateException is thrown. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are not valid.

" }, + "ExportServerEngineAttribute":{ + "name":"ExportServerEngineAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportServerEngineAttributeRequest"}, + "output":{"shape":"ExportServerEngineAttributeResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidStateException"} + ], + "documentation":"

Exports a specified server engine attribute as a base64-encoded string. For example, you can export user data that you can use in EC2 to associate nodes with a server.

This operation is synchronous.

A ValidationException is raised when parameters of the request are not valid. A ResourceNotFoundException is thrown when the server does not exist. An InvalidStateException is thrown when the server is in any of the following states: CREATING, TERMINATED, FAILED or DELETING.

" + }, "RestoreServer":{ "name":"RestoreServer", "http":{ @@ -599,11 +614,11 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null. Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.

" + "documentation":"

This is not currently implemented for DescribeBackups requests.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + "documentation":"

This is not currently implemented for DescribeBackups requests.

" } } }, @@ -616,7 +631,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null. Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.

" + "documentation":"

This is not currently implemented for DescribeBackups requests.

" } } }, @@ -690,11 +705,11 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeServers again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null. Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.

" + "documentation":"

This is not currently implemented for DescribeServers requests.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + "documentation":"

This is not currently implemented for DescribeServers requests.

" } } }, @@ -707,7 +722,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeServers again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null. Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.

" + "documentation":"

This is not currently implemented for DescribeServers requests.

" } } }, @@ -764,6 +779,40 @@ "type":"list", "member":{"shape":"EngineAttribute"} }, + "ExportServerEngineAttributeRequest":{ + "type":"structure", + "required":[ + "ExportAttributeName", + "ServerName" + ], + "members":{ + "ExportAttributeName":{ + "shape":"String", + "documentation":"

The name of the export attribute. Currently supported export attribute is \"Userdata\" which exports a userdata script filled out with parameters provided in the InputAttributes list.

" + }, + "ServerName":{ + "shape":"ServerName", + "documentation":"

The name of the Server to which the attribute is being exported from

" + }, + "InputAttributes":{ + "shape":"EngineAttributes", + "documentation":"

The list of engine attributes. The list type is EngineAttribute. EngineAttribute is a pair of attribute name and value. For ExportAttributeName \"Userdata\", currently supported input attribute names are: - \"RunList\": For Chef, an ordered list of roles and/or recipes that are run in the exact order. For Puppet, this parameter is ignored. - \"OrganizationName\": For Chef, an organization name. AWS OpsWorks for Chef Server always creates the organization \"default\". For Puppet, this parameter is ignored. - \"NodeEnvironment\": For Chef, a node environment (eg. development, staging, onebox). For Puppet, this parameter is ignored. - \"NodeClientVersion\": For Chef, version of Chef Engine (3 numbers separated by dots, eg. \"13.8.5\"). If empty, it uses the latest one. For Puppet, this parameter is ignored.

" + } + } + }, + "ExportServerEngineAttributeResponse":{ + "type":"structure", + "members":{ + "EngineAttribute":{ + "shape":"EngineAttribute", + "documentation":"

The requested engine attribute pair with attribute name and value.

" + }, + "ServerName":{ + "shape":"ServerName", + "documentation":"

The requested ServerName.

" + } + } + }, "InstanceProfileArn":{ "type":"string", "pattern":"arn:aws:iam::[0-9]{12}:instance-profile/.*" @@ -1143,5 +1192,5 @@ "exception":true } }, - "documentation":"AWS OpsWorks CM

AWS OpsWorks for configuration management (CM) is a service that runs and manages configuration management servers.

Glossary of terms

  • Server: A configuration management server that can be highly-available. The configuration management server runs on an Amazon Elastic Compute Cloud (EC2) instance, and may use various other AWS services, such as Amazon Relational Database Service (RDS) and Elastic Load Balancing. A server is a generic abstraction over the configuration manager that you want to use, much like Amazon RDS. In AWS OpsWorks CM, you do not start or stop servers. After you create servers, they continue to run until they are deleted.

  • Engine: The engine is the specific configuration manager that you want to use. Valid values in this release include Chef and Puppet.

  • Backup: This is an application-level backup of the data that the configuration manager stores. AWS OpsWorks CM creates an S3 bucket for backups when you launch the first server. A backup maintains a snapshot of a server's configuration-related attributes at the time the backup starts.

  • Events: Events are always related to a server. Events are written during server creation, when health checks run, when backups are created, when system maintenance is performed, etc. When you delete a server, the server's events are also deleted.

  • Account attributes: Every account has attributes that are assigned in the AWS OpsWorks CM database. These attributes store information about configuration limits (servers, backups, etc.) and your customer account.

Endpoints

AWS OpsWorks CM supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Your servers can only be accessed or managed within the endpoint in which they are created.

  • opsworks-cm.us-east-1.amazonaws.com

  • opsworks-cm.us-west-2.amazonaws.com

  • opsworks-cm.eu-west-1.amazonaws.com

Throttling limits

All API operations allow for five requests per second with a burst of 10 requests per second.

" + "documentation":"AWS OpsWorks CM

AWS OpsWorks for configuration management (CM) is a service that runs and manages configuration management servers. You can use AWS OpsWorks CM to create and manage AWS OpsWorks for Chef Automate and AWS OpsWorks for Puppet Enterprise servers, and add or remove nodes for the servers to manage.

Glossary of terms

  • Server: A configuration management server that can be highly-available. The configuration management server runs on an Amazon Elastic Compute Cloud (EC2) instance, and may use various other AWS services, such as Amazon Relational Database Service (RDS) and Elastic Load Balancing. A server is a generic abstraction over the configuration manager that you want to use, much like Amazon RDS. In AWS OpsWorks CM, you do not start or stop servers. After you create servers, they continue to run until they are deleted.

  • Engine: The engine is the specific configuration manager that you want to use. Valid values in this release include Chef and Puppet.

  • Backup: This is an application-level backup of the data that the configuration manager stores. AWS OpsWorks CM creates an S3 bucket for backups when you launch the first server. A backup maintains a snapshot of a server's configuration-related attributes at the time the backup starts.

  • Events: Events are always related to a server. Events are written during server creation, when health checks run, when backups are created, when system maintenance is performed, etc. When you delete a server, the server's events are also deleted.

  • Account attributes: Every account has attributes that are assigned in the AWS OpsWorks CM database. These attributes store information about configuration limits (servers, backups, etc.) and your customer account.

Endpoints

AWS OpsWorks CM supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Your servers can only be accessed or managed within the endpoint in which they are created.

  • opsworks-cm.us-east-1.amazonaws.com

  • opsworks-cm.us-east-2.amazonaws.com

  • opsworks-cm.us-west-1.amazonaws.com

  • opsworks-cm.us-west-2.amazonaws.com

  • opsworks-cm.ap-northeast-1.amazonaws.com

  • opsworks-cm.ap-southeast-1.amazonaws.com

  • opsworks-cm.ap-southeast-2.amazonaws.com

  • opsworks-cm.eu-central-1.amazonaws.com

  • opsworks-cm.eu-west-1.amazonaws.com

Throttling limits

All API operations allow for five requests per second with a burst of 10 requests per second.

" } diff --git a/botocore/data/organizations/2016-11-28/service-2.json b/botocore/data/organizations/2016-11-28/service-2.json index 9adbd76f..3265dbdd 100644 --- a/botocore/data/organizations/2016-11-28/service-2.json +++ b/botocore/data/organizations/2016-11-28/service-2.json @@ -10,7 +10,6 @@ "serviceId":"Organizations", "signatureVersion":"v4", "targetPrefix":"AWSOrganizationsV20161128", - "timestampFormat":"unixTimestamp", "uid":"organizations-2016-11-28" }, "operations":{ @@ -35,7 +34,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"AccessDeniedForDependencyException"} ], - "documentation":"

Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.

This operation can be called only by the following principals when they also have the relevant IAM permissions:

  • Invitation to join or Approve all features request handshakes: only a principal from the member account.

    The user who calls the API for an invitation to join must have the organizations:AcceptHandshake permission. If you enabled all features in the organization, then the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named OrgsServiceLinkedRoleName. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

  • Enable all features final confirmation handshake: only a principal from the master account.

    For more information about invitations, see Inviting an AWS Account to Join Your Organization in the AWS Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that it is deleted.

" + "documentation":"

Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.

This operation can be called only by the following principals when they also have the relevant IAM permissions:

  • Invitation to join or Approve all features request handshakes: only a principal from the member account.

    The user who calls the API for an invitation to join must have the organizations:AcceptHandshake permission. If you enabled all features in the organization, then the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

  • Enable all features final confirmation handshake: only a principal from the master account.

    For more information about invitations, see Inviting an AWS Account to Join Your Organization in the AWS Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that it is deleted.

" }, "AttachPolicy":{ "name":"AttachPolicy", @@ -97,7 +96,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. If you want to check the status of the request later, you need the OperationId response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

The user who calls the API for an invitation to join must have the organizations:CreateAccount permission. If you enabled all features in the organization, then the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named OrgsServiceLinkedRoleName. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

The user in the master account who calls this API must also have the iam:CreateRole permission because AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the End User Licence Agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, then contact AWS Customer Support.

  • Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable this, then only the account root user can access billing information. For information about how to disable this for an account, see Granting Access to Your Billing Information and Tools.

" + "documentation":"

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations will create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

" }, "CreateOrganization":{ "name":"CreateOrganization", @@ -197,7 +196,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Deletes the organization. You can delete an organization only by using credentials from the master account. The organization must be empty of member accounts, organizational units (OUs), and policies.

" + "documentation":"

Deletes the organization. You can delete an organization only by using credentials from the master account. The organization must be empty of member accounts.

" }, "DeleteOrganizationalUnit":{ "name":"DeleteOrganizationalUnit", @@ -473,6 +472,7 @@ "errors":[ {"shape":"AccessDeniedException"}, {"shape":"AWSOrganizationsNotInUseException"}, + {"shape":"AccountOwnerNotVerifiedException"}, {"shape":"ConcurrentModificationException"}, {"shape":"HandshakeConstraintViolationException"}, {"shape":"DuplicateHandshakeException"}, @@ -821,7 +821,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

Your account is not a member of an organization. To make this request, you must use the credentials of an account that belongs to an organization.

", + "documentation":"

Your account isn't a member of an organization. To make this request, you must use the credentials of an account that belongs to an organization.

", "exception":true }, "AcceptHandshakeRequest":{ @@ -857,7 +857,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"AccessDeniedForDependencyExceptionReason"} }, - "documentation":"

The operation you attempted requires you to have the iam:CreateServiceLinkedRole so that Organizations can create the required service-linked role. You do not have that permission.

", + "documentation":"

The operation that you attempted requires you to have the iam:CreateServiceLinkedRole so that AWS Organizations can create the required service-linked role. You don't have that permission.

", "exception":true }, "AccessDeniedForDependencyExceptionReason":{ @@ -917,6 +917,7 @@ "type":"string", "max":50, "min":1, + "pattern":"[\\u0020-\\u007E]+", "sensitive":true }, "AccountNotFoundException":{ @@ -924,7 +925,15 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find an AWS account with the AccountId that you specified, or the account whose credentials you used to make this request is not a member of an organization.

", + "documentation":"

We can't find an AWS account with the AccountId that you specified, or the account whose credentials you used to make this request isn't a member of an organization.

", + "exception":true + }, + "AccountOwnerNotVerifiedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

You can't invite an existing account to your organization until you verify that you own the email address associated with the master account. For more information, see Email Address Verification in the AWS Organizations User Guide.

", "exception":true }, "AccountStatus":{ @@ -1015,7 +1024,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find an organizational unit (OU) or AWS account with the ChildId that you specified.

", + "documentation":"

We can't find an organizational unit (OU) or AWS account with the ChildId that you specified.

", "exception":true }, "ChildType":{ @@ -1043,7 +1052,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"ConstraintViolationExceptionReason"} }, - "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to removing the last SCP from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or, The number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations, or contact AWS Support to request an increase in the number of accounts.

    Note: deleted and closed accounts still count toward your limit.

    If you get receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Customer Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes you can send in one day.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of organizational units you can have in an organization.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an organizational unit tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports consolidated billing features only cannot perform this operation.

  • POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of policies that you can have in an organization.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that does not yet have enough information to exist as a stand-alone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that does not yet have enough information to exist as a stand-alone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this account, you first must associate a payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide contact a valid address and phone number for the master account. Then try the operation again.

", + "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to removing the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

", "exception":true }, "ConstraintViolationExceptionReason":{ @@ -1064,7 +1073,9 @@ "ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED", "MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE", "MASTER_ACCOUNT_MISSING_CONTACT_INFO", - "ORGANIZATION_NOT_IN_ALL_FEATURES_MODE" + "ORGANIZATION_NOT_IN_ALL_FEATURES_MODE", + "EMAIL_VERIFICATION_CODE_EXPIRED", + "WAIT_PERIOD_ACTIVE" ] }, "CreateAccountFailureReason":{ @@ -1087,7 +1098,7 @@ "members":{ "Email":{ "shape":"Email", - "documentation":"

The email address of the owner to assign to the new member account. This email address must not already be associated with another AWS account. You must use a valid email address to complete account creation. You cannot access the root user of the account or remove an account that was created with an invalid email address.

" + "documentation":"

The email address of the owner to assign to the new member account. This email address must not already be associated with another AWS account. You must use a valid email address to complete account creation. You can't access the root user of the account or remove an account that was created with an invalid email address.

" }, "AccountName":{ "shape":"AccountName", @@ -1095,11 +1106,11 @@ }, "RoleName":{ "shape":"RoleName", - "documentation":"

(Optional)

The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account.

If you do not specify this parameter, the role name defaults to OrganizationAccountAccessRole.

For more information about how to use this role to access the member account, see Accessing and Administering the Member Accounts in Your Organization in the AWS Organizations User Guide, and steps 2 and 3 in Tutorial: Delegate Access Across AWS Accounts Using IAM Roles in the IAM User Guide.

The regex pattern that is used to validate this parameter is a string of characters that can consist of uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

" + "documentation":"

(Optional)

The name of an IAM role that AWS Organizations automatically preconfigures in the new member account. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account.

If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

For more information about how to use this role to access the member account, see Accessing and Administering the Member Accounts in Your Organization in the AWS Organizations User Guide, and steps 2 and 3 in Tutorial: Delegate Access Across AWS Accounts Using IAM Roles in the IAM User Guide.

The regex pattern that is used to validate this parameter is a string of characters that can consist of uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

" }, "IamUserAccessToBilling":{ "shape":"IAMUserAccessToBilling", - "documentation":"

If set to ALLOW, the new account enables IAM users to access account billing information if they have the required permissions. If set to DENY, then only the root user of the new account can access account billing information. For more information, see Activating Access to the Billing and Cost Management Console in the AWS Billing and Cost Management User Guide.

If you do not specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

" + "documentation":"

If set to ALLOW, the new account enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see Activating Access to the Billing and Cost Management Console in the AWS Billing and Cost Management User Guide.

If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

" } } }, @@ -1112,7 +1123,7 @@ "members":{ "CreateAccountStatus":{ "shape":"CreateAccountStatus", - "documentation":"

A structure that contains details about the request to create an account. This response structure might not be fully populated when you first receive it because account creation is an asynchronous process. You can pass the returned CreateAccountStatus ID as a parameter to DescribeCreateAccountStatus to get status about the progress of the request at later times.

" + "documentation":"

A structure that contains details about the request to create an account. This response structure might not be fully populated when you first receive it because account creation is an asynchronous process. You can pass the returned CreateAccountStatus ID as a parameter to DescribeCreateAccountStatus to get status about the progress of the request at later times. You can also check the AWS CloudTrail log for the CreateAccountResult event. For more information, see Monitoring the Activity in Your Organization in the AWS Organizations User Guide.

" } } }, @@ -1167,7 +1178,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find a create account request with the CreateAccountRequestId that you specified.

", + "documentation":"

We can't find a create account request with the CreateAccountRequestId that you specified.

", "exception":true }, "CreateAccountStatuses":{ @@ -1402,7 +1413,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find the destination container (a root or OU) with the ParentId that you specified.

", + "documentation":"

We can't find the destination container (a root or OU) with the ParentId that you specified.

", "exception":true }, "DetachPolicyRequest":{ @@ -1479,7 +1490,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

An organizational unit (OU) with the same name already exists.

", + "documentation":"

An OU with the same name already exists.

", "exception":true }, "DuplicatePolicyAttachmentException":{ @@ -1580,7 +1591,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

AWS Organizations could not perform the operation because your organization has not finished initializing. This can take up to an hour. Try again later. If after one hour you continue to receive this error, contact AWS Customer Support.

", + "documentation":"

AWS Organizations couldn't perform the operation because your organization hasn't finished initializing. This can take up to an hour. Try again later. If after one hour you continue to receive this error, contact AWS Support.

", "exception":true }, "GenericArn":{ @@ -1643,7 +1654,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"HandshakeConstraintViolationExceptionReason"} }, - "documentation":"

The requested operation would violate the constraint identified in the reason code.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note: deleted and closed accounts still count toward your limit.

    If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Customer Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes you can send in one day.

  • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

  • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

  • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You cannot issue new invitations to join an organization while it is in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

  • PAYMENT_INSTRUMENT_REQUIRED: You cannot complete the operation with an account that does not have a payment instrument, such as a credit card, associated with it.

  • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

  • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

", + "documentation":"

The requested operation would violate the constraint identified in the reason code.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note that deleted and closed accounts still count toward your limit.

    If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

  • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

  • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations to join an organization while it's in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

  • PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an account that doesn't have a payment instrument, such as a credit card, associated with it.

  • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

  • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

", "exception":true }, "HandshakeConstraintViolationExceptionReason":{ @@ -1682,7 +1693,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find a handshake with the HandshakeId that you specified.

", + "documentation":"

We can't find a handshake with the HandshakeId that you specified.

", "exception":true }, "HandshakeNotes":{ @@ -1792,7 +1803,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

You can't perform the operation on the handshake in its current state. For example, you can't cancel a handshake that was already accepted, or accept a handshake that was already declined.

", + "documentation":"

You can't perform the operation on the handshake in its current state. For example, you can't cancel a handshake that was already accepted or accept a handshake that was already declined.

", "exception":true }, "InvalidInputException":{ @@ -1801,7 +1812,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"InvalidInputExceptionReason"} }, - "documentation":"

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and cannot be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that is not valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that is not valid. A role name can’t begin with the reserved prefix 'AWSServiceRoleFor'.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid ARN for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

", + "documentation":"

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified a value that isn't valid for that parameter.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

", "exception":true }, "InvalidInputExceptionReason":{ @@ -1834,7 +1845,7 @@ "members":{ "Target":{ "shape":"HandshakeParty", - "documentation":"

The identifier (ID) of the AWS account that you want to invite to join your organization. This is a JSON object that contains the following elements:

{ \"Type\": \"ACCOUNT\", \"Id\": \"< account id number >\" }

If you use the AWS CLI, you can submit this as a single string, similar to the following example:

--target Id=123456789012,Type=ACCOUNT

If you specify \"Type\": \"ACCOUNT\", then you must provide the AWS account ID number as the Id. If you specify \"Type\": \"EMAIL\", then you must specify the email address that is associated with the account.

--target Id=bill@example.com,Type=EMAIL

" + "documentation":"

The identifier (ID) of the AWS account that you want to invite to join your organization. This is a JSON object that contains the following elements:

{ \"Type\": \"ACCOUNT\", \"Id\": \"< account id number >\" }

If you use the AWS CLI, you can submit this as a single string, similar to the following example:

--target Id=123456789012,Type=ACCOUNT

If you specify \"Type\": \"ACCOUNT\", then you must provide the AWS account ID number as the Id. If you specify \"Type\": \"EMAIL\", then you must specify the email address that is associated with the account.

--target Id=diego@example.com,Type=EMAIL

" }, "Notes":{ "shape":"HandshakeNotes", @@ -1860,7 +1871,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -1891,7 +1902,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -1917,7 +1928,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -1955,7 +1966,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -1985,7 +1996,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -2015,7 +2026,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -2045,7 +2056,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -2076,7 +2087,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -2107,7 +2118,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -2145,7 +2156,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -2176,7 +2187,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -2202,7 +2213,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -2233,7 +2244,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Use this to limit the number of results you want included in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" + "documentation":"

(Optional) Use this to limit the number of results you want included per page in the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

" } } }, @@ -2255,7 +2266,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

The provided policy document does not meet the requirements of the specified policy type. For example, the syntax might be incorrect. For details about service control policy syntax, see Service Control Policy Syntax in the AWS Organizations User Guide.

", + "documentation":"

The provided policy document doesn't meet the requirements of the specified policy type. For example, the syntax might be incorrect. For details about service control policy syntax, see Service Control Policy Syntax in the AWS Organizations User Guide.

", "exception":true }, "MasterCannotLeaveOrganizationException":{ @@ -2349,7 +2360,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

The organization isn't empty. To delete an organization, you must first remove all accounts except the master account, delete all organizational units (OUs), and delete all policies.

", + "documentation":"

The organization isn't empty. To delete an organization, you must first remove all accounts except the master account, delete all OUs, and delete all policies.

", "exception":true }, "OrganizationalUnit":{ @@ -2388,7 +2399,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

The specified organizational unit (OU) is not empty. Move all accounts to another root or to other OUs, remove all child OUs, and then try the operation again.

", + "documentation":"

The specified OU is not empty. Move all accounts to another root or to other OUs, remove all child OUs, and try the operation again.

", "exception":true }, "OrganizationalUnitNotFoundException":{ @@ -2396,7 +2407,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find an organizational unit (OU) with the OrganizationalUnitId that you specified.

", + "documentation":"

We can't find an OU with the OrganizationalUnitId that you specified.

", "exception":true }, "OrganizationalUnits":{ @@ -2426,7 +2437,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find a root or organizational unit (OU) with the ParentId that you specified.

", + "documentation":"

We can't find a root or OU with the ParentId that you specified.

", "exception":true }, "ParentType":{ @@ -2480,7 +2491,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

The policy is attached to one or more entities. You must detach it from all roots, organizational units (OUs), and accounts before performing this operation.

", + "documentation":"

The policy is attached to one or more entities. You must detach it from all roots, OUs, and accounts before performing this operation.

", "exception":true }, "PolicyName":{ @@ -2501,7 +2512,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find a policy with the PolicyId that you specified.

", + "documentation":"

We can't find a policy with the PolicyId that you specified.

", "exception":true }, "PolicySummary":{ @@ -2581,7 +2592,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

You can't use the specified policy type with the feature set currently enabled for this organization. For example, you can enable service control policies (SCPs) only after you enable all features in the organization. For more information, see Enabling and Disabling a Policy Type on a Root in the AWS Organizations User Guide.

", + "documentation":"

You can't use the specified policy type with the feature set currently enabled for this organization. For example, you can enable SCPs only after you enable all features in the organization. For more information, see Enabling and Disabling a Policy Type on a Root in the AWS Organizations User Guide.

", "exception":true }, "PolicyTypeNotEnabledException":{ @@ -2589,7 +2600,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

The specified policy type is not currently enabled in this root. You cannot attach policies of the specified type to entities in a root until you enable that type in the root. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

", + "documentation":"

The specified policy type isn't currently enabled in this root. You can't attach policies of the specified type to entities in a root until you enable that type in the root. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

", "exception":true }, "PolicyTypeStatus":{ @@ -2672,7 +2683,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find a root with the RootId that you specified.

", + "documentation":"

We can't find a root with the RootId that you specified.

", "exception":true }, "Roots":{ @@ -2698,7 +2709,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find a source root or OU with the ParentId that you specified.

", + "documentation":"

We can't find a source root or OU with the ParentId that you specified.

", "exception":true }, "TargetName":{ @@ -2711,7 +2722,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

We can't find a root, OU, or account with the TargetId that you specified.

", + "documentation":"

We can't find a root, OU, or account with the TargetId that you specified.

", "exception":true }, "TargetType":{ diff --git a/botocore/data/polly/2016-06-10/service-2.json b/botocore/data/polly/2016-06-10/service-2.json index 8d78aed4..bfd64915 100644 --- a/botocore/data/polly/2016-06-10/service-2.json +++ b/botocore/data/polly/2016-06-10/service-2.json @@ -358,6 +358,7 @@ "LanguageCode":{ "type":"string", "enum":[ + "cmn-CN", "cy-GB", "da-DK", "de-DE", @@ -822,7 +823,7 @@ }, "OutputFormat":{ "shape":"OutputFormat", - "documentation":"

The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.

" + "documentation":"

The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.

When pcm is used, the content returned is audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format.

" }, "SampleRate":{ "shape":"SampleRate", @@ -1007,7 +1008,8 @@ "Vicki", "Takumi", "Seoyeon", - "Aditi" + "Aditi", + "Zhiyu" ] }, "VoiceList":{ diff --git a/botocore/data/rds/2014-09-01/service-2.json b/botocore/data/rds/2014-09-01/service-2.json index ef321ed6..727e854f 100644 --- a/botocore/data/rds/2014-09-01/service-2.json +++ b/botocore/data/rds/2014-09-01/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"rds", "serviceAbbreviation":"Amazon RDS", "serviceFullName":"Amazon Relational Database Service", + "serviceId":"RDS", "signatureVersion":"v4", "xmlNamespace":"http://rds.amazonaws.com/doc/2014-09-01/", "protocol":"query" diff --git a/botocore/data/rds/2014-10-31/paginators-1.json b/botocore/data/rds/2014-10-31/paginators-1.json index fdae7c73..3ca4c189 100644 --- a/botocore/data/rds/2014-10-31/paginators-1.json +++ b/botocore/data/rds/2014-10-31/paginators-1.json @@ -108,6 +108,12 @@ "limit_key": "NumberOfLines", "more_results": "AdditionalDataPending", "result_key": "LogFileData" + }, + "DescribeDBClusters": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusters" } } } diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 66e98966..fe4e76ad 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -25,7 +25,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBClusterRoleQuotaExceededFault"} ], - "documentation":"

Associates an Identity and Access Management (IAM) role from an Aurora DB cluster. For more information, see Authorizing Amazon Aurora to Access Other AWS Services On Your Behalf.

" + "documentation":"

Associates an Identity and Access Management (IAM) role with an Aurora DB cluster. For more information, see Authorizing Amazon Aurora MySQL to Access Other AWS Services on Your Behalf in the Amazon Aurora User Guide.

" }, "AddSourceIdentifierToSubscription":{ "name":"AddSourceIdentifierToSubscription", @@ -70,7 +70,9 @@ "resultWrapper":"ApplyPendingMaintenanceActionResult" }, "errors":[ - {"shape":"ResourceNotFoundFault"} + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBInstanceStateFault"} ], "documentation":"

Applies a pending maintenance action to a resource (for example, to a DB instance).

" }, @@ -108,7 +110,7 @@ {"shape":"DBClusterNotFoundFault"}, {"shape":"InvalidDBClusterStateFault"} ], - "documentation":"

Backtracks a DB cluster to a specific time, without creating a new DB cluster.

For more information on backtracking, see Backtracking an Aurora DB Cluster in the Amazon RDS User Guide.

" + "documentation":"

Backtracks a DB cluster to a specific time, without creating a new DB cluster.

For more information on backtracking, see Backtracking an Aurora DB Cluster in the Amazon Aurora User Guide.

" }, "CopyDBClusterParameterGroup":{ "name":"CopyDBClusterParameterGroup", @@ -147,7 +149,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:

  • KmsKeyId - The AWS Key Management System (AWS KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region.

  • PreSignedUrl - A URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot action to be called in the source AWS Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied.

    The pre-signed URL request must contain the following parameter values:

    • KmsKeyId - The KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

    • DestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.

    • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

    To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

  • TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted DB cluster snapshots from one AWS Region to another, see Copying a DB Cluster Snapshot in the Same Account, Either in the Same Region or Across Regions in the Amazon RDS User Guide.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:

  • KmsKeyId - The AWS Key Management System (AWS KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region.

  • PreSignedUrl - A URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot action to be called in the source AWS Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied.

    The pre-signed URL request must contain the following parameter values:

    • KmsKeyId - The KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

    • DestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.

    • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

    To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

  • TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted DB cluster snapshots from one AWS Region to another, see Copying a Snapshot in the Amazon Aurora User Guide.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "CopyDBParameterGroup":{ "name":"CopyDBParameterGroup", @@ -233,7 +235,7 @@ {"shape":"DBInstanceNotFoundFault"}, {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"} ], - "documentation":"

Creates a new Amazon Aurora DB cluster.

You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon RDS MySQL DB instance. For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier is encrypted, you must also specify the PreSignedUrl parameter.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Creates a new Amazon Aurora DB cluster.

You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon RDS MySQL DB instance. For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier is encrypted, you must also specify the PreSignedUrl parameter.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "CreateDBClusterParameterGroup":{ "name":"CreateDBClusterParameterGroup", @@ -250,7 +252,7 @@ {"shape":"DBParameterGroupQuotaExceededFault"}, {"shape":"DBParameterGroupAlreadyExistsFault"} ], - "documentation":"

Creates a new DB cluster parameter group.

Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster. When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Creates a new DB cluster parameter group.

Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster. When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "CreateDBClusterSnapshot":{ "name":"CreateDBClusterSnapshot", @@ -270,7 +272,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"InvalidDBClusterSnapshotStateFault"} ], - "documentation":"

Creates a snapshot of a DB cluster. For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Creates a snapshot of a DB cluster. For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "CreateDBInstance":{ "name":"CreateDBInstance", @@ -337,7 +339,7 @@ {"shape":"StorageTypeNotSupportedFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Creates a new DB instance that acts as a Read Replica for an existing source DB instance. You can create a Read Replica for a DB instance running MySQL, MariaDB, or PostgreSQL. For more information, see Working with PostgreSQL, MySQL, and MariaDB Read Replicas.

Amazon Aurora doesn't support this action. You must call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All Read Replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified following.

Your source DB instance must have backup retention enabled.

" + "documentation":"

Creates a new DB instance that acts as a Read Replica for an existing source DB instance. You can create a Read Replica for a DB instance running MySQL, MariaDB, or PostgreSQL. For more information, see Working with PostgreSQL, MySQL, and MariaDB Read Replicas in the Amazon RDS User Guide.

Amazon Aurora doesn't support this action. You must call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All Read Replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified following.

Your source DB instance must have backup retention enabled.

" }, "CreateDBParameterGroup":{ "name":"CreateDBParameterGroup", @@ -470,7 +472,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"InvalidDBClusterSnapshotStateFault"} ], - "documentation":"

The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DeleteDBClusterParameterGroup":{ "name":"DeleteDBClusterParameterGroup", @@ -483,7 +485,7 @@ {"shape":"InvalidDBParameterGroupStateFault"}, {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DeleteDBClusterSnapshot":{ "name":"DeleteDBClusterSnapshot", @@ -500,7 +502,7 @@ {"shape":"InvalidDBClusterSnapshotStateFault"}, {"shape":"DBClusterSnapshotNotFoundFault"} ], - "documentation":"

Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

The DB cluster snapshot must be in the available state to be deleted.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

The DB cluster snapshot must be in the available state to be deleted.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DeleteDBInstance":{ "name":"DeleteDBInstance", @@ -653,7 +655,7 @@ {"shape":"DBClusterNotFoundFault"}, {"shape":"DBClusterBacktrackNotFoundFault"} ], - "documentation":"

Returns information about backtracks for a DB cluster.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Returns information about backtracks for a DB cluster.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeDBClusterParameterGroups":{ "name":"DescribeDBClusterParameterGroups", @@ -669,7 +671,7 @@ "errors":[ {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list will contain only the description of the specified DB cluster parameter group.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list will contain only the description of the specified DB cluster parameter group.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeDBClusterParameters":{ "name":"DescribeDBClusterParameters", @@ -685,7 +687,7 @@ "errors":[ {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Returns the detailed parameter list for a particular DB cluster parameter group.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Returns the detailed parameter list for a particular DB cluster parameter group.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeDBClusterSnapshotAttributes":{ "name":"DescribeDBClusterSnapshotAttributes", @@ -717,7 +719,7 @@ "errors":[ {"shape":"DBClusterSnapshotNotFoundFault"} ], - "documentation":"

Returns information about DB cluster snapshots. This API action supports pagination.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Returns information about DB cluster snapshots. This API action supports pagination.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeDBClusters":{ "name":"DescribeDBClusters", @@ -733,7 +735,7 @@ "errors":[ {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

Returns information about provisioned Aurora DB clusters. This API supports pagination.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Returns information about provisioned Aurora DB clusters. This API supports pagination.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeDBEngineVersions":{ "name":"DescribeDBEngineVersions", @@ -887,7 +889,7 @@ "shape":"DescribeEngineDefaultClusterParametersResult", "resultWrapper":"DescribeEngineDefaultClusterParametersResult" }, - "documentation":"

Returns the default engine and system parameter information for the cluster database engine.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Returns the default engine and system parameter information for the cluster database engine.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeEngineDefaultParameters":{ "name":"DescribeEngineDefaultParameters", @@ -1097,7 +1099,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"InvalidDBInstanceStateFault"} ], - "documentation":"

Forces a failover for a DB cluster.

A failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).

Amazon Aurora will automatically fail over to an Aurora Replica, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a primary instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Forces a failover for a DB cluster.

A failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).

Amazon Aurora will automatically fail over to an Aurora Replica, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a primary instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1115,7 +1117,7 @@ {"shape":"DBSnapshotNotFoundFault"}, {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

Lists all tags on an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

" + "documentation":"

Lists all tags on an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" }, "ModifyCurrentDBClusterCapacity":{ "name":"ModifyCurrentDBClusterCapacity", @@ -1133,7 +1135,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"InvalidDBClusterCapacityFault"} ], - "documentation":"

Set the capacity of an Aurora Serverless DB cluster to a specific value.

Aurora Serverless scales seamlessly based on the workload on the DB cluster. In some cases, the capacity might not scale fast enough to meet a sudden change in workload, such as a large number of new transactions. Call ModifyCurrentDBClusterCapacity to set the capacity explicitly.

After this call sets the DB cluster capacity, Aurora Serverless can automatically scale the DB cluster based on the cooldown period for scaling up and the cooldown period for scaling down.

For more information about Aurora Serverless, see Using Amazon Aurora Serverless in the Amazon RDS User Guide.

If you call ModifyCurrentDBClusterCapacity with the default TimeoutAction, connections that prevent Aurora Serverless from finding a scaling point might be dropped. For more information about scaling points, see Autoscaling for Aurora Serverless in the Amazon RDS User Guide.

" + "documentation":"

Set the capacity of an Aurora Serverless DB cluster to a specific value.

Aurora Serverless scales seamlessly based on the workload on the DB cluster. In some cases, the capacity might not scale fast enough to meet a sudden change in workload, such as a large number of new transactions. Call ModifyCurrentDBClusterCapacity to set the capacity explicitly.

After this call sets the DB cluster capacity, Aurora Serverless can automatically scale the DB cluster based on the cooldown period for scaling up and the cooldown period for scaling down.

For more information about Aurora Serverless, see Using Amazon Aurora Serverless in the Amazon Aurora User Guide.

If you call ModifyCurrentDBClusterCapacity with the default TimeoutAction, connections that prevent Aurora Serverless from finding a scaling point might be dropped. For more information about scaling points, see Autoscaling for Aurora Serverless in the Amazon Aurora User Guide.

" }, "ModifyDBCluster":{ "name":"ModifyDBCluster", @@ -1159,7 +1161,7 @@ {"shape":"InvalidDBInstanceStateFault"}, {"shape":"DBClusterAlreadyExistsFault"} ], - "documentation":"

Modify a setting for an Amazon Aurora DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Modify a setting for an Amazon Aurora DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "ModifyDBClusterParameterGroup":{ "name":"ModifyDBClusterParameterGroup", @@ -1176,7 +1178,7 @@ {"shape":"DBParameterGroupNotFoundFault"}, {"shape":"InvalidDBParameterGroupStateFault"} ], - "documentation":"

Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB cluster associated with the parameter group before the change can take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

" + "documentation":"

Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB cluster associated with the parameter group before the change can take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

" }, "ModifyDBClusterSnapshotAttribute":{ "name":"ModifyDBClusterSnapshotAttribute", @@ -1318,7 +1320,7 @@ {"shape":"SNSTopicArnNotFoundFault"}, {"shape":"SubscriptionCategoryNotFoundFault"} ], - "documentation":"

Modifies an existing RDS event notification subscription. Note that you can't modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

" + "documentation":"

Modifies an existing RDS event notification subscription. Note that you can't modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

" }, "ModifyOptionGroup":{ "name":"ModifyOptionGroup", @@ -1404,7 +1406,7 @@ {"shape":"InvalidDBInstanceStateFault"}, {"shape":"DBInstanceNotFoundFault"} ], - "documentation":"

You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect.

Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

For more information about rebooting, see Rebooting a DB Instance.

" + "documentation":"

You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect.

Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide.

" }, "RemoveRoleFromDBCluster":{ "name":"RemoveRoleFromDBCluster", @@ -1418,7 +1420,7 @@ {"shape":"DBClusterRoleNotFoundFault"}, {"shape":"InvalidDBClusterStateFault"} ], - "documentation":"

Disassociates an Identity and Access Management (IAM) role from an Aurora DB cluster. For more information, see Authorizing Amazon Aurora to Access Other AWS Services On Your Behalf.

" + "documentation":"

Disassociates an Identity and Access Management (IAM) role from an Aurora DB cluster. For more information, see Authorizing Amazon Aurora MySQL to Access Other AWS Services on Your Behalf in the Amazon Aurora User Guide.

" }, "RemoveSourceIdentifierFromSubscription":{ "name":"RemoveSourceIdentifierFromSubscription", @@ -1449,7 +1451,7 @@ {"shape":"DBSnapshotNotFoundFault"}, {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

Removes metadata tags from an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

" + "documentation":"

Removes metadata tags from an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" }, "ResetDBClusterParameterGroup":{ "name":"ResetDBClusterParameterGroup", @@ -1466,7 +1468,7 @@ {"shape":"InvalidDBParameterGroupStateFault"}, {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request. You must call RebootDBInstance for every DB instance in your DB cluster that you want the updated static parameter to apply to.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request. You must call RebootDBInstance for every DB instance in your DB cluster that you want the updated static parameter to apply to.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "ResetDBParameterGroup":{ "name":"ResetDBParameterGroup", @@ -1511,7 +1513,7 @@ {"shape":"DBClusterNotFoundFault"}, {"shape":"InsufficientStorageClusterCapacityFault"} ], - "documentation":"

Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket. Amazon RDS must be authorized to access the Amazon S3 bucket and the data must be created using the Percona XtraBackup utility as described in Migrating Data from MySQL by Using an Amazon S3 Bucket.

" + "documentation":"

Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket. Amazon RDS must be authorized to access the Amazon S3 bucket and the data must be created using the Percona XtraBackup utility as described in Migrating Data to an Amazon Aurora MySQL DB Cluster in the Amazon Aurora User Guide.

" }, "RestoreDBClusterFromSnapshot":{ "name":"RestoreDBClusterFromSnapshot", @@ -1544,7 +1546,7 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"DBClusterParameterGroupNotFoundFault"} ], - "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "RestoreDBClusterToPointInTime":{ "name":"RestoreDBClusterToPointInTime", @@ -1576,7 +1578,7 @@ {"shape":"StorageQuotaExceededFault"}, {"shape":"DBClusterParameterGroupNotFoundFault"} ], - "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "RestoreDBInstanceFromDBSnapshot":{ "name":"RestoreDBInstanceFromDBSnapshot", @@ -1643,7 +1645,7 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"BackupPolicyNotFoundFault"} ], - "documentation":"

Amazon Relational Database Service (Amazon RDS) supports importing MySQL databases by using backup files. You can create a backup of your on-premises database, store it on Amazon Simple Storage Service (Amazon S3), and then restore the backup file onto a new Amazon RDS DB instance running MySQL. For more information, see Importing Data into an Amazon RDS MySQL DB Instance.

" + "documentation":"

Amazon Relational Database Service (Amazon RDS) supports importing MySQL databases by using backup files. You can create a backup of your on-premises database, store it on Amazon Simple Storage Service (Amazon S3), and then restore the backup file onto a new Amazon RDS DB instance running MySQL. For more information, see Importing Data into an Amazon RDS MySQL DB Instance in the Amazon RDS User Guide.

" }, "RestoreDBInstanceToPointInTime":{ "name":"RestoreDBInstanceToPointInTime", @@ -1699,6 +1701,24 @@ ], "documentation":"

Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

" }, + "StartDBCluster":{ + "name":"StartDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartDBClusterMessage"}, + "output":{ + "shape":"StartDBClusterResult", + "resultWrapper":"StartDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBInstanceStateFault"} + ], + "documentation":"

Starts an Amazon Aurora DB cluster that was stopped using the AWS console, the stop-db-cluster AWS CLI command, or the StopDBCluster action.

For more information, see Stopping and Starting an Aurora Cluster in the Amazon Aurora User Guide.

" + }, "StartDBInstance":{ "name":"StartDBInstance", "http":{ @@ -1723,7 +1743,25 @@ {"shape":"AuthorizationNotFoundFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Starts a DB instance that was stopped using the AWS console, the stop-db-instance AWS CLI command, or the StopDBInstance action. For more information, see Stopping and Starting a DB instance in the AWS RDS user guide.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.

" + "documentation":"

Starts an Amazon RDS DB instance that was stopped using the AWS console, the stop-db-instance AWS CLI command, or the StopDBInstance action.

For more information, see Starting an Amazon RDS DB Instance That Was Previously Stopped in the Amazon RDS User Guide.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora DB clusters, use StartDBCluster instead.

" + }, + "StopDBCluster":{ + "name":"StopDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopDBClusterMessage"}, + "output":{ + "shape":"StopDBClusterResult", + "resultWrapper":"StopDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBInstanceStateFault"} + ], + "documentation":"

Stops an Amazon Aurora DB cluster. When you stop a DB cluster, Aurora retains the DB cluster's metadata, including its endpoints and DB parameter groups. Aurora also retains the transaction logs so you can do a point-in-time restore if necessary.

For more information, see Stopping and Starting an Aurora Cluster in the Amazon Aurora User Guide.

" }, "StopDBInstance":{ "name":"StopDBInstance", @@ -1743,7 +1781,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"InvalidDBClusterStateFault"} ], - "documentation":"

Stops a DB instance. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary. For more information, see Stopping and Starting a DB instance in the AWS RDS user guide.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.

" + "documentation":"

Stops an Amazon RDS DB instance. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary.

For more information, see Stopping an Amazon RDS DB Instance Temporarily in the Amazon RDS User Guide.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora clusters, use StopDBCluster instead.

" } }, "shapes":{ @@ -2133,7 +2171,7 @@ "documentation":"

The list of log types to disable.

" } }, - "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

The EnableLogTypes and DisableLogTypes arrays determine which logs will be exported (or not exported) to CloudWatch Logs. The values within these arrays depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

The EnableLogTypes and DisableLogTypes arrays determine which logs will be exported (or not exported) to CloudWatch Logs. The values within these arrays depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

" }, "CopyDBClusterParameterGroupMessage":{ "type":"structure", @@ -2145,7 +2183,7 @@ "members":{ "SourceDBClusterParameterGroupIdentifier":{ "shape":"String", - "documentation":"

The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

Constraints:

  • Must specify a valid DB cluster parameter group.

  • If the source DB cluster parameter group is in the same AWS Region as the copy, specify a valid DB parameter group identifier, for example my-db-cluster-param-group, or a valid ARN.

  • If the source DB parameter group is in a different AWS Region than the copy, specify a valid DB cluster parameter group ARN, for example arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1.

" + "documentation":"

The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter group. For information about creating an ARN, see Constructing an ARN for Amazon RDS in the Amazon Aurora User Guide.

Constraints:

  • Must specify a valid DB cluster parameter group.

  • If the source DB cluster parameter group is in the same AWS Region as the copy, specify a valid DB parameter group identifier, for example my-db-cluster-param-group, or a valid ARN.

  • If the source DB parameter group is in a different AWS Region than the copy, specify a valid DB cluster parameter group ARN, for example arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1.

" }, "TargetDBClusterParameterGroupIdentifier":{ "shape":"String", @@ -2173,7 +2211,7 @@ "members":{ "SourceDBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB cluster snapshot to copy. This parameter is not case-sensitive.

You can't copy an encrypted, shared DB cluster snapshot from one AWS Region to another.

Constraints:

  • Must specify a valid system snapshot in the \"available\" state.

  • If the source snapshot is in the same AWS Region as the copy, specify a valid DB snapshot identifier.

  • If the source snapshot is in a different AWS Region than the copy, specify a valid DB cluster snapshot ARN. For more information, go to Copying a DB Snapshot or DB Cluster Snapshot.

Example: my-cluster-snapshot1

" + "documentation":"

The identifier of the DB cluster snapshot to copy. This parameter is not case-sensitive.

You can't copy an encrypted, shared DB cluster snapshot from one AWS Region to another.

Constraints:

  • Must specify a valid system snapshot in the \"available\" state.

  • If the source snapshot is in the same AWS Region as the copy, specify a valid DB snapshot identifier.

  • If the source snapshot is in a different AWS Region than the copy, specify a valid DB cluster snapshot ARN. For more information, go to Copying Snapshots Across AWS Regions in the Amazon Aurora User Guide.

Example: my-cluster-snapshot1

" }, "TargetDBClusterSnapshotIdentifier":{ "shape":"String", @@ -2211,7 +2249,7 @@ "members":{ "SourceDBParameterGroupIdentifier":{ "shape":"String", - "documentation":"

The identifier or ARN for the source DB parameter group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

Constraints:

  • Must specify a valid DB parameter group.

  • Must specify a valid DB parameter group identifier, for example my-db-param-group, or a valid ARN.

" + "documentation":"

The identifier or ARN for the source DB parameter group. For information about creating an ARN, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide.

Constraints:

  • Must specify a valid DB parameter group.

  • Must specify a valid DB parameter group identifier, for example my-db-param-group, or a valid ARN.

" }, "TargetDBParameterGroupIdentifier":{ "shape":"String", @@ -2261,7 +2299,7 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"

The name of an option group to associate with the copy of the snapshot.

Specify this option if you are copying a snapshot from one AWS Region to another, and your DB instance uses a nondefault option group. If your source DB instance uses Transparent Data Encryption for Oracle or Microsoft SQL Server, you must specify this option when copying across AWS Regions. For more information, see Option Group Considerations.

" + "documentation":"

The name of an option group to associate with the copy of the snapshot.

Specify this option if you are copying a snapshot from one AWS Region to another, and your DB instance uses a nondefault option group. If your source DB instance uses Transparent Data Encryption for Oracle or Microsoft SQL Server, you must specify this option when copying across AWS Regions. For more information, see Option Group Considerations in the Amazon RDS User Guide.

" } }, "documentation":"

" @@ -2282,7 +2320,7 @@ "members":{ "SourceOptionGroupIdentifier":{ "shape":"String", - "documentation":"

The identifier or ARN for the source option group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

Constraints:

  • Must specify a valid option group.

  • If the source option group is in the same AWS Region as the copy, specify a valid option group identifier, for example my-option-group, or a valid ARN.

  • If the source option group is in a different AWS Region than the copy, specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options.

" + "documentation":"

The identifier or ARN for the source option group. For information about creating an ARN, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide.

Constraints:

  • Must specify a valid option group.

  • If the source option group is in the same AWS Region as the copy, specify a valid option group identifier, for example my-option-group, or a valid ARN.

  • If the source option group is in a different AWS Region than the copy, specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options.

" }, "TargetOptionGroupIdentifier":{ "shape":"String", @@ -2311,7 +2349,7 @@ "members":{ "AvailabilityZones":{ "shape":"AvailabilityZones", - "documentation":"

A list of EC2 Availability Zones that instances in the DB cluster can be created in. For information on AWS Regions and Availability Zones, see Regions and Availability Zones.

" + "documentation":"

A list of EC2 Availability Zones that instances in the DB cluster can be created in. For information on AWS Regions and Availability Zones, see Choosing the Regions and Availability Zones in the Amazon Aurora User Guide.

" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", @@ -2367,11 +2405,11 @@ }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

" + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

" }, "ReplicationSourceIdentifier":{ "shape":"String", @@ -2400,15 +2438,19 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

" }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned or serverless.

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, or parallelquery.

" }, "ScalingConfiguration":{ "shape":"ScalingConfiguration", "documentation":"

For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB cluster should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false.

" } }, "documentation":"

" @@ -2543,7 +2585,7 @@ }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. For more information, see The Backup Window.

Amazon Aurora

Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see CreateDBCluster.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred DB Instance Maintenance Window.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. For more information, see The Backup Window in the Amazon RDS User Guide.

Amazon Aurora

Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see CreateDBCluster.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred DB Instance Maintenance Window in the Amazon RDS User Guide.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" }, "Port":{ "shape":"IntegerOptional", @@ -2567,7 +2609,7 @@ }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance.

Constraints: Must be a multiple between 1 and 50 of the storage amount for the DB instance. Must also be an integer multiple of 1000. For example, if the size of your DB instance is 500 GiB, then your Iops value can be 2000, 3000, 4000, or 5000.

" + "documentation":"

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

Constraints: Must be a multiple between 1 and 50 of the storage amount for the DB instance. Must also be an integer multiple of 1000. For example, if the size of your DB instance is 500 GiB, then your Iops value can be 2000, 3000, 4000, or 5000.

" }, "OptionGroupName":{ "shape":"String", @@ -2620,7 +2662,7 @@ }, "MonitoringRoleArn":{ "shape":"String", - "documentation":"

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to Setting Up and Enabling Enhanced Monitoring.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

" + "documentation":"

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

" }, "DomainIAMRoleName":{ "shape":"String", @@ -2628,7 +2670,7 @@ }, "PromotionTier":{ "shape":"IntegerOptional", - "documentation":"

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

Default: 1

Valid Values: 0 - 15

" + "documentation":"

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

Default: 1

Valid Values: 0 - 15

" }, "Timezone":{ "shape":"String", @@ -2657,6 +2699,10 @@ "ProcessorFeatures":{ "shape":"ProcessorFeatureList", "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false. For more information, see Deleting a DB Instance.

" } }, "documentation":"

" @@ -2674,7 +2720,7 @@ }, "SourceDBInstanceIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas.

Constraints:

  • Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB instance.

  • Can specify a DB instance that is a MySQL Read Replica only if the source is running MySQL 5.6.

  • Can specify a DB instance that is a PostgreSQL DB instance only if the source is running PostgreSQL 9.3.5 or later (9.4.7 and higher for cross-region replication).

  • The specified DB instance must have automatic backups enabled, its backup retention period must be greater than 0.

  • If the source DB instance is in the same AWS Region as the Read Replica, specify a valid DB instance identifier.

  • If the source DB instance is in a different AWS Region than the Read Replica, specify a valid DB instance ARN. For more information, go to Constructing a Amazon RDS Amazon Resource Name (ARN).

" + "documentation":"

The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas.

Constraints:

  • Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB instance.

  • Can specify a DB instance that is a MySQL Read Replica only if the source is running MySQL 5.6.

  • Can specify a DB instance that is a PostgreSQL DB instance only if the source is running PostgreSQL 9.3.5 or later (9.4.7 and higher for cross-region replication).

  • The specified DB instance must have automatic backups enabled, its backup retention period must be greater than 0.

  • If the source DB instance is in the same AWS Region as the Read Replica, specify a valid DB instance identifier.

  • If the source DB instance is in a different AWS Region than the Read Replica, specify a valid DB instance ARN. For more information, go to Constructing an ARN for Amazon RDS in the Amazon RDS User Guide.

" }, "DBInstanceClass":{ "shape":"String", @@ -2727,7 +2773,7 @@ }, "MonitoringRoleArn":{ "shape":"String", - "documentation":"

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

" + "documentation":"

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

" }, "KmsKeyId":{ "shape":"String", @@ -2743,7 +2789,7 @@ }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

True to enable Performance Insights for the read replica, and otherwise false.

For more information, see Using Amazon Performance Insights in the Amazon Relational Database Service User Guide.

" + "documentation":"

True to enable Performance Insights for the read replica, and otherwise false.

For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

" }, "PerformanceInsightsKMSKeyId":{ "shape":"String", @@ -2755,7 +2801,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + "documentation":"

The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -2764,6 +2810,10 @@ "UseDefaultProcessorFeatures":{ "shape":"BooleanOptional", "documentation":"

A value that specifies that the DB instance class of the DB instance uses its default processor features.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false. For more information, see Deleting a DB Instance.

" } } }, @@ -2910,7 +2960,7 @@ }, "EventCategories":{ "shape":"EventCategoriesList", - "documentation":"

A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

" + "documentation":"

A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

" }, "SourceIds":{ "shape":"SourceIdsList", @@ -3122,16 +3172,20 @@ }, "EnabledCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

A list of log types that this DB cluster is configured to export to CloudWatch Logs.

Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files in the Amazon RDS User Guide.

" + "documentation":"

A list of log types that this DB cluster is configured to export to CloudWatch Logs.

Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files in the Amazon Aurora User Guide.

" }, "Capacity":{"shape":"IntegerOptional"}, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned or serverless.

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, or parallelquery.

" }, - "ScalingConfigurationInfo":{"shape":"ScalingConfigurationInfo"} + "ScalingConfigurationInfo":{"shape":"ScalingConfigurationInfo"}, + "DeletionProtection":{ + "shape":"Boolean", + "documentation":"

Indicates if the DB cluster has deletion protection enabled. The database can't be deleted when this value is set to true.

" + } }, - "documentation":"

Contains the details of an Amazon RDS DB cluster.

This data type is used as a response element in the DescribeDBClusters action.

", + "documentation":"

Contains the details of an Amazon Aurora DB cluster.

This data type is used as a response element in the DescribeDBClusters, StopDBCluster, and StartDBCluster actions.

", "wrapper":true }, "DBClusterAlreadyExistsFault":{ @@ -3258,7 +3312,7 @@ }, "PromotionTier":{ "shape":"IntegerOptional", - "documentation":"

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

" + "documentation":"

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

" } }, "documentation":"

Contains information about an instance that is part of a DB cluster.

", @@ -3816,7 +3870,7 @@ }, "ReadReplicaDBClusterIdentifiers":{ "shape":"ReadReplicaDBClusterIdentifierList", - "documentation":"

Contains one or more identifiers of Aurora DB clusters that are Read Replicas of this DB instance.

" + "documentation":"

Contains one or more identifiers of Aurora DB clusters to which the RDS DB instance is replicated as a Read Replica. For example, when you create an Aurora Read Replica of an RDS MySQL DB instance, the Aurora MySQL DB cluster for the Aurora Read Replica is shown. This output does not contain information about cross region Aurora Read Replicas.

" }, "LicenseModel":{ "shape":"String", @@ -3900,7 +3954,7 @@ }, "PromotionTier":{ "shape":"IntegerOptional", - "documentation":"

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

" + "documentation":"

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

" }, "DBInstanceArn":{ "shape":"String", @@ -3933,6 +3987,10 @@ "ProcessorFeatures":{ "shape":"ProcessorFeatureList", "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

" + }, + "DeletionProtection":{ + "shape":"Boolean", + "documentation":"

Indicates if the DB instance has deletion protection enabled. The database can't be deleted when this value is set to true. For more information, see Deleting a DB Instance.

" } }, "documentation":"

Contains the details of an Amazon RDS DB instance.

This data type is used as a response element in the DescribeDBInstances action.

", @@ -6128,7 +6186,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified DB instance isn't in the available state.

", + "documentation":"

The DB instance isn't in a valid state.

", "error":{ "code":"InvalidDBInstanceState", "httpStatusCode":400, @@ -6302,7 +6360,7 @@ "members":{ "ResourceName":{ "shape":"String", - "documentation":"

The Amazon RDS resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

" + "documentation":"

The Amazon RDS resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide.

" }, "Filters":{ "shape":"FilterList", @@ -6317,6 +6375,27 @@ }, "Long":{"type":"long"}, "LongOptional":{"type":"long"}, + "MinimumEngineVersionPerAllowedValue":{ + "type":"structure", + "members":{ + "AllowedValue":{ + "shape":"String", + "documentation":"

The allowed value for an option setting.

" + }, + "MinimumEngineVersion":{ + "shape":"String", + "documentation":"

The minimum DB engine version required for the allowed value.

" + } + }, + "documentation":"

The minimum DB engine version required for each corresponding allowed value for an option setting.

" + }, + "MinimumEngineVersionPerAllowedValueList":{ + "type":"list", + "member":{ + "shape":"MinimumEngineVersionPerAllowedValue", + "locationName":"MinimumEngineVersionPerAllowedValue" + } + }, "ModifyCurrentDBClusterCapacityMessage":{ "type":"structure", "required":["DBClusterIdentifier"], @@ -6353,7 +6432,7 @@ }, "ApplyImmediately":{ "shape":"Boolean", - "documentation":"

A value that specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is set to false, changes to the DB cluster are applied during the next maintenance window.

The ApplyImmediately parameter only affects the NewDBClusterIdentifier and MasterUserPassword values. If you set the ApplyImmediately parameter value to false, then changes to the NewDBClusterIdentifier and MasterUserPassword values are applied during the next maintenance window. All other changes are applied immediately, regardless of the value of the ApplyImmediately parameter.

Default: false

" + "documentation":"

A value that specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is set to false, changes to the DB cluster are applied during the next maintenance window.

The ApplyImmediately parameter only affects the EnableIAMDatabaseAuthentication, MasterUserPassword, and NewDBClusterIdentifier values. If you set the ApplyImmediately parameter value to false, then changes to the EnableIAMDatabaseAuthentication, MasterUserPassword, and NewDBClusterIdentifier values are applied during the next maintenance window. All other changes are applied immediately, regardless of the value of the ApplyImmediately parameter.

Default: false

" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", @@ -6381,11 +6460,11 @@ }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

" + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -6406,6 +6485,10 @@ "ScalingConfiguration":{ "shape":"ScalingConfiguration", "documentation":"

The scaling properties of the DB cluster. You can only modify scaling properties for DB clusters in serverless DB engine mode.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB cluster has deletion protection enabled. The database can't be deleted when this value is set to true.

" } }, "documentation":"

" @@ -6484,7 +6567,7 @@ }, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance is not in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Updating the VPC for a DB Instance.

Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you specify true for the ApplyImmediately parameter.

Constraints: If supplied, must match the name of an existing DBSubnetGroup.

Example: mySubnetGroup

" + "documentation":"

The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance is not in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Updating the VPC for a DB Instance in the Amazon RDS User Guide.

Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you specify true for the ApplyImmediately parameter.

Constraints: If supplied, must match the name of an existing DBSubnetGroup.

Example: mySubnetGroup

" }, "DBSecurityGroups":{ "shape":"DBSecurityGroupNameList", @@ -6496,7 +6579,7 @@ }, "ApplyImmediately":{ "shape":"Boolean", - "documentation":"

Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes are applied.

Default: false

" + "documentation":"

Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter in the Amazon RDS User Guide to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes are applied.

Default: false

" }, "MasterUserPassword":{ "shape":"String", @@ -6588,7 +6671,7 @@ }, "MonitoringRoleArn":{ "shape":"String", - "documentation":"

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

" + "documentation":"

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

" }, "DomainIAMRoleName":{ "shape":"String", @@ -6596,7 +6679,7 @@ }, "PromotionTier":{ "shape":"IntegerOptional", - "documentation":"

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

Default: 1

Valid Values: 0 - 15

" + "documentation":"

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

Default: 1

Valid Values: 0 - 15

" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -6625,6 +6708,10 @@ "UseDefaultProcessorFeatures":{ "shape":"BooleanOptional", "documentation":"

A value that specifies that the DB instance class of the DB instance uses its default processor features.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB instance has deletion protection enabled. The database can't be deleted when this value is set to true. For more information, see Deleting a DB Instance.

" } }, "documentation":"

" @@ -6699,7 +6786,7 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"

The option group to identify with the upgraded DB snapshot.

You can specify this parameter when you upgrade an Oracle DB snapshot. The same option group considerations apply when upgrading a DB snapshot as when upgrading a DB instance. For more information, see Option Group Considerations.

" + "documentation":"

The option group to identify with the upgraded DB snapshot.

You can specify this parameter when you upgrade an Oracle DB snapshot. The same option group considerations apply when upgrading a DB snapshot as when upgrading a DB instance. For more information, see Option Group Considerations in the Amazon RDS User Guide.

" } } }, @@ -6755,7 +6842,7 @@ }, "EventCategories":{ "shape":"EventCategoriesList", - "documentation":"

A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

" + "documentation":"

A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

" }, "Enabled":{ "shape":"BooleanOptional", @@ -7059,6 +7146,14 @@ "IsModifiable":{ "shape":"Boolean", "documentation":"

Boolean value where true indicates that this option group option can be changed from the default value.

" + }, + "IsRequired":{ + "shape":"Boolean", + "documentation":"

Boolean value where true indicates that a value must be specified for this option setting of the option group option.

" + }, + "MinimumEngineVersionPerAllowedValue":{ + "shape":"MinimumEngineVersionPerAllowedValueList", + "documentation":"

The minimum DB engine version required for the corresponding allowed value for this option setting.

" } }, "documentation":"

Option group option settings are used to display settings available for each option with their default values and other information. These values are used with the DescribeOptionGroupOptions action.

" @@ -7784,7 +7879,7 @@ "members":{ "ResourceName":{ "shape":"String", - "documentation":"

The Amazon RDS resource that the tags are removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

" + "documentation":"

The Amazon RDS resource that the tags are removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide.

" }, "TagKeys":{ "shape":"KeyList", @@ -8133,11 +8228,11 @@ }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon Aurora User Guide.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

" + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon Aurora User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

" }, "Tags":{"shape":"TagList"}, "StorageEncrypted":{ @@ -8178,7 +8273,11 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB cluster should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false.

" } } }, @@ -8254,15 +8353,19 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

" }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned or serverless.

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, or parallelquery.

" }, "ScalingConfiguration":{ "shape":"ScalingConfiguration", "documentation":"

For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB cluster should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false.

" } }, "documentation":"

" @@ -8331,7 +8434,11 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB cluster should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false.

" } }, "documentation":"

" @@ -8399,7 +8506,7 @@ }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value is taken from the backup. If this parameter is set to 0, the new instance is converted to a non-PIOPS instance. The conversion takes additional time, though your DB instance is available for connections before the conversion starts.

The provisioned IOPS value must follow the requirements for your database engine. For more information, see Amazon RDS Provisioned IOPS Storage to Improve Performance.

Constraints: Must be an integer greater than 1000.

" + "documentation":"

Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value is taken from the backup. If this parameter is set to 0, the new instance is converted to a non-PIOPS instance. The conversion takes additional time, though your DB instance is available for connections before the conversion starts.

The provisioned IOPS value must follow the requirements for your database engine. For more information, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

Constraints: Must be an integer greater than 1000.

" }, "OptionGroupName":{ "shape":"String", @@ -8436,7 +8543,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -8445,6 +8552,10 @@ "UseDefaultProcessorFeatures":{ "shape":"BooleanOptional", "documentation":"

A value that specifies that the DB instance class of the DB instance uses its default processor features.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false. For more information, see Deleting a DB Instance.

" } }, "documentation":"

" @@ -8505,7 +8616,7 @@ }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The Availability Zone that the DB instance is created in. For information about AWS Regions and Availability Zones, see Regions and Availability Zones.

Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.

Example: us-east-1d

Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same AWS Region as the current endpoint.

" + "documentation":"

The Availability Zone that the DB instance is created in. For information about AWS Regions and Availability Zones, see Regions and Availability Zones in the Amazon RDS User Guide.

Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.

Example: us-east-1d

Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same AWS Region as the current endpoint.

" }, "DBSubnetGroupName":{ "shape":"String", @@ -8513,7 +8624,7 @@ }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS Maintenance Window.

Constraints:

  • Must be in the format ddd:hh24:mi-ddd:hh24:mi.

  • Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred backup window.

  • Must be at least 30 minutes.

" + "documentation":"

The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS Maintenance Window in the Amazon RDS User Guide.

Constraints:

  • Must be in the format ddd:hh24:mi-ddd:hh24:mi.

  • Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred backup window.

  • Must be at least 30 minutes.

" }, "DBParameterGroupName":{ "shape":"String", @@ -8525,7 +8636,7 @@ }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The time range each day during which automated backups are created if automated backups are enabled. For more information, see The Backup Window.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" + "documentation":"

The time range each day during which automated backups are created if automated backups are enabled. For more information, see The Backup Window in the Amazon RDS User Guide.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" }, "Port":{ "shape":"IntegerOptional", @@ -8549,7 +8660,7 @@ }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

The amount of Provisioned IOPS (input/output operations per second) to allocate initially for the DB instance. For information about valid Iops values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance.

" + "documentation":"

The amount of Provisioned IOPS (input/output operations per second) to allocate initially for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

" }, "OptionGroupName":{ "shape":"String", @@ -8561,7 +8672,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags to associate with this DB instance. For more information, see Tagging Amazon RDS Resources.

" + "documentation":"

A list of tags to associate with this DB instance. For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" }, "StorageType":{ "shape":"String", @@ -8585,7 +8696,7 @@ }, "MonitoringRoleArn":{ "shape":"String", - "documentation":"

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

" + "documentation":"

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -8625,7 +8736,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -8634,6 +8745,10 @@ "UseDefaultProcessorFeatures":{ "shape":"BooleanOptional", "documentation":"

A value that specifies that the DB instance class of the DB instance uses its default processor features.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false. For more information, see Deleting a DB Instance.

" } } }, @@ -8745,7 +8860,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

" + "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

" }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -8754,6 +8869,10 @@ "UseDefaultProcessorFeatures":{ "shape":"BooleanOptional", "documentation":"

A value that specifies that the DB instance class of the DB instance uses its default processor features.

" + }, + "DeletionProtection":{ + "shape":"BooleanOptional", + "documentation":"

Indicates if the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false. For more information, see Deleting a DB Instance.

" } }, "documentation":"

" @@ -8853,7 +8972,7 @@ "documentation":"

The time, in seconds, before an Aurora DB cluster in serverless mode is paused.

" } }, - "documentation":"

Contains the scaling configuration of an Aurora Serverless DB cluster.

For more information, see Using Amazon Aurora Serverless in the Amazon RDS User Guide.

" + "documentation":"

Contains the scaling configuration of an Aurora Serverless DB cluster.

For more information, see Using Amazon Aurora Serverless in the Amazon Aurora User Guide.

" }, "ScalingConfigurationInfo":{ "type":"structure", @@ -8875,7 +8994,7 @@ "documentation":"

The remaining amount of time, in seconds, before the Aurora DB cluster in serverless mode is paused. A DB cluster can be paused only when it's idle (it has no connections).

" } }, - "documentation":"

Shows the scaling configuration for an Aurora DB cluster in serverless DB engine mode.

For more information, see Using Amazon Aurora Serverless in the Amazon RDS User Guide.

" + "documentation":"

Shows the scaling configuration for an Aurora DB cluster in serverless DB engine mode.

For more information, see Using Amazon Aurora Serverless in the Amazon Aurora User Guide.

" }, "SharedSnapshotQuotaExceededFault":{ "type":"structure", @@ -8970,6 +9089,22 @@ "db-cluster-snapshot" ] }, + "StartDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier of the Amazon Aurora DB cluster to be started. This parameter is stored as a lowercase string.

" + } + } + }, + "StartDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, "StartDBInstanceMessage":{ "type":"structure", "required":["DBInstanceIdentifier"], @@ -8986,6 +9121,22 @@ "DBInstance":{"shape":"DBInstance"} } }, + "StopDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier of the Amazon Aurora DB cluster to be stopped. This parameter is stored as a lowercase string.

" + } + } + }, + "StopDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, "StopDBInstanceMessage":{ "type":"structure", "required":["DBInstanceIdentifier"], @@ -9143,7 +9294,7 @@ "shape":"Tag", "locationName":"Tag" }, - "documentation":"

A list of tags. For more information, see Tagging Amazon RDS Resources.

" + "documentation":"

A list of tags. For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" }, "TagListMessage":{ "type":"structure", diff --git a/botocore/data/redshift/2012-12-01/service-2.json b/botocore/data/redshift/2012-12-01/service-2.json index 015ba380..588b15db 100644 --- a/botocore/data/redshift/2012-12-01/service-2.json +++ b/botocore/data/redshift/2012-12-01/service-2.json @@ -1139,6 +1139,30 @@ ], "documentation":"

Sets one or more parameters of the specified parameter group to their default values and sets the source values of the parameters to \"engine-default\". To reset the entire parameter group specify the ResetAllParameters parameter. For parameter changes to take effect you must reboot any associated clusters.

" }, + "ResizeCluster":{ + "name":"ResizeCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResizeClusterMessage"}, + "output":{ + "shape":"ResizeClusterResult", + "resultWrapper":"ResizeClusterResult" + }, + "errors":[ + {"shape":"InvalidClusterStateFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"NumberOfNodesQuotaExceededFault"}, + {"shape":"NumberOfNodesPerClusterLimitExceededFault"}, + {"shape":"InsufficientClusterCapacityFault"}, + {"shape":"UnsupportedOptionFault"}, + {"shape":"UnsupportedOperationFault"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"LimitExceededFault"} + ], + "documentation":"

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:

    • dc2.large

    • dc2.8xlarge

    • ds2.xlarge

    • ds2.8xlarge

  • The type of nodes you add must match the node type for the cluster.

" + }, "RestoreFromClusterSnapshot":{ "name":"RestoreFromClusterSnapshot", "http":{ @@ -1578,6 +1602,10 @@ "MaintenanceTrackName":{ "shape":"String", "documentation":"

The name of the maintenance track for the cluster.

" + }, + "ElasticResizeNumberOfNodeOptions":{ + "shape":"String", + "documentation":"

Indicates the number of nodes the cluster can be resized to with the elastic resize method.

" } }, "documentation":"

Describes a cluster.

", @@ -4512,6 +4540,14 @@ "MaintenanceTrackName":{ "shape":"String", "documentation":"

The name for the maintenance track that you want to assign for the cluster. This name change is asynchronous. The new track name stays in the PendingModifiedValues for the cluster until the next maintenance window. When the maintenance track changes, the cluster is switched to the latest cluster release available for the maintenance track. At this point, the maintenance track name is applied.

" + }, + "Encrypted":{ + "shape":"BooleanOptional", + "documentation":"

Indicates whether the cluster is encrypted. If the cluster is encrypted and you provide a value for the KmsKeyId parameter, we will encrypt the cluster with the provided KmsKeyId. If you don't provide a KmsKeyId, we will encrypt with the default key. In the China region we will use legacy encryption if you specify that the cluster is encrypted.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.

" } }, "documentation":"

" @@ -4810,6 +4846,10 @@ "MaintenanceTrackName":{ "shape":"String", "documentation":"

The name of the maintenance track that the cluster will change to during the next maintenance window.

" + }, + "EncryptionType":{ + "shape":"String", + "documentation":"

The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.

" } }, "documentation":"

Describes cluster attributes that are in a pending state. A change to one or more the attributes was requested and is in progress or will be applied.

" @@ -5098,6 +5138,41 @@ }, "documentation":"

" }, + "ResizeClusterMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "NumberOfNodes" + ], + "members":{ + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

The unique identifier for the cluster to resize.

" + }, + "ClusterType":{ + "shape":"String", + "documentation":"

The new cluster type for the specified cluster.

" + }, + "NodeType":{ + "shape":"String", + "documentation":"

The new node type for the nodes you are adding.

" + }, + "NumberOfNodes":{ + "shape":"Integer", + "documentation":"

The new number of nodes for the cluster.

" + }, + "Classic":{ + "shape":"BooleanOptional", + "documentation":"

A boolean value indicating whether the resize operation is using the classic resize process. If you don't provide this parameter or set the value to false the resize type is elastic.

" + } + } + }, + "ResizeClusterResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, "ResizeNotFoundFault":{ "type":"structure", "members":{ @@ -5160,6 +5235,18 @@ "EstimatedTimeToCompletionInSeconds":{ "shape":"LongOptional", "documentation":"

The estimated time remaining, in seconds, until the resize operation is complete. This value is calculated based on the average resize rate and the estimated amount of data remaining to be processed. Once the resize operation is complete, this value will be 0.

" + }, + "ResizeType":{ + "shape":"String", + "documentation":"

An enum with possible values of ClassicResize and ElasticResize. These values describe the type of resize operation being performed.

" + }, + "Message":{ + "shape":"String", + "documentation":"

An optional string to provide additional details about the resize action.

" + }, + "TargetEncryptionType":{ + "shape":"String", + "documentation":"

The type of encryption for the cluster after the resize is complete.

Possible values are KMS and None. In the China region possible values are: Legacy and None.

" } }, "documentation":"

Describes the result of a cluster resize operation.

" diff --git a/botocore/data/rekognition/2016-06-27/service-2.json b/botocore/data/rekognition/2016-06-27/service-2.json index b8cf7c67..19e331dc 100644 --- a/botocore/data/rekognition/2016-06-27/service-2.json +++ b/botocore/data/rekognition/2016-06-27/service-2.json @@ -30,7 +30,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Compares a face in the source input image with each of the 100 largest faces detected in the target input image.

If the source image contains multiple faces, the service detects the largest face and compares it with each face detected in the target image.

You pass the input and target images either as base64-encoded image bytes or as a references to images in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

In response, the operation returns an array of face matches ordered by similarity score in descending order. For each face match, the response provides a bounding box of the face, facial landmarks, pose details (pitch, role, and yaw), quality (brightness and sharpness), and confidence value (indicating the level of confidence that the bounding box contains a face). The response also provides a similarity score, which indicates how closely the faces match.

By default, only faces with a similarity score of greater than or equal to 80% are returned in the response. You can change this value by specifying the SimilarityThreshold parameter.

CompareFaces also returns an array of faces that don't match the source image. For each face, it returns a bounding box, confidence value, landmarks, pose details, and quality. The response also returns information about the face in the source image, including the bounding box of the face and confidence value.

If the image doesn't contain Exif metadata, CompareFaces returns orientation information for the source and target images. Use these values to display the images with the correct image orientation.

If no faces are detected in the source or target images, CompareFaces returns an InvalidParameterException error.

This is a stateless API operation. That is, data returned by this operation doesn't persist.

For an example, see Comparing Faces in Images in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:CompareFaces action.

" + "documentation":"

Compares a face in the source input image with each of the 100 largest faces detected in the target input image.

If the source image contains multiple faces, the service detects the largest face and compares it with each face detected in the target image.

You pass the input and target images either as base64-encoded image bytes or as references to images in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

In response, the operation returns an array of face matches ordered by similarity score in descending order. For each face match, the response provides a bounding box of the face, facial landmarks, pose details (pitch, roll, and yaw), quality (brightness and sharpness), and confidence value (indicating the level of confidence that the bounding box contains a face). The response also provides a similarity score, which indicates how closely the faces match.

By default, only faces with a similarity score of greater than or equal to 80% are returned in the response. You can change this value by specifying the SimilarityThreshold parameter.

CompareFaces also returns an array of faces that don't match the source image. For each face, it returns a bounding box, confidence value, landmarks, pose details, and quality. The response also returns information about the face in the source image, including the bounding box of the face and confidence value.

If the image doesn't contain Exif metadata, CompareFaces returns orientation information for the source and target images. Use these values to display the images with the correct image orientation.

If no faces are detected in the source or target images, CompareFaces returns an InvalidParameterException error.

This is a stateless API operation. That is, data returned by this operation doesn't persist.

For an example, see Comparing Faces in Images in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:CompareFaces action.

" }, "CreateCollection":{ "name":"CreateCollection", @@ -124,6 +124,24 @@ ], "documentation":"

Deletes the stream processor identified by Name. You assign the value for Name when you create the stream processor with . You might not be able to use the same name for a stream processor for a few seconds after calling DeleteStreamProcessor.

" }, + "DescribeCollection":{ + "name":"DescribeCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCollectionRequest"}, + "output":{"shape":"DescribeCollectionResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes the specified collection. You can use DescribeCollection to get information, such as the number of faces indexed into a collection and the version of the model used by the collection for face detection.

For more information, see Describing a Collection in the Amazon Rekognition Developer Guide.

" + }, "DescribeStreamProcessor":{ "name":"DescribeStreamProcessor", "http":{ @@ -160,7 +178,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects faces within an image that is provided as input.

DetectFaces detects the 100 largest faces in the image. For each face detected, the operation returns face details including a bounding box of the face, a confidence value (that the bounding box contains a face), and a fixed set of attributes such as facial landmarks (for example, coordinates of eye and mouth), gender, presence of beard, sunglasses, etc.

The face-detection algorithm is most effective on frontal faces. For non-frontal or obscured faces, the algorithm may not detect the faces or might detect faces with lower confidence.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectFaces action.

" + "documentation":"

Detects faces within an image that is provided as input.

DetectFaces detects the 100 largest faces in the image. For each face detected, the operation returns face details. These details include a bounding box of the face, a confidence value (that the bounding box contains a face), and a fixed set of attributes such as facial landmarks (for example, coordinates of eye and mouth), gender, presence of beard, sunglasses, and so on.

The face-detection algorithm is most effective on frontal faces. For non-frontal or obscured faces, the algorithm might not detect the faces or might detect faces with lower confidence.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectFaces action.

" }, "DetectLabels":{ "name":"DetectLabels", @@ -180,7 +198,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature.

For an example, see Analyzing Images Stored in an Amazon S3 Bucket in the Amazon Rekognition Developer Guide.

DetectLabels does not support the detection of activities. However, activity detection is supported for label detection in videos. For more information, see StartLabelDetection in the Amazon Rekognition Developer Guide.

You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For each object, scene, and concept the API returns one or more labels. Each label provides the object name, and the level of confidence that the image contains the object. For example, suppose the input image has a lighthouse, the sea, and a rock. The response will include all three labels, one for each object.

{Name: lighthouse, Confidence: 98.4629}

{Name: rock,Confidence: 79.2097}

{Name: sea,Confidence: 75.061}

In the preceding example, the operation returns one label for each of the three objects. The operation can also return multiple labels for the same object in the image. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels.

{Name: flower,Confidence: 99.0562}

{Name: plant,Confidence: 99.0562}

{Name: tulip,Confidence: 99.0562}

In this example, the detection algorithm more precisely identifies the flower as a tulip.

In response, the API returns an array of labels. In addition, the response also includes the orientation correction. Optionally, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 50%. You can also add the MaxLabels parameter to limit the number of labels returned.

If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectLabels action.

" + "documentation":"

Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature.

For an example, see Analyzing Images Stored in an Amazon S3 Bucket in the Amazon Rekognition Developer Guide.

DetectLabels does not support the detection of activities. However, activity detection is supported for label detection in videos. For more information, see StartLabelDetection in the Amazon Rekognition Developer Guide.

You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For each object, scene, and concept the API returns one or more labels. Each label provides the object name, and the level of confidence that the image contains the object. For example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object.

{Name: lighthouse, Confidence: 98.4629}

{Name: rock,Confidence: 79.2097}

{Name: sea,Confidence: 75.061}

In the preceding example, the operation returns one label for each of the three objects. The operation can also return multiple labels for the same object in the image. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels.

{Name: flower,Confidence: 99.0562}

{Name: plant,Confidence: 99.0562}

{Name: tulip,Confidence: 99.0562}

In this example, the detection algorithm more precisely identifies the flower as a tulip.

In response, the API returns an array of labels. In addition, the response also includes the orientation correction. Optionally, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 50%. You can also add the MaxLabels parameter to limit the number of labels returned.

If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectLabels action.

" }, "DetectModerationLabels":{ "name":"DetectModerationLabels", @@ -200,7 +218,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects explicit or suggestive adult content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.

For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

" + "documentation":"

Detects explicit or suggestive adult content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.

For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

" }, "DetectText":{ "name":"DetectText", @@ -220,7 +238,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects text in the input image and converts it into machine-readable text.

Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.

The DetectText operation returns text in an array of elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image.

A word is one or more ISO basic latin script characters that are not separated by spaces. DetectText can detect up to 50 words in an image.

A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.

To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.

To be detected, text must be within +/- 30 degrees orientation of the horizontal axis.

For more information, see DetectText in the Amazon Rekognition Developer Guide.

" + "documentation":"

Detects text in the input image and converts it into machine-readable text.

Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.

The DetectText operation returns text in an array of elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image.

A word is one or more ISO basic latin script characters that are not separated by spaces. DetectText can detect up to 50 words in an image.

A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.

To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.

To be detected, text must be within +/- 90 degrees orientation of the horizontal axis.

For more information, see DetectText in the Amazon Rekognition Developer Guide.

" }, "GetCelebrityInfo":{ "name":"GetCelebrityInfo", @@ -238,7 +256,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets the name and additional information about a celebrity based on his or her Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty.

For more information, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:GetCelebrityInfo action.

" + "documentation":"

Gets the name and additional information about a celebrity based on his or her Amazon Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty.

For more information, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:GetCelebrityInfo action.

" }, "GetCelebrityRecognition":{ "name":"GetCelebrityRecognition", @@ -373,7 +391,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects faces in the input image and adds them to the specified collection.

Amazon Rekognition does not save the actual faces detected. Instead, the underlying detection algorithm first detects the faces in the input image, and for each face extracts facial features into a feature vector, and stores it in the back-end database. Amazon Rekognition uses feature vectors when performing face match and search operations using the and operations.

If you are using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. To determine which version of the model you are using, check the the value of FaceModelVersion in the response from IndexFaces.

For more information, see Model Versioning in the Amazon Rekognition Developer Guide.

If you provide the optional ExternalImageID for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.

In response, the operation returns an array of metadata for all detected faces. This includes, the bounding box of the detected face, confidence value (indicating the bounding box contains a face), a face ID assigned by the service for each face that is detected and stored, and an image ID assigned by the service for the input image. If you request all facial attributes (using the detectionAttributes parameter, Amazon Rekognition returns detailed facial attributes such as facial landmarks (for example, location of eye and mount) and other facial attributes such gender. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.

The input image is passed either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

This operation requires permissions to perform the rekognition:IndexFaces action.

" + "documentation":"

Detects faces in the input image and adds them to the specified collection.

Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the and operations.

For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.

To get the number of faces in a collection, call .

If you're using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. To determine which version of the model you're using, call and supply the collection ID. You can also get the model version from the value of FaceModelVersion in the response from IndexFaces.

For more information, see Model Versioning in the Amazon Rekognition Developer Guide.

If you provide the optional ExternalImageID for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.

You can specify the maximum number of faces to index with the MaxFaces input parameter. This is useful when you want to index the largest faces in an image and don't want to index smaller faces, such as those belonging to people standing in the background.

The QualityFilter input parameter allows you to filter out detected faces that don’t meet the required quality bar chosen by Amazon Rekognition. The quality bar is based on a variety of common use cases. By default, IndexFaces filters detected faces. You can also explicitly filter detected faces by specifying AUTO for the value of QualityFilter. If you do not want to filter detected faces, specify NONE.

To use quality filtering, you need a collection associated with version 3 of the face model. To get the version of the face model associated with a collection, call .

Information about faces detected in an image, but not indexed, is returned in an array of objects, UnindexedFaces. Faces aren't indexed for reasons such as:

  • The number of faces detected exceeds the value of the MaxFaces request parameter.

  • The face is too small compared to the image dimensions.

  • The face is too blurry.

  • The image is too dark.

  • The face has an extreme pose.

In response, the IndexFaces operation returns an array of metadata for all detected faces, FaceRecords. This includes:

  • The bounding box, BoundingBox, of the detected face.

  • A confidence value, Confidence, which indicates the confidence that the bounding box contains a face.

  • A face ID, faceId, assigned by the service for each face that's detected and stored.

  • An image ID, ImageId, assigned by the service for the input image.

If you request all facial attributes (by using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes like gender. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

This operation requires permissions to perform the rekognition:IndexFaces action.

" }, "ListCollections":{ "name":"ListCollections", @@ -450,7 +468,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Returns an array of celebrities recognized in the input image. For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.

RecognizeCelebrities returns the 100 largest faces in the image. It lists recognized celebrities in the CelebrityFaces array and unrecognized faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities whose faces are not amongst the largest 100 faces in the image.

For each celebrity recognized, the RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image.

Rekognition does not retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities, you will need the ID to identify the celebrity in a call to the operation.

You pass the imput image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.

" + "documentation":"

Returns an array of celebrities recognized in the input image. For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.

RecognizeCelebrities returns the 100 largest faces in the image. It lists recognized celebrities in the CelebrityFaces array and unrecognized faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities whose faces aren't among the largest 100 faces in the image.

For each celebrity recognized, RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image.

Amazon Rekognition doesn't retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities, you will need the ID to identify the celebrity in a call to the operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.

" }, "SearchFaces":{ "name":"SearchFaces", @@ -489,7 +507,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection.

To search for all faces in an input image, you might first call the operation, and then use the face IDs returned in subsequent calls to the operation.

You can also call the DetectFaces operation and use the bounding boxes in the response to make face crops, which then you can pass in to the SearchFacesByImage operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity indicating how similar the face is to the input face. In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image.

For an example, Searching for a Face Using an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:SearchFacesByImage action.

" + "documentation":"

For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection.

To search for all faces in an input image, you might first call the operation, and then use the face IDs returned in subsequent calls to the operation.

You can also call the DetectFaces operation and use the bounding boxes in the response to make face crops, which then you can pass in to the SearchFacesByImage operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity indicating how similar the face is to the input face. In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image.

For an example, see Searching for a Face Using an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:SearchFacesByImage action.

" }, "StartCelebrityRecognition":{ "name":"StartCelebrityRecognition", @@ -577,7 +595,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video.

The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId) which you use to get the search results once the search has completed. When searching is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see collections-search-person.

", + "documentation":"

Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video.

The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId) which you use to get the search results once the search has completed. When searching is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see procedure-person-search-videos.

", "idempotent":true }, "StartLabelDetection":{ @@ -683,7 +701,7 @@ "documentation":"

The highest estimated age.

" } }, - "documentation":"

Structure containing the estimated age range, in years, for a face.

Rekognition estimates an age-range for faces detected in the input image. Estimated age ranges can overlap; a face of a 5 year old may have an estimated range of 4-6 whilst the face of a 6 year old may have an estimated range of 4-8.

" + "documentation":"

Structure containing the estimated age range, in years, for a face.

Amazon Rekognition estimates an age range for faces detected in the input image. Estimated age ranges can overlap. A face of a 5-year-old might have an estimated range of 4-6, while the face of a 6-year-old might have an estimated range of 4-8.

" }, "Attribute":{ "type":"string", @@ -731,7 +749,7 @@ "documentation":"

Top coordinate of the bounding box as a ratio of overall image height.

" } }, - "documentation":"

Identifies the bounding box around the object, face or text. The left (x-coordinate) and top (y-coordinate) are coordinates representing the top and left sides of the bounding box. Note that the upper-left corner of the image is the origin (0,0).

The top and left values returned are ratios of the overall image size. For example, if the input image is 700x200 pixels, and the top-left coordinate of the bounding box is 350x50 pixels, the API returns a left value of 0.5 (350/700) and a top value of 0.25 (50/200).

The width and height values represent the dimensions of the bounding box as a ratio of the overall image dimension. For example, if the input image is 700x200 pixels, and the bounding box width is 70 pixels, the width returned is 0.1.

The bounding box coordinates can have negative values. For example, if Amazon Rekognition is able to detect a face that is at the image edge and is only partially visible, the service can return coordinates that are outside the image bounds and, depending on the image edge, you might get negative values or values greater than 1 for the left or top values.

" + "documentation":"

Identifies the bounding box around the face or text. The left (x-coordinate) and top (y-coordinate) are coordinates representing the top and left sides of the bounding box. Note that the upper-left corner of the image is the origin (0,0).

The top and left values returned are ratios of the overall image size. For example, if the input image is 700x200 pixels, and the top-left coordinate of the bounding box is 350x50 pixels, the API returns a left value of 0.5 (350/700) and a top value of 0.25 (50/200).

The width and height values represent the dimensions of the bounding box as a ratio of the overall image dimension. For example, if the input image is 700x200 pixels, and the bounding box width is 70 pixels, the width returned is 0.1.

The bounding box coordinates can have negative values. For example, if Amazon Rekognition is able to detect a face that is at the image edge and is only partially visible, the service can return coordinates that are outside the image bounds and, depending on the image edge, you might get negative values or values greater than 1 for the left or top values.

" }, "Celebrity":{ "type":"structure", @@ -754,7 +772,7 @@ }, "MatchConfidence":{ "shape":"Percent", - "documentation":"

The confidence, in percentage, that Rekognition has that the recognized face is the celebrity.

" + "documentation":"

The confidence, in percentage, that Amazon Rekognition has that the recognized face is the celebrity.

" } }, "documentation":"

Provides information about a celebrity recognized by the operation.

" @@ -846,7 +864,7 @@ "documentation":"

Provides face metadata (bounding box and confidence that the bounding box actually contains a face).

" } }, - "documentation":"

Provides information about a face in a target image that matches the source image face analysed by CompareFaces. The Face property contains the bounding box of the face in the target image. The Similarity property is the confidence that the source image face matches the face in the bounding box.

" + "documentation":"

Provides information about a face in a target image that matches the source image face analyzed by CompareFaces. The Face property contains the bounding box of the face in the target image. The Similarity property is the confidence that the source image face matches the face in the bounding box.

" }, "CompareFacesMatchList":{ "type":"list", @@ -890,11 +908,11 @@ }, "SourceImageOrientationCorrection":{ "shape":"OrientationCorrection", - "documentation":"

The orientation of the source image (counterclockwise direction). If your application displays the source image, you can use this value to correct image orientation. The bounding box coordinates returned in SourceImageFace represent the location of the face before the image orientation is corrected.

If the source image is in .jpeg format, it might contain exchangeable image (Exif) metadata that includes the image's orientation. If the Exif metadata for the source image populates the orientation field, the value of OrientationCorrection is null and the SourceImageFace bounding box coordinates represent the location of the face after Exif metadata is used to correct the orientation. Images in .png format don't contain Exif metadata.

" + "documentation":"

The orientation of the source image (counterclockwise direction). If your application displays the source image, you can use this value to correct image orientation. The bounding box coordinates returned in SourceImageFace represent the location of the face before the image orientation is corrected.

If the source image is in .jpeg format, it might contain exchangeable image (Exif) metadata that includes the image's orientation. If the Exif metadata for the source image populates the orientation field, the value of OrientationCorrection is null. The SourceImageFace bounding box coordinates represent the location of the face after Exif metadata is used to correct the orientation. Images in .png format don't contain Exif metadata.

" }, "TargetImageOrientationCorrection":{ "shape":"OrientationCorrection", - "documentation":"

The orientation of the target image (in counterclockwise direction). If your application displays the target image, you can use this value to correct the orientation of the image. The bounding box coordinates returned in FaceMatches and UnmatchedFaces represent face locations before the image orientation is corrected.

If the target image is in .jpg format, it might contain Exif metadata that includes the orientation of the image. If the Exif metadata for the target image populates the orientation field, the value of OrientationCorrection is null and the bounding box coordinates in FaceMatches and UnmatchedFaces represent the location of the face after Exif metadata is used to correct the orientation. Images in .png format don't contain Exif metadata.

" + "documentation":"

The orientation of the target image (in counterclockwise direction). If your application displays the target image, you can use this value to correct the orientation of the image. The bounding box coordinates returned in FaceMatches and UnmatchedFaces represent face locations before the image orientation is corrected.

If the target image is in .jpg format, it might contain Exif metadata that includes the orientation of the image. If the Exif metadata for the target image populates the orientation field, the value of OrientationCorrection is null. The bounding box coordinates in FaceMatches and UnmatchedFaces represent the location of the face after Exif metadata is used to correct the orientation. Images in .png format don't contain Exif metadata.

" } } }, @@ -926,7 +944,7 @@ "documentation":"

Identifies face image brightness and sharpness.

" } }, - "documentation":"

Provides face metadata for target image faces that are analysed by CompareFaces and RecognizeCelebrities.

" + "documentation":"

Provides face metadata for target image faces that are analyzed by CompareFaces and RecognizeCelebrities.

" }, "ComparedFaceList":{ "type":"list", @@ -1105,6 +1123,37 @@ "members":{ } }, + "DescribeCollectionRequest":{ + "type":"structure", + "required":["CollectionId"], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

The ID of the collection to describe.

" + } + } + }, + "DescribeCollectionResponse":{ + "type":"structure", + "members":{ + "FaceCount":{ + "shape":"ULong", + "documentation":"

The number of faces that are indexed into the collection. To index faces into a collection, use .

" + }, + "FaceModelVersion":{ + "shape":"String", + "documentation":"

The version of the face model that's used by the collection for face detection.

For more information, see Model Versioning in the Amazon Rekognition Developer Guide.

" + }, + "CollectionARN":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the collection.

" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

The number of milliseconds since the Unix epoch time until the creation of the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970.

" + } + } + }, "DescribeStreamProcessorRequest":{ "type":"structure", "required":["Name"], @@ -1170,7 +1219,7 @@ }, "Attributes":{ "shape":"Attributes", - "documentation":"

An array of facial attributes you want to be returned. This can be the default list of attributes or all attributes. If you don't specify a value for Attributes or if you specify [\"DEFAULT\"], the API returns the following subset of facial attributes: BoundingBox, Confidence, Pose, Quality and Landmarks. If you provide [\"ALL\"], all facial attributes are returned but the operation will take longer to complete.

If you provide both, [\"ALL\", \"DEFAULT\"], the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).

" + "documentation":"

An array of facial attributes you want to be returned. This can be the default list of attributes or all attributes. If you don't specify a value for Attributes or if you specify [\"DEFAULT\"], the API returns the following subset of facial attributes: BoundingBox, Confidence, Pose, Quality, and Landmarks. If you provide [\"ALL\"], all facial attributes are returned, but the operation takes longer to complete.

If you provide both, [\"ALL\", \"DEFAULT\"], the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).

" } } }, @@ -1183,7 +1232,7 @@ }, "OrientationCorrection":{ "shape":"OrientationCorrection", - "documentation":"

The orientation of the input image (counter-clockwise direction). If your application displays the image, you can use this value to correct image orientation. The bounding box coordinates returned in FaceDetails represent face locations before the image orientation is corrected.

If the input image is in .jpeg format, it might contain exchangeable image (Exif) metadata that includes the image's orientation. If so, and the Exif metadata for the input image populates the orientation field, the value of OrientationCorrection is null and the FaceDetails bounding box coordinates represent face locations after Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.

" + "documentation":"

The orientation of the input image (counter-clockwise direction). If your application displays the image, you can use this value to correct image orientation. The bounding box coordinates returned in FaceDetails represent face locations before the image orientation is corrected.

If the input image is in .jpeg format, it might contain exchangeable image (Exif) metadata that includes the image's orientation. If so, and the Exif metadata for the input image populates the orientation field, the value of OrientationCorrection is null. The FaceDetails bounding box coordinates represent face locations after Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.

" } } }, @@ -1494,7 +1543,7 @@ "documentation":"

Structure containing attributes of the face that the algorithm detected.

" } }, - "documentation":"

Object containing both the face metadata (stored in the back-end database) and facial attributes that are detected but aren't stored in the database.

" + "documentation":"

Object containing both the face metadata (stored in the backend database), and facial attributes that are detected but aren't stored in the database.

" }, "FaceRecordList":{ "type":"list", @@ -1877,7 +1926,7 @@ "documentation":"

Identifies an S3 object as the image source.

" } }, - "documentation":"

Provides the input image either as bytes or an S3 object.

You pass image bytes to a Rekognition API operation by using the Bytes property. For example, you would use the Bytes property to pass an image loaded from a local file system. Image bytes passed by using the Bytes property must be base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to call Rekognition API operations.

For more information, see Analyzing an Image Loaded from a Local File System in the Amazon Rekognition Developer Guide.

You pass images stored in an S3 bucket to a Rekognition API operation by using the S3Object property. Images stored in an S3 bucket do not need to be base64-encoded.

The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.

If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes using the Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and then call the operation using the S3Object property.

For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource Based Policies in the Amazon Rekognition Developer Guide.

" + "documentation":"

Provides the input image either as bytes or an S3 object.

You pass image bytes to an Amazon Rekognition API operation by using the Bytes property. For example, you would use the Bytes property to pass an image loaded from a local file system. Image bytes passed by using the Bytes property must be base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to call Amazon Rekognition API operations.

For more information, see Analyzing an Image Loaded from a Local File System in the Amazon Rekognition Developer Guide.

You pass images stored in an S3 bucket to an Amazon Rekognition API operation by using the S3Object property. Images stored in an S3 bucket do not need to be base64-encoded.

The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.

If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes using the Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and then call the operation using the S3Object property.

For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource Based Policies in the Amazon Rekognition Developer Guide.

" }, "ImageBlob":{ "type":"blob", @@ -1922,15 +1971,23 @@ }, "Image":{ "shape":"Image", - "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

" + "documentation":"

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn't supported.

" }, "ExternalImageId":{ "shape":"ExternalImageId", - "documentation":"

ID you want to assign to all the faces detected in the image.

" + "documentation":"

The ID you want to assign to all the faces detected in the image.

" }, "DetectionAttributes":{ "shape":"Attributes", - "documentation":"

An array of facial attributes that you want to be returned. This can be the default list of attributes or all attributes. If you don't specify a value for Attributes or if you specify [\"DEFAULT\"], the API returns the following subset of facial attributes: BoundingBox, Confidence, Pose, Quality and Landmarks. If you provide [\"ALL\"], all facial attributes are returned but the operation will take longer to complete.

If you provide both, [\"ALL\", \"DEFAULT\"], the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).

" + "documentation":"

An array of facial attributes that you want to be returned. This can be the default list of attributes or all attributes. If you don't specify a value for Attributes or if you specify [\"DEFAULT\"], the API returns the following subset of facial attributes: BoundingBox, Confidence, Pose, Quality, and Landmarks. If you provide [\"ALL\"], all facial attributes are returned, but the operation takes longer to complete.

If you provide both, [\"ALL\", \"DEFAULT\"], the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).

" + }, + "MaxFaces":{ + "shape":"MaxFacesToIndex", + "documentation":"

The maximum number of faces to index. The value of MaxFaces must be greater than or equal to 1. IndexFaces returns no more than 100 detected faces in an image, even if you specify a larger value for MaxFaces.

If IndexFaces detects more faces than the value of MaxFaces, the faces with the lowest quality are filtered out first. If there are still more faces than the value of MaxFaces, the faces with the smallest bounding boxes are filtered out (up to the number that's needed to satisfy the value of MaxFaces). Information about the unindexed faces is available in the UnindexedFaces array.

The faces that are returned by IndexFaces are sorted by the largest face bounding box size to the smallest size, in descending order.

MaxFaces can be used with a collection associated with any version of the face model.

" + }, + "QualityFilter":{ + "shape":"QualityFilter", + "documentation":"

A filter that specifies how much filtering is done to identify faces that are detected with low quality. Filtered faces aren't indexed. If you specify AUTO, filtering prioritizes the identification of faces that don’t meet the required quality bar chosen by Amazon Rekognition. The quality bar is based on a variety of common use cases. Low-quality detections can occur for a number of reasons. Some examples are an object that's misidentified as a face, a face that's too blurry, or a face with a pose that's too extreme to use. If you specify NONE, no filtering is performed. The default value is AUTO.

To use quality filtering, the collection you are using must be associated with version 3 of the face model.

" } } }, @@ -1943,11 +2000,15 @@ }, "OrientationCorrection":{ "shape":"OrientationCorrection", - "documentation":"

The orientation of the input image (counterclockwise direction). If your application displays the image, you can use this value to correct image orientation. The bounding box coordinates returned in FaceRecords represent face locations before the image orientation is corrected.

If the input image is in jpeg format, it might contain exchangeable image (Exif) metadata. If so, and the Exif metadata populates the orientation field, the value of OrientationCorrection is null and the bounding box coordinates in FaceRecords represent face locations after Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.

" + "documentation":"

The orientation of the input image (counterclockwise direction). If your application displays the image, you can use this value to correct image orientation. The bounding box coordinates returned in FaceRecords represent face locations before the image orientation is corrected.

If the input image is in jpeg format, it might contain exchangeable image (Exif) metadata. If so, and the Exif metadata populates the orientation field, the value of OrientationCorrection is null. The bounding box coordinates in FaceRecords represent face locations after Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.

" }, "FaceModelVersion":{ "shape":"String", - "documentation":"

Version number of the face detection model associated with the input collection (CollectionId).

" + "documentation":"

The version number of the face detection model that's associated with the input collection (CollectionId).

" + }, + "UnindexedFaces":{ + "shape":"UnindexedFaces", + "documentation":"

An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as low quality, or the MaxFaces request parameter filtered them out. To use the quality filter, you specify the QualityFilter request parameter.

" } } }, @@ -2075,15 +2136,15 @@ "members":{ "Type":{ "shape":"LandmarkType", - "documentation":"

Type of the landmark.

" + "documentation":"

Type of landmark.

" }, "X":{ "shape":"Float", - "documentation":"

x-coordinate from the top left of the landmark expressed as the ratio of the width of the image. For example, if the images is 700x200 and the x-coordinate of the landmark is at 350 pixels, this value is 0.5.

" + "documentation":"

The x-coordinate from the top left of the landmark expressed as the ratio of the width of the image. For example, if the image is 700 x 200 and the x-coordinate of the landmark is at 350 pixels, this value is 0.5.

" }, "Y":{ "shape":"Float", - "documentation":"

y-coordinate from the top left of the landmark expressed as the ratio of the height of the image. For example, if the images is 700x200 and the y-coordinate of the landmark is at 100 pixels, this value is 0.5.

" + "documentation":"

The y-coordinate from the top left of the landmark expressed as the ratio of the height of the image. For example, if the image is 700 x 200 and the y-coordinate of the landmark is at 100 pixels, this value is 0.5.

" } }, "documentation":"

Indicates the location of the landmark on the face.

" @@ -2225,6 +2286,10 @@ "max":4096, "min":1 }, + "MaxFacesToIndex":{ + "type":"integer", + "min":1 + }, "MaxResults":{ "type":"integer", "min":1 @@ -2242,7 +2307,7 @@ }, "ParentName":{ "shape":"String", - "documentation":"

The name for the parent label. Labels at the top-level of the hierarchy have the parent label \"\".

" + "documentation":"

The name for the parent label. Labels at the top level of the hierarchy have the parent label \"\".

" } }, "documentation":"

Provides information about a single type of moderated content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

" @@ -2373,7 +2438,7 @@ "documentation":"

Information about the faces in the input collection that match the face of a person in the video.

" } }, - "documentation":"

Information about a person whose face matches a face(s) in a Amazon Rekognition collection. Includes information about the faces in the Amazon Rekognition collection (, information about the person (PersonDetail) and the timestamp for when the person was detected in a video. An array of PersonMatch objects is returned by .

" + "documentation":"

Information about a person whose face matches a face(s) in an Amazon Rekognition collection. Includes information about the faces in the Amazon Rekognition collection (), information about the person (PersonDetail), and the time stamp for when the person was detected in a video. An array of PersonMatch objects is returned by .

" }, "PersonMatches":{ "type":"list", @@ -2429,6 +2494,28 @@ "documentation":"

The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Rekognition.

", "exception":true }, + "QualityFilter":{ + "type":"string", + "enum":[ + "NONE", + "AUTO" + ] + }, + "Reason":{ + "type":"string", + "enum":[ + "EXCEEDS_MAX_FACES", + "EXTREME_POSE", + "LOW_BRIGHTNESS", + "LOW_SHARPNESS", + "LOW_CONFIDENCE", + "SMALL_BOUNDING_BOX" + ] + }, + "Reasons":{ + "type":"list", + "member":{"shape":"Reason"} + }, "RecognizeCelebritiesRequest":{ "type":"structure", "required":["Image"], @@ -2452,7 +2539,7 @@ }, "OrientationCorrection":{ "shape":"OrientationCorrection", - "documentation":"

The orientation of the input image (counterclockwise direction). If your application displays the image, you can use this value to correct the orientation. The bounding box coordinates returned in CelebrityFaces and UnrecognizedFaces represent face locations before the image orientation is corrected.

If the input image is in .jpeg format, it might contain exchangeable image (Exif) metadata that includes the image's orientation. If so, and the Exif metadata for the input image populates the orientation field, the value of OrientationCorrection is null and the CelebrityFaces and UnrecognizedFaces bounding box coordinates represent face locations after Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.

" + "documentation":"

The orientation of the input image (counterclockwise direction). If your application displays the image, you can use this value to correct the orientation. The bounding box coordinates returned in CelebrityFaces and UnrecognizedFaces represent face locations before the image orientation is corrected.

If the input image is in .jpeg format, it might contain exchangeable image (Exif) metadata that includes the image's orientation. If so, and the Exif metadata for the input image populates the orientation field, the value of OrientationCorrection is null. The CelebrityFaces and UnrecognizedFaces bounding box coordinates represent face locations after Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.

" } } }, @@ -2507,7 +2594,7 @@ "documentation":"

If the bucket is versioning enabled, you can specify the object version.

" } }, - "documentation":"

Provides the S3 bucket name and object name.

The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.

For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource Based Policies in the Amazon Rekognition Developer Guide.

" + "documentation":"

Provides the S3 bucket name and object name.

The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.

For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource-Based Policies in the Amazon Rekognition Developer Guide.

" }, "S3ObjectName":{ "type":"string", @@ -3006,6 +3093,24 @@ "type":"long", "min":0 }, + "UnindexedFace":{ + "type":"structure", + "members":{ + "Reasons":{ + "shape":"Reasons", + "documentation":"

An array of reasons that specify why a face wasn't indexed.

  • EXTREME_POSE - The face is at a pose that can't be detected. For example, the head is turned too far away from the camera.

  • EXCEEDS_MAX_FACES - The number of faces detected is already higher than that specified by the MaxFaces input parameter for IndexFaces.

  • LOW_BRIGHTNESS - The image is too dark.

  • LOW_SHARPNESS - The image is too blurry.

  • LOW_CONFIDENCE - The face was detected with a low confidence.

  • SMALL_BOUNDING_BOX - The bounding box around the face is too small.

" + }, + "FaceDetail":{ + "shape":"FaceDetail", + "documentation":"

The structure that contains attributes of a face that IndexFaces detected, but didn't index.

" + } + }, + "documentation":"

A face that IndexFaces detected, but didn't index. Use the Reasons response attribute to determine why a face wasn't indexed.

" + }, + "UnindexedFaces":{ + "type":"list", + "member":{"shape":"UnindexedFace"} + }, "Url":{"type":"string"}, "Urls":{ "type":"list", diff --git a/botocore/data/s3/2006-03-01/service-2.json b/botocore/data/s3/2006-03-01/service-2.json index 6ac6bf10..f4609d29 100644 --- a/botocore/data/s3/2006-03-01/service-2.json +++ b/botocore/data/s3/2006-03-01/service-2.json @@ -10,7 +10,6 @@ "serviceFullName":"Amazon Simple Storage Service", "serviceId":"S3", "signatureVersion":"s3", - "timestampFormat":"rfc822", "uid":"s3-2006-03-01" }, "operations":{ @@ -734,7 +733,7 @@ "requestUri":"/{Bucket}?replication" }, "input":{"shape":"PutBucketReplicationRequest"}, - "documentation":"

Creates a new replication configuration (or replaces an existing one, if present).

" + "documentation":"

Creates a new replication configuration (or replaces an existing one, if present). For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

" }, "PutBucketRequestPayment":{ "name":"PutBucketRequestPayment", @@ -2150,6 +2149,7 @@ "members":{ "Bucket":{ "shape":"BucketName", + "documentation":"

Deletes the replication subresource associated with the specified bucket.

There is usually some time lag before replication configuration deletion is fully propagated to all the Amazon S3 systems.

For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

", "location":"uri", "locationName":"Bucket" } @@ -2211,6 +2211,23 @@ } } }, + "DeleteMarkerReplication":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"DeleteMarkerReplicationStatus", + "documentation":"

The status of the delete marker replication.

In the current implementation, Amazon S3 does not replicate the delete markers. Therefore, the status must be Disabled.

" + } + }, + "documentation":"

Specifies whether Amazon S3 should replicate delete markers.

" + }, + "DeleteMarkerReplicationStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, "DeleteMarkerVersionId":{"type":"string"}, "DeleteMarkers":{ "type":"list", @@ -2379,11 +2396,11 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

Amazon resource name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.

" + "documentation":"

Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.

If you have multiple rules in your replication configuration, all rules must specify the same bucket as the destination. A replication configuration can replicate objects only to one destination bucket.

" }, "Account":{ "shape":"AccountId", - "documentation":"

Account ID of the destination bucket. Currently this is only being verified if Access Control Translation is enabled

" + "documentation":"

Account ID of the destination bucket. Currently Amazon S3 verifies this value only if Access Control Translation is enabled.

In a cross-account scenario, if you tell Amazon S3 to change replica ownership to the AWS account that owns the destination bucket by adding the AccessControlTranslation element, this is the account ID of the destination bucket owner.

" }, "StorageClass":{ "shape":"StorageClass", @@ -2391,11 +2408,11 @@ }, "AccessControlTranslation":{ "shape":"AccessControlTranslation", - "documentation":"

Container for information regarding the access control for replicas.

" + "documentation":"

Container for information regarding the access control for replicas.

Use only in a cross-account scenario, where source and destination bucket owners are not the same, when you want to change replica ownership to the AWS account that owns the destination bucket. If you don't add this element to the replication configuration, the replicas are owned by same AWS account that owns the source object.

" }, "EncryptionConfiguration":{ "shape":"EncryptionConfiguration", - "documentation":"

Container for information regarding encryption based configuration for replicas.

" + "documentation":"

Container that provides encryption-related information. You must specify this element if the SourceSelectionCriteria is specified.

" } }, "documentation":"

Container for replication destination information.

" @@ -2433,7 +2450,7 @@ "members":{ "ReplicaKmsKeyID":{ "shape":"ReplicaKmsKeyID", - "documentation":"

The id of the KMS key used to encrypt the replica object.

" + "documentation":"

The ID of the AWS KMS key for the region where the destination bucket resides. Amazon S3 uses this key to encrypt the replica object.

" } }, "documentation":"

Container for information regarding encryption based configuration for replicas.

" @@ -3693,6 +3710,10 @@ "JSON":{ "shape":"JSONInput", "documentation":"

Specifies JSON as object's input serialization format.

" + }, + "Parquet":{ + "shape":"ParquetInput", + "documentation":"

Specifies Parquet as object's input serialization format.

" } }, "documentation":"

Describes the serialization format of the object.

" @@ -5002,6 +5023,11 @@ "type":"string", "enum":["Destination"] }, + "ParquetInput":{ + "type":"structure", + "members":{ + } + }, "Part":{ "type":"structure", "members":{ @@ -5050,6 +5076,7 @@ }, "Policy":{"type":"string"}, "Prefix":{"type":"string"}, + "Priority":{"type":"integer"}, "Progress":{ "type":"structure", "members":{ @@ -6043,7 +6070,7 @@ }, "Rules":{ "shape":"ReplicationRules", - "documentation":"

Container for information about a particular replication rule. Replication configuration must have at least one rule and can contain up to 1,000 rules.

", + "documentation":"

Container for one or more replication rules. Replication configuration must have at least one rule and can contain up to 1,000 rules.

", "locationName":"Rule" } }, @@ -6052,7 +6079,6 @@ "ReplicationRule":{ "type":"structure", "required":[ - "Prefix", "Status", "Destination" ], @@ -6061,25 +6087,61 @@ "shape":"ID", "documentation":"

Unique identifier for the rule. The value cannot be longer than 255 characters.

" }, + "Priority":{ + "shape":"Priority", + "documentation":"

The priority associated with the rule. If you specify multiple rules in a replication configuration, then Amazon S3 applies rule priority in the event there are conflicts (two or more rules identify the same object based on filter specified). The rule with higher priority takes precedence. For example,

  • The same object qualifies under prefix-based filter criteria, because the prefixes you specified in multiple rules overlap.

  • The same object qualifies under tag-based filter criteria specified in multiple rules.

For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

" + }, "Prefix":{ "shape":"Prefix", - "documentation":"

Object keyname prefix identifying one or more objects to which the rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes are not supported.

" + "documentation":"

Object keyname prefix identifying one or more objects to which the rule applies. Maximum prefix length can be up to 1,024 characters.

", + "deprecated":true }, + "Filter":{"shape":"ReplicationRuleFilter"}, "Status":{ "shape":"ReplicationRuleStatus", "documentation":"

The rule is ignored if status is not Enabled.

" }, "SourceSelectionCriteria":{ "shape":"SourceSelectionCriteria", - "documentation":"

Container for filters that define which source objects should be replicated.

" + "documentation":"

Container that describes additional filters in identifying source objects that you want to replicate. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using an AWS KMS-managed key. You can choose to enable or disable replication of these objects.

Specify this element if you want Amazon S3 to replicate objects created with server-side encryption using AWS KMS-managed keys.

" }, "Destination":{ "shape":"Destination", "documentation":"

Container for replication destination information.

" - } + }, + "DeleteMarkerReplication":{"shape":"DeleteMarkerReplication"} }, "documentation":"

Container for information about a particular replication rule.

" }, + "ReplicationRuleAndOperator":{ + "type":"structure", + "members":{ + "Prefix":{"shape":"Prefix"}, + "Tags":{ + "shape":"TagSet", + "flattened":true, + "locationName":"Tag" + } + } + }, + "ReplicationRuleFilter":{ + "type":"structure", + "members":{ + "Prefix":{ + "shape":"Prefix", + "documentation":"

Object keyname prefix that identifies subset of objects to which the rule applies.

" + }, + "Tag":{ + "shape":"Tag", + "documentation":"

Container for specifying a tag key and value.

The rule applies only to objects having the tag in its tagset.

" + }, + "And":{ + "shape":"ReplicationRuleAndOperator", + "documentation":"

Container for specifying rule filters. These filters determine the subset of objects to which the rule applies. The element is required only if you specify more than one filter. For example:

  • You specify both a Prefix and a Tag filters. Then you wrap these in an And tag.

  • You specify filter based on multiple tags. Then you wrap the Tag elements in an And tag.

" + } + }, + "documentation":"

Filter that identifies subset of objects to which the replication rule applies. A Filter must specify exactly one Prefix, Tag, or an And child element.

" + }, "ReplicationRuleStatus":{ "type":"string", "enum":[ @@ -6538,7 +6600,7 @@ "members":{ "SseKmsEncryptedObjects":{ "shape":"SseKmsEncryptedObjects", - "documentation":"

Container for filter information of selection of KMS Encrypted S3 objects.

" + "documentation":"

Container for filter information of selection of KMS Encrypted S3 objects. The element is required if you include SourceSelectionCriteria in the replication configuration.

" } }, "documentation":"

Container for filters that define which source objects should be replicated.

" diff --git a/botocore/data/sagemaker-runtime/2017-05-13/service-2.json b/botocore/data/sagemaker-runtime/2017-05-13/service-2.json index c39b4fff..6de0f98b 100644 --- a/botocore/data/sagemaker-runtime/2017-05-13/service-2.json +++ b/botocore/data/sagemaker-runtime/2017-05-13/service-2.json @@ -26,7 +26,7 @@ {"shape":"ValidationError"}, {"shape":"ModelError"} ], - "documentation":"

After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint.

For an overview of Amazon SageMaker, see How It Works

Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.

" + "documentation":"

After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint.

For an overview of Amazon SageMaker, see How It Works.

Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.

Calls to InvokeEndpoint are authenticated by using AWS Signature Version 4. For information, see Authenticating Requests (AWS Signature Version 4) in the Amazon S3 API Reference.

Endpoints are scoped to an individual account, and are not public. The URL does not contain the account ID, but Amazon SageMaker determines the account ID from the authentication token that is supplied by the caller.

" } }, "shapes":{ @@ -35,6 +35,11 @@ "max":5242880, "sensitive":true }, + "CustomAttributesHeader":{ + "type":"string", + "max":1024, + "sensitive":true + }, "EndpointName":{ "type":"string", "max":63, @@ -49,10 +54,11 @@ "members":{ "Message":{"shape":"Message"} }, - "documentation":"

Internal failure occurred.

", + "documentation":"

An internal failure occurred.

", "error":{"httpStatusCode":500}, "exception":true, - "fault":true + "fault":true, + "synthetic":true }, "InvokeEndpointInput":{ "type":"structure", @@ -69,7 +75,7 @@ }, "Body":{ "shape":"BodyBlob", - "documentation":"

Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.

" + "documentation":"

Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.

For information about the format of the request body, see Common Data Formats—Inference.

" }, "ContentType":{ "shape":"Header", @@ -82,6 +88,12 @@ "documentation":"

The desired MIME type of the inference in the response.

", "location":"header", "locationName":"Accept" + }, + "CustomAttributes":{ + "shape":"CustomAttributesHeader", + "documentation":"

", + "location":"header", + "locationName":"X-Amzn-SageMaker-Custom-Attributes" } }, "payload":"Body" @@ -92,7 +104,7 @@ "members":{ "Body":{ "shape":"BodyBlob", - "documentation":"

Includes the inference provided by the model.

" + "documentation":"

Includes the inference provided by the model.

For information about the format of the response body, see Common Data Formats—Inference.

" }, "ContentType":{ "shape":"Header", @@ -105,6 +117,12 @@ "documentation":"

Identifies the production variant that was invoked.

", "location":"header", "locationName":"x-Amzn-Invoked-Production-Variant" + }, + "CustomAttributes":{ + "shape":"CustomAttributesHeader", + "documentation":"

", + "location":"header", + "locationName":"X-Amzn-SageMaker-Custom-Attributes" } }, "payload":"Body" @@ -128,7 +146,7 @@ }, "LogStreamArn":{ "shape":"LogStreamArn", - "documentation":"

Amazon Resource Name (ARN) of the log stream.

" + "documentation":"

The Amazon Resource Name (ARN) of the log stream.

" } }, "documentation":"

Model (owned by the customer in the container) returned an error 500.

", @@ -140,10 +158,11 @@ "members":{ "Message":{"shape":"Message"} }, - "documentation":"

Service is unavailable. Try your call again.

", + "documentation":"

The service is unavailable. Try your call again.

", "error":{"httpStatusCode":503}, "exception":true, - "fault":true + "fault":true, + "synthetic":true }, "StatusCode":{"type":"integer"}, "ValidationError":{ @@ -153,8 +172,9 @@ }, "documentation":"

Inspect your request and try again.

", "error":{"httpStatusCode":400}, - "exception":true + "exception":true, + "synthetic":true } }, - "documentation":"

Amazon SageMaker runtime API.

" + "documentation":"

The Amazon SageMaker runtime API.

" } diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index c64dc034..e0c4f80c 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -75,7 +75,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates a model in Amazon SageMaker. In the request, you name the model and describe one or more containers. For each container, you specify the docker image containing inference code, artifacts (from prior training), and custom environment map that the inference code uses when you deploy the model into production.

Use this API to create a model only if you want to use Amazon SageMaker hosting services. To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API.

Amazon SageMaker then deploys all of the containers that you defined for the model in the hosting environment.

In the CreateModel request, you must define a container with the PrimaryContainer parameter.

In the request, you also provide an IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code access any other AWS resources, you grant necessary permissions via this role.

" + "documentation":"

Creates a model in Amazon SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the docker image containing inference code, artifacts (from prior training), and custom environment map that the inference code uses when you deploy the model for predictions.

Use this API to create a model if you want to use Amazon SageMaker hosting services or run a batch transform job.

To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. Amazon SageMaker then deploys all of the containers that you defined for the model in the hosting environment.

To run a batch transform using your model, you start a job with the CreateTransformJob API. Amazon SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location.

In the CreateModel request, you must define a container with the PrimaryContainer parameter.

In the request, you also provide an IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code access any other AWS resources, you grant necessary permissions via this role.

" }, "CreateNotebookInstance":{ "name":"CreateNotebookInstance", @@ -111,7 +111,7 @@ }, "input":{"shape":"CreatePresignedNotebookInstanceUrlInput"}, "output":{"shape":"CreatePresignedNotebookInstanceUrlOutput"}, - "documentation":"

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

" + "documentation":"

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. To restrict access, attach an IAM policy that denies access to this API unless the call comes from an IP address in the specified list to every AWS Identity and Access Management user, group, or role used to access the notebook instance. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see nbi-ip-filter.

" }, "CreateTrainingJob":{ "name":"CreateTrainingJob", @@ -139,7 +139,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

To perform batch transformations, you create a transform job and use the data that you have readily available.

In the request body, you provide the following:

  • TransformJobName - Identifies the transform job. The name must be unique within an AWS Region in an AWS account.

  • ModelName - Identifies the model to use. ModelName must be the name of an existing Amazon SageMaker model within an AWS Region in an AWS account.

  • TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is stored.

  • TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

  • TransformResources - Identifies the ML compute instances for the transform job.

For more information about how batch transformation works in Amazon SageMaker, see How It Works.

" + "documentation":"

Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

To perform batch transformations, you create a transform job and use the data that you have readily available.

In the request body, you provide the following:

  • TransformJobName - Identifies the transform job. The name must be unique within an AWS Region in an AWS account.

  • ModelName - Identifies the model to use. ModelName must be the name of an existing Amazon SageMaker model in the same AWS Region and AWS account. For information on creating a model, see CreateModel.

  • TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is stored.

  • TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

  • TransformResources - Identifies the ML compute instances for the transform job.

For more information about how batch transformation works in Amazon SageMaker, see How It Works.

" }, "DeleteEndpoint":{ "name":"DeleteEndpoint", @@ -614,7 +614,7 @@ }, "RecordWrapperType":{ "shape":"RecordWrapper", - "documentation":"

Specify RecordIO as the value when input data is in raw format but the training algorithm requires the RecordIO format, in which case, Amazon SageMaker wraps each individual S3 object in a RecordIO record. If the input data is already in RecordIO format, you don't need to set this attribute. For more information, see Create a Dataset Using RecordIO.

In FILE mode, leave this field unset or set it to None.

" + "documentation":"

Specify RecordIO as the value when input data is in raw format but the training algorithm requires the RecordIO format, in which case, Amazon SageMaker wraps each individual S3 object in a RecordIO record. If the input data is already in RecordIO format, you don't need to set this attribute. For more information, see Create a Dataset Using RecordIO.

In FILE mode, leave this field unset or set it to None.

" } }, "documentation":"

A channel is a named input source that training algorithms can consume.

" @@ -809,11 +809,11 @@ }, "PrimaryContainer":{ "shape":"ContainerDefinition", - "documentation":"

The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed into production.

" + "documentation":"

The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed for predictions.

" }, "ExecutionRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances. Deploying on ML compute instances is part of model hosting. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

" }, "Tags":{ "shape":"TagList", @@ -821,7 +821,7 @@ }, "VpcConfig":{ "shape":"VpcConfig", - "documentation":"

A VpcConfig object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. For more information, see host-vpc.

" + "documentation":"

A VpcConfig object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. VpcConfig is currently used in hosting services but not in batch transform. For more information, see host-vpc.

" } } }, @@ -1032,7 +1032,7 @@ }, "BatchStrategy":{ "shape":"BatchStrategy", - "documentation":"

Determines the number of records included in a single mini-batch. SingleRecord means only one record is used per mini-batch. MultiRecord means a mini-batch is set to contain as many records that can fit within the MaxPayloadInMB limit.

" + "documentation":"

Determines the number of records included in a single mini-batch. SingleRecord means only one record is used per mini-batch. MultiRecord means a mini-batch is set to contain as many records that can fit within the MaxPayloadInMB limit.

Batch transform will automatically split your input data into whatever payload size is specified if you set SplitType to Line and BatchStrategy to MultiRecord. There's no need to split the dataset into smaller files or to use larger payload sizes unless the records in your dataset are very large.

" }, "Environment":{ "shape":"TransformEnvironmentMap", @@ -1548,7 +1548,7 @@ }, "SecondaryStatus":{ "shape":"SecondaryStatus", - "documentation":"

Provides granular information about the system state. For more information, see TrainingJobStatus.

  • Starting - starting the training job.

  • LaunchingMLInstances - launching ML instances for the training job.

  • PreparingTrainingStack - preparing the ML instances for the training job.

  • Downloading - downloading the input data.

  • DownloadingTrainingImage - downloading the training algorithm image.

  • Training - model training is in progress.

  • Uploading - uploading the trained model.

  • Stopping - stopping the training job.

  • Stopped - the training job has stopped.

  • MaxRuntimeExceeded - the training exceed the specified the max run time, which means the training job is stopping.

  • Completed - the training job has completed.

  • Failed - the training job has failed. The failure reason is provided in the StatusMessage.

The valid values for SecondaryStatus are subject to change. They primary provide information on the progress of the training job.

" + "documentation":"

Provides granular information about the system state. For more information, see TrainingJobStatus.

  • Starting - starting the training job.

  • Downloading - downloading the input data.

  • Training - model training is in progress.

  • Uploading - uploading the trained model.

  • Stopping - stopping the training job.

  • Stopped - the training job has stopped.

  • MaxRuntimeExceeded - the training job exceeded the specified max run time and has been stopped.

  • Completed - the training job has completed.

  • Failed - the training job has failed. The failure reason is stored in the FailureReason field of DescribeTrainingJobResponse.

The valid values for SecondaryStatus are subject to change. They primarily provide information on the progress of the training job.

" }, "FailureReason":{ "shape":"FailureReason", @@ -1604,7 +1604,7 @@ }, "SecondaryStatusTransitions":{ "shape":"SecondaryStatusTransitions", - "documentation":"

A log of time-ordered secondary statuses that a training job has transitioned.

" + "documentation":"

To give an overview of the training job lifecycle, SecondaryStatusTransitions is a log of time-ordered secondary statuses that a training job has transitioned.

" } } }, @@ -1723,6 +1723,7 @@ "Disabled" ] }, + "DisassociateNotebookInstanceLifecycleConfig":{"type":"boolean"}, "EndpointArn":{ "type":"string", "max":2048, @@ -1799,6 +1800,7 @@ "OutOfService", "Creating", "Updating", + "SystemUpdating", "RollingBack", "InService", "Deleting", @@ -3407,7 +3409,7 @@ }, "EndTime":{ "shape":"Timestamp", - "documentation":"

A timestamp that shows when the secondary status has ended and the job has transitioned into another secondary status.

" + "documentation":"

A timestamp that shows when the secondary status has ended and the job has transitioned into another secondary status. The EndTime timestamp is also set after the training job has ended.

" }, "StatusMessage":{ "shape":"StatusMessage", @@ -3903,6 +3905,10 @@ "InstanceCount":{ "shape":"TransformInstanceCount", "documentation":"

The number of ML compute instances to use in the transform job. For distributed transform, provide a value greater than 1. The default value is 1.

" + }, + "VolumeKmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the batch transform job.

" } }, "documentation":"

Describes the resources, including ML instance types and ML instance count, to use for transform job.

" @@ -3994,6 +4000,14 @@ "RoleArn":{ "shape":"RoleArn", "documentation":"

The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access the notebook instance. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

" + }, + "LifecycleConfigName":{ + "shape":"NotebookInstanceLifecycleConfigName", + "documentation":"

The name of a lifecycle configuration to associate with the notebook instance. For information about lifestyle configurations, see notebook-lifecycle-config.

" + }, + "DisassociateLifecycleConfig":{ + "shape":"DisassociateNotebookInstanceLifecycleConfig", + "documentation":"

Set to true to remove the notebook instance lifecycle configuration currently associated with the notebook instance.

" } } }, diff --git a/botocore/data/sagemaker/2017-07-24/waiters-2.json b/botocore/data/sagemaker/2017-07-24/waiters-2.json index 0fbb61eb..d033ea33 100644 --- a/botocore/data/sagemaker/2017-07-24/waiters-2.json +++ b/botocore/data/sagemaker/2017-07-24/waiters-2.json @@ -128,6 +128,36 @@ "argument": "EndpointStatus" } ] + }, + "TransformJobCompletedOrStopped": { + "delay": 60, + "maxAttempts": 60, + "operation": "DescribeTransformJob", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "TransformJobStatus" + }, + { + "expected": "Stopped", + "matcher": "path", + "state": "success", + "argument": "TransformJobStatus" + }, + { + "expected": "Failed", + "matcher": "path", + "state": "failure", + "argument": "TransformJobStatus" + }, + { + "expected": "ValidationException", + "matcher": "error", + "state": "failure" + } + ] } } } diff --git a/botocore/data/secretsmanager/2017-10-17/service-2.json b/botocore/data/secretsmanager/2017-10-17/service-2.json index 0caeac74..89a5b5d7 100644 --- a/botocore/data/secretsmanager/2017-10-17/service-2.json +++ b/botocore/data/secretsmanager/2017-10-17/service-2.json @@ -27,7 +27,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Disables automatic scheduled rotation and cancels the rotation of a secret if one is currently in progress.

To re-enable scheduled rotation, call RotateSecret with AutomaticallyRotateAfterDays set to a value greater than 0. This will immediately rotate your secret and then enable the automatic schedule.

If you cancel a rotation that is in progress, it can leave the VersionStage labels in an unexpected state. Depending on what step of the rotation was in progress, you might need to remove the staging label AWSPENDING from the partially created version, specified by the SecretVersionId response value. You should also evaluate the partially rotated new version to see if it should be deleted, which you can do by removing all staging labels from the new version's VersionStage field.

To successfully start a rotation, the staging label AWSPENDING must be in one of the following states:

  • Not be attached to any version at all

  • Attached to the same version as the staging label AWSCURRENT

If the staging label AWSPENDING is attached to a different version than the version with AWSCURRENT then the attempt to rotate fails.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:CancelRotateSecret

Related operations

  • To configure rotation for a secret or to manually trigger a rotation, use RotateSecret.

  • To get the rotation configuration details for a secret, use DescribeSecret.

  • To list all of the currently available secrets, use ListSecrets.

  • To list all of the versions currently associated with a secret, use ListSecretVersionIds.

" + "documentation":"

Disables automatic scheduled rotation and cancels the rotation of a secret if one is currently in progress.

To re-enable scheduled rotation, call RotateSecret with AutomaticallyRotateAfterDays set to a value greater than 0. This will immediately rotate your secret and then enable the automatic schedule.

If you cancel a rotation that is in progress, it can leave the VersionStage labels in an unexpected state. Depending on what step of the rotation was in progress, you might need to remove the staging label AWSPENDING from the partially created version, specified by the VersionId response value. You should also evaluate the partially rotated new version to see if it should be deleted, which you can do by removing all staging labels from the new version's VersionStage field.

To successfully start a rotation, the staging label AWSPENDING must be in one of the following states:

  • Not be attached to any version at all

  • Attached to the same version as the staging label AWSCURRENT

If the staging label AWSPENDING is attached to a different version than the version with AWSCURRENT then the attempt to rotate fails.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:CancelRotateSecret

Related operations

  • To configure rotation for a secret or to manually trigger a rotation, use RotateSecret.

  • To get the rotation configuration details for a secret, use DescribeSecret.

  • To list all of the currently available secrets, use ListSecrets.

  • To list all of the versions currently associated with a secret, use ListSecretVersionIds.

" }, "CreateSecret":{ "name":"CreateSecret", @@ -206,7 +206,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.

The Secrets Manager console uses only the SecretString field. To add binary data to a secret with the SecretBinary field you must use the AWS CLI or one of the AWS SDKs.

  • If this operation creates the first version for the secret then Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If another version of this secret already exists, then this operation does not automatically move any staging labels other than those that you explicitly specify in the VersionStages parameter.

  • If this operation moves the staging label AWSCURRENT from another version to this version (because you included it in the StagingLabels parameter) then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

  • This operation is idempotent. If a version with a SecretVersionId with the same value as the ClientRequestToken parameter already exists and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you cannot modify an existing version; you can only create new ones.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutSecretValue

  • kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

" + "documentation":"

Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.

The Secrets Manager console uses only the SecretString field. To add binary data to a secret with the SecretBinary field you must use the AWS CLI or one of the AWS SDKs.

  • If this operation creates the first version for the secret then Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If another version of this secret already exists, then this operation does not automatically move any staging labels other than those that you explicitly specify in the VersionStages parameter.

  • If this operation moves the staging label AWSCURRENT from another version to this version (because you included it in the StagingLabels parameter) then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

  • This operation is idempotent. If a version with a VersionId with the same value as the ClientRequestToken parameter already exists and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you cannot modify an existing version; you can only create new ones.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutSecretValue

  • kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

" }, "RestoreSecret":{ "name":"RestoreSecret", @@ -238,7 +238,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Configures and starts the asynchronous process of rotating this secret. If you include the configuration parameters, the operation sets those values for the secret and then immediately starts a rotation. If you do not include the configuration parameters, the operation starts a rotation with the values already stored in the secret. After the rotation completes, the protected service and its clients all use the new version of the secret.

This required configuration information includes the ARN of an AWS Lambda function and the time between scheduled rotations. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the protected service to match. After testing the new credentials, the function marks the new secret with the staging label AWSCURRENT so that your clients all immediately begin to use the new version. For more information about rotating secrets and how to configure a Lambda function to rotate the secrets for your protected service, see Rotating Secrets in AWS Secrets Manager in the AWS Secrets Manager User Guide.

The rotation function must end with the versions of the secret in one of two states:

  • The AWSPENDING and AWSCURRENT staging labels are attached to the same version of the secret, or

  • The AWSPENDING staging label is not attached to any version of the secret.

If instead the AWSPENDING staging label is present but is not attached to the same version as AWSCURRENT then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:RotateSecret

  • lambda:InvokeFunction (on the function specified in the secret's metadata)

Related operations

" + "documentation":"

Configures and starts the asynchronous process of rotating this secret. If you include the configuration parameters, the operation sets those values for the secret and then immediately starts a rotation. If you do not include the configuration parameters, the operation starts a rotation with the values already stored in the secret. After the rotation completes, the protected service and its clients all use the new version of the secret.

This required configuration information includes the ARN of an AWS Lambda function and the time between scheduled rotations. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the protected service to match. After testing the new credentials, the function marks the new secret with the staging label AWSCURRENT so that your clients all immediately begin to use the new version. For more information about rotating secrets and how to configure a Lambda function to rotate the secrets for your protected service, see Rotating Secrets in AWS Secrets Manager in the AWS Secrets Manager User Guide.

Secrets Manager schedules the next rotation when the previous one is complete. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.

The rotation function must end with the versions of the secret in one of two states:

  • The AWSPENDING and AWSCURRENT staging labels are attached to the same version of the secret, or

  • The AWSPENDING staging label is not attached to any version of the secret.

If instead the AWSPENDING staging label is present but is not attached to the same version as AWSCURRENT then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:RotateSecret

  • lambda:InvokeFunction (on the function specified in the secret's metadata)

Related operations

" }, "TagResource":{ "name":"TagResource", @@ -289,7 +289,7 @@ {"shape":"InternalServiceError"}, {"shape":"PreconditionNotMetException"} ], - "documentation":"

Modifies many of the details of the specified secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.

To modify the rotation configuration of a secret, use RotateSecret instead.

The Secrets Manager console uses only the SecretString parameter and therefore limits you to encrypting and storing only a text string. To encrypt and store binary data as part of the version of a secret, you must use either the AWS CLI or one of the AWS SDKs.

  • If a version with a SecretVersionId with the same value as the ClientRequestToken parameter already exists, the operation results in an error. You cannot modify an existing version, you can only create a new version.

  • If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecret

  • kms:GenerateDataKey - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

Related operations

" + "documentation":"

Modifies many of the details of the specified secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.

To modify the rotation configuration of a secret, use RotateSecret instead.

The Secrets Manager console uses only the SecretString parameter and therefore limits you to encrypting and storing only a text string. To encrypt and store binary data as part of the version of a secret, you must use either the AWS CLI or one of the AWS SDKs.

  • If a version with a VersionId with the same value as the ClientRequestToken parameter already exists, the operation results in an error. You cannot modify an existing version, you can only create a new version.

  • If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecret

  • kms:GenerateDataKey - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

Related operations

" }, "UpdateSecretVersionStage":{ "name":"UpdateSecretVersionStage", @@ -322,7 +322,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret for which you want to cancel a rotation request. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

Specifies the secret for which you want to cancel a rotation request. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" } } }, @@ -354,11 +354,11 @@ "members":{ "Name":{ "shape":"NameType", - "documentation":"

Specifies the friendly name of the new secret.

The secret name must be ASCII letters, digits, or the following characters : /_+=.@-

" + "documentation":"

Specifies the friendly name of the new secret.

The secret name must be ASCII letters, digits, or the following characters : /_+=.@-

Don't end your secret name with a hyphen followed by six characters. If you do so, you risk confusion and unexpected results when searching for a secret by partial ARN. This is because Secrets Manager automatically adds a hyphen and six random characters at the end of the ARN.

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) If you include SecretString or SecretBinary, then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request, then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing version. Instead, use PutSecretValue to create a new version.

This value becomes the SecretVersionId of the new version.

", + "documentation":"

(Optional) If you include SecretString or SecretBinary, then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request, then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing version. Instead, use PutSecretValue to create a new version.

This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "Description":{ @@ -415,7 +415,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to delete the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

Specifies the secret that you want to delete the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" } } }, @@ -438,16 +438,16 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to delete. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

Specifies the secret that you want to delete. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" }, "RecoveryWindowInDays":{ "shape":"RecoveryWindowInDaysType", - "documentation":"

(Optional) Specifies the number of days that Secrets Manager waits before it can delete the secret.

This value can range from 7 to 30 days. The default value is 30.

", + "documentation":"

(Optional) Specifies the number of days that Secrets Manager waits before it can delete the secret. You can't use both this parameter and the ForceDeleteWithoutRecovery parameter in the same API call.

This value can range from 7 to 30 days. The default value is 30.

", "box":true }, "ForceDeleteWithoutRecovery":{ "shape":"BooleanType", - "documentation":"

(Optional) Specifies that the secret is to be deleted immediately without any recovery window. You cannot use both this parameter and the RecoveryWindowInDays parameter in the same API call.

An asynchronous background process performs the actual deletion, so there can be a short delay before the operation completes. If you write code to delete and then immediately recreate a secret with the same name, ensure that your code includes appropriate back off and retry logic.

Use this parameter with caution. This parameter causes the operation to skip the normal waiting period before the permanent deletion that AWS would normally impose with the RecoveryWindowInDays parameter. If you delete a secret with the ForceDeleteWithouRecovery parameter, then you have no opportunity to recover the secret. It is permanently lost.

", + "documentation":"

(Optional) Specifies that the secret is to be deleted without any recovery window. You can't use both this parameter and the RecoveryWindowInDays parameter in the same API call.

An asynchronous background process performs the actual deletion, so there can be a short delay before the operation completes. If you write code to delete and then immediately recreate a secret with the same name, ensure that your code includes appropriate back off and retry logic.

Use this parameter with caution. This parameter causes the operation to skip the normal waiting period before the permanent deletion that AWS would normally impose with the RecoveryWindowInDays parameter. If you delete a secret with the ForceDeleteWithoutRecovery parameter, then you have no opportunity to recover the secret. It is permanently lost.

", "box":true } } @@ -478,7 +478,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

The identifier of the secret whose details you want to retrieve. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

The identifier of the secret whose details you want to retrieve. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" } } }, @@ -516,7 +516,7 @@ }, "LastRotatedDate":{ "shape":"LastRotatedDateType", - "documentation":"

The last date and time that the Secrets Manager rotation process for this secret was invoked.

", + "documentation":"

The most recent date and time that the Secrets Manager rotation process was successfully completed. This value is null if the secret has never rotated.

", "box":true }, "LastChangedDate":{ @@ -540,7 +540,7 @@ }, "VersionIdsToStages":{ "shape":"SecretVersionsToStagesMapType", - "documentation":"

A list of all of the currently assigned VersionStage staging labels and the SecretVersionId that each is attached to. Staging labels are used to keep track of the different versions during the rotation process.

A version that does not have any staging labels attached is considered deprecated and subject to deletion. Such versions are not included in this list.

" + "documentation":"

A list of all of the currently assigned VersionStage staging labels and the VersionId that each is attached to. Staging labels are used to keep track of the different versions during the rotation process.

A version that does not have any staging labels attached is considered deprecated and subject to deletion. Such versions are not included in this list.

" } } }, @@ -585,7 +585,7 @@ }, "ExcludePunctuation":{ "shape":"ExcludePunctuationType", - "documentation":"

Specifies that the generated password should not include punctuation characters. The default if you do not include this switch parameter is that punctuation characters can be included.

", + "documentation":"

Specifies that the generated password should not include punctuation characters. The default if you do not include this switch parameter is that punctuation characters can be included.

The following are the punctuation characters that can be included in the generated password if you don't explicitly exclude them with ExcludeCharacters or ExcludePunctuation:

! \" # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \\ ] ^ _ ` { | } ~

", "box":true }, "ExcludeUppercase":{ @@ -625,7 +625,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to retrieve the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

Specifies the secret that you want to retrieve the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" } } }, @@ -652,15 +652,15 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret containing the version that you want to retrieve. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

Specifies the secret containing the version that you want to retrieve. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" }, "VersionId":{ "shape":"SecretVersionIdType", - "documentation":"

Specifies the unique identifier of the version of the secret that you want to retrieve. If you specify this parameter then don't specify VersionStage. If you don't specify either a VersionStage or SecretVersionId then the default is to perform the operation on the version with the VersionStage value of AWSCURRENT.

This value is typically a UUID-type value with 32 hexadecimal digits.

" + "documentation":"

Specifies the unique identifier of the version of the secret that you want to retrieve. If you specify this parameter then don't specify VersionStage. If you don't specify either a VersionStage or VersionId then the default is to perform the operation on the version with the VersionStage value of AWSCURRENT.

This value is typically a UUID-type value with 32 hexadecimal digits.

" }, "VersionStage":{ "shape":"SecretVersionStageType", - "documentation":"

Specifies the secret version that you want to retrieve by the staging label attached to the version.

Staging labels are used to keep track of different versions during the rotation process. If you use this parameter then don't specify SecretVersionId. If you don't specify either a VersionStage or SecretVersionId, then the default is to perform the operation on the version with the VersionStage value of AWSCURRENT.

" + "documentation":"

Specifies the secret version that you want to retrieve by the staging label attached to the version.

Staging labels are used to keep track of different versions during the rotation process. If you use this parameter then don't specify VersionId. If you don't specify either a VersionStage or VersionId, then the default is to perform the operation on the version with the VersionStage value of AWSCURRENT.

" } } }, @@ -754,7 +754,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

The identifier for the secret containing the versions you want to list. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

The identifier for the secret containing the versions you want to list. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" }, "MaxResults":{ "shape":"MaxResultsType", @@ -870,7 +870,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to attach the resource-based policy to. You can specify either the ARN or the friendly name of the secret.

" + "documentation":"

Specifies the secret that you want to attach the resource-based policy to. You can specify either the ARN or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" }, "ResourcePolicy":{ "shape":"NonEmptyResourcePolicyType", @@ -897,11 +897,11 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist.

" + "documentation":"

Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString or SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing secret version. You can only create new versions to store new secret values.

This value becomes the SecretVersionId of the new version.

", + "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString or SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing secret version. You can only create new versions to store new secret values.

This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "SecretBinary":{ @@ -968,7 +968,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to restore from a previously scheduled deletion. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

Specifies the secret that you want to restore from a previously scheduled deletion. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" } } }, @@ -991,11 +991,11 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to rotate. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

Specifies the secret that you want to rotate. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You only need to specify your own value if you are implementing your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the function's processing.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are the same as the request, then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from the request then an error occurs because you cannot modify an existing secret value.

This value becomes the SecretVersionId of the new version.

", + "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You only need to specify your own value if you are implementing your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the function's processing. This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "RotationLambdaARN":{ @@ -1037,7 +1037,7 @@ "members":{ "AutomaticallyAfterDays":{ "shape":"AutomaticallyRotateAfterDaysType", - "documentation":"

Specifies the number of days between automatic scheduled rotations of the secret.

", + "documentation":"

Specifies the number of days between automatic scheduled rotations of the secret.

Secrets Manager schedules the next rotation when the previous one is complete. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.

", "box":true } }, @@ -1221,7 +1221,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

The identifier for the secret that you want to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

The identifier for the secret that you want to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" }, "Tags":{ "shape":"TagListType", @@ -1243,7 +1243,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

The identifier for the secret that you want to remove tags from. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

The identifier for the secret that you want to remove tags from. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" }, "TagKeys":{ "shape":"TagKeyListType", @@ -1257,11 +1257,11 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to modify or to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

Specifies the secret that you want to modify or to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) If you want to add a new version to the secret, this parameter specifies a unique identifier for the new version that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You typically only need to interact with this value if you implement your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from the request then an error occurs because you cannot modify an existing secret value.

This value becomes the SecretVersionId of the new version.

", + "documentation":"

(Optional) If you want to add a new version to the secret, this parameter specifies a unique identifier for the new version that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You typically only need to interact with this value if you implement your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from the request then an error occurs because you cannot modify an existing secret value.

This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "Description":{ @@ -1308,7 +1308,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret with the version whose list of staging labels you want to modify. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + "documentation":"

Specifies the secret with the version whose list of staging labels you want to modify. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" }, "VersionStage":{ "shape":"SecretVersionStageType", @@ -1316,12 +1316,12 @@ }, "RemoveFromVersionId":{ "shape":"SecretVersionIdType", - "documentation":"

(Optional) Specifies the secret version ID of the version that the staging labels are to be removed from.

If you want to move a label to a new version, you do not have to explicitly remove it with this parameter. Adding a label using the MoveToVersionId parameter automatically removes it from the old version. However, if you do include both the \"MoveTo\" and \"RemoveFrom\" parameters, then the move is successful only if the staging labels are actually present on the \"RemoveFrom\" version. If a staging label was on a different version than \"RemoveFrom\", then the request fails.

", + "documentation":"

Specifies the secret version ID of the version that the staging labels are to be removed from. If the staging label you are trying to attach to one version is already attached to a different version, then you must include this parameter and specify the version that the label is to be removed from. If the label is attached and you either do not specify this parameter, or the version ID does not match, then the operation fails.

", "box":true }, "MoveToVersionId":{ "shape":"SecretVersionIdType", - "documentation":"

(Optional) The secret version ID that you want to add the staging labels to.

If any of the staging labels are already attached to a different version of the secret, then they are automatically removed from that version before adding them to this version.

", + "documentation":"

(Optional) The secret version ID that you want to add the staging labels to. If you want to remove a label from a version, then do not specify this parameter.

If any of the staging labels are already attached to a different version of the secret, then you must also specify the RemoveFromVersionId parameter.

", "box":true } } diff --git a/botocore/data/secretsmanager/2017-10-17/service-2.sdk-extras.json b/botocore/data/secretsmanager/2017-10-17/service-2.sdk-extras.json new file mode 100644 index 00000000..dc78f892 --- /dev/null +++ b/botocore/data/secretsmanager/2017-10-17/service-2.sdk-extras.json @@ -0,0 +1,8 @@ +{ + "version": 1.0, + "merge": { + "metadata": { + "serviceId": "Secrets Manager" + } + } +} diff --git a/botocore/data/ses/2010-12-01/service-2.json b/botocore/data/ses/2010-12-01/service-2.json index ed07be63..ac2bd549 100644 --- a/botocore/data/ses/2010-12-01/service-2.json +++ b/botocore/data/ses/2010-12-01/service-2.json @@ -695,7 +695,7 @@ {"shape":"ConfigurationSetSendingPausedException"}, {"shape":"AccountSendingPausedException"} ], - "documentation":"

Composes an email message to multiple destinations. The message body is created using an email template.

In order to send email using the SendBulkTemplatedEmail operation, your call to the API must meet the following requirements:

  • The call must refer to an existing email template. You can create email templates using the CreateTemplate operation.

  • The message must be sent from a verified email address or domain.

  • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

  • The total size of the message, including attachments, must be less than 10 MB.

  • Each Destination parameter must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

" + "documentation":"

Composes an email message to multiple destinations. The message body is created using an email template.

In order to send email using the SendBulkTemplatedEmail operation, your call to the API must meet the following requirements:

  • The call must refer to an existing email template. You can create email templates using the CreateTemplate operation.

  • The message must be sent from a verified email address or domain.

  • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

  • The maximum message size is 10 MB.

  • Each Destination parameter must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

  • The message may not include more than 50 recipients, across the To:, CC: and BCC: fields. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call the SendBulkTemplatedEmail operation several times to send the message to each group.

  • The number of destinations you can contact in a single call to the API may be limited by your account's maximum sending rate.

" }, "SendCustomVerificationEmail":{ "name":"SendCustomVerificationEmail", @@ -735,7 +735,7 @@ {"shape":"ConfigurationSetSendingPausedException"}, {"shape":"AccountSendingPausedException"} ], - "documentation":"

Composes an email message and immediately queues it for sending. In order to send email using the SendEmail operation, your message must meet the following requirements:

  • The message must be sent from a verified email address or domain. If you attempt to send email using a non-verified address or domain, the operation will result in an \"Email address not verified\" error.

  • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

  • The total size of the message, including attachments, must be smaller than 10 MB.

  • The message must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

  • The message may not include more than 50 recipients, across the To:, CC: and BCC: fields. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call the SendEmail operation several times to send the message to each group.

For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

" + "documentation":"

Composes an email message and immediately queues it for sending. In order to send email using the SendEmail operation, your message must meet the following requirements:

  • The message must be sent from a verified email address or domain. If you attempt to send email using a non-verified address or domain, the operation will result in an \"Email address not verified\" error.

  • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

  • The maximum message size is 10 MB.

  • The message must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

  • The message may not include more than 50 recipients, across the To:, CC: and BCC: fields. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call the SendEmail operation several times to send the message to each group.

For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

" }, "SendRawEmail":{ "name":"SendRawEmail", @@ -755,7 +755,7 @@ {"shape":"ConfigurationSetSendingPausedException"}, {"shape":"AccountSendingPausedException"} ], - "documentation":"

Composes an email message and immediately queues it for sending. When calling this operation, you may specify the message headers as well as the content. The SendRawEmail operation is particularly useful for sending multipart MIME emails (such as those that contain both a plain-text and an HTML version).

In order to send email using the SendRawEmail operation, your message must meet the following requirements:

  • The message must be sent from a verified email address or domain. If you attempt to send email using a non-verified address or domain, the operation will result in an \"Email address not verified\" error.

  • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

  • The total size of the message, including attachments, must be smaller than 10 MB.

  • The message must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

  • The message may not include more than 50 recipients, across the To:, CC: and BCC: fields. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call the SendRawEmail operation several times to send the message to each group.

For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

Additionally, keep the following considerations in mind when using the SendRawEmail operation:

  • Although you can customize the message headers when using the SendRawEmail operation, Amazon SES will automatically apply its own Message-ID and Date headers; if you passed these headers when creating the message, they will be overwritten by the values that Amazon SES provides.

  • If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's Source, From, and Return-Path parameters in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the API, or you can include the following X-headers in the header of your raw email:

    • X-SES-SOURCE-ARN

    • X-SES-FROM-ARN

    • X-SES-RETURN-PATH-ARN

    Do not include these X-headers in the DKIM signature; Amazon SES will remove them before sending the email.

    For most common sending authorization scenarios, we recommend that you specify the SourceIdentityArn parameter and not the FromIdentityArn or ReturnPathIdentityArn parameters. If you only specify the SourceIdentityArn parameter, Amazon SES will set the From and Return Path addresses to the identity specified in SourceIdentityArn. For more information about sending authorization, see the Using Sending Authorization with Amazon SES in the Amazon SES Developer Guide.

" + "documentation":"

Composes an email message and immediately queues it for sending.

This operation is more flexible than the SendEmail API operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for example, when you want to send a multipart MIME email (such as a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments.

The SendRawEmail operation has the following requirements:

  • You can only send email from verified email addresses or domains. If you try to send email from an address that isn't verified, the operation results in an \"Email address not verified\" error.

  • If your account is still in the Amazon SES sandbox, you can only send email to other verified addresses in your account, or to addresses that are associated with the Amazon SES mailbox simulator.

  • The maximum message size, including attachments, is 10 MB.

  • Each message has to include at least one recipient address. A recipient address includes any address on the To:, CC:, or BCC: lines.

  • If you send a single message to more than one recipient address, and one of the recipient addresses isn't in a valid format (that is, it's not in the format UserName@[SubDomain.]Domain.TopLevelDomain), Amazon SES rejects the entire message, even if the other addresses are valid.

  • Each message can include up to 50 recipient addresses across the To:, CC:, or BCC: lines. If you need to send a single message to more than 50 recipients, you have to split the list of recipient addresses into groups of fewer than 50 recipients, and send separate messages to each group.

  • Amazon SES allows you to specify 8-bit Content-Transfer-Encoding for MIME message parts. However, if Amazon SES has to modify the contents of your message (for example, if you use open and click tracking), 8-bit content isn't preserved. For this reason, we highly recommend that you encode all content that isn't 7-bit ASCII. For more information, see MIME Encoding in the Amazon SES Developer Guide.

Additionally, keep the following considerations in mind when using the SendRawEmail operation:

  • Although you can customize the message headers when using the SendRawEmail operation, Amazon SES will automatically apply its own Message-ID and Date headers; if you passed these headers when creating the message, they will be overwritten by the values that Amazon SES provides.

  • If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's Source, From, and Return-Path parameters in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the API, or you can include the following X-headers in the header of your raw email:

    • X-SES-SOURCE-ARN

    • X-SES-FROM-ARN

    • X-SES-RETURN-PATH-ARN

    Do not include these X-headers in the DKIM signature; Amazon SES will remove them before sending the email.

    For most common sending authorization scenarios, we recommend that you specify the SourceIdentityArn parameter and not the FromIdentityArn or ReturnPathIdentityArn parameters. If you only specify the SourceIdentityArn parameter, Amazon SES will set the From and Return Path addresses to the identity specified in SourceIdentityArn. For more information about sending authorization, see Using Sending Authorization with Amazon SES in the Amazon SES Developer Guide.

  • For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

" }, "SendTemplatedEmail":{ "name":"SendTemplatedEmail", @@ -776,7 +776,7 @@ {"shape":"ConfigurationSetSendingPausedException"}, {"shape":"AccountSendingPausedException"} ], - "documentation":"

Composes an email message using an email template and immediately queues it for sending.

In order to send email using the SendTemplatedEmail operation, your call to the API must meet the following requirements:

  • The call must refer to an existing email template. You can create email templates using the CreateTemplate operation.

  • The message must be sent from a verified email address or domain.

  • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

  • The total size of the message, including attachments, must be less than 10 MB.

  • Calls to the SendTemplatedEmail operation may only include one Destination parameter. A destination is a set of recipients who will receive the same version of the email. The Destination parameter can include up to 50 recipients, across the To:, CC: and BCC: fields.

  • The Destination parameter must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

If your call to the SendTemplatedEmail operation includes all of the required parameters, Amazon SES accepts it and returns a Message ID. However, if Amazon SES can't render the email because the template contains errors, it doesn't send the email. Additionally, because it already accepted the message, Amazon SES doesn't return a message stating that it was unable to send the email.

For these reasons, we highly recommend that you set up Amazon SES to send you notifications when Rendering Failure events occur. For more information, see Sending Personalized Email Using the Amazon SES API in the Amazon Simple Email Service Developer Guide.

" + "documentation":"

Composes an email message using an email template and immediately queues it for sending.

In order to send email using the SendTemplatedEmail operation, your call to the API must meet the following requirements:

  • The call must refer to an existing email template. You can create email templates using the CreateTemplate operation.

  • The message must be sent from a verified email address or domain.

  • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

  • The maximum message size is 10 MB.

  • Calls to the SendTemplatedEmail operation may only include one Destination parameter. A destination is a set of recipients who will receive the same version of the email. The Destination parameter can include up to 50 recipients, across the To:, CC: and BCC: fields.

  • The Destination parameter must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

If your call to the SendTemplatedEmail operation includes all of the required parameters, Amazon SES accepts it and returns a Message ID. However, if Amazon SES can't render the email because the template contains errors, it doesn't send the email. Additionally, because it already accepted the message, Amazon SES doesn't return a message stating that it was unable to send the email.

For these reasons, we highly recommend that you set up Amazon SES to send you notifications when Rendering Failure events occur. For more information, see Sending Personalized Email Using the Amazon SES API in the Amazon Simple Email Service Developer Guide.

" }, "SetActiveReceiptRuleSet":{ "name":"SetActiveReceiptRuleSet", @@ -857,7 +857,7 @@ "shape":"SetIdentityNotificationTopicResponse", "resultWrapper":"SetIdentityNotificationTopicResult" }, - "documentation":"

Given an identity (an email address or a domain), sets the Amazon Simple Notification Service (Amazon SNS) topic to which Amazon SES will publish bounce, complaint, and/or delivery notifications for emails sent with that identity as the Source.

Unless feedback forwarding is enabled, you must specify Amazon SNS topics for bounce and complaint notifications. For more information, see SetIdentityFeedbackForwardingEnabled.

You can execute this operation no more than once per second.

For more information about feedback notification, see the Amazon SES Developer Guide.

" + "documentation":"

Sets an Amazon Simple Notification Service (Amazon SNS) topic to use when delivering notifications. When you use this operation, you specify a verified identity, such as an email address or domain. When you send an email that uses the chosen identity in the Source field, Amazon SES sends notifications to the topic you specified. You can send bounce, complaint, or delivery notifications (or any combination of the three) to the Amazon SNS topic that you specify.

You can execute this operation no more than once per second.

For more information about feedback notification, see the Amazon SES Developer Guide.

" }, "SetReceiptRulePosition":{ "name":"SetReceiptRulePosition", @@ -3711,7 +3711,7 @@ }, "RawMessage":{ "shape":"RawMessage", - "documentation":"

The raw text of the message. The client is responsible for ensuring the following:

  • Message must contain a header and a body, separated by a blank line.

  • All required header fields must be present.

  • Each part of a multipart MIME message must be formatted properly.

  • MIME content types must be among those supported by Amazon SES. For more information, go to the Amazon SES Developer Guide.

  • Must be base64-encoded.

  • Per RFC 5321, the maximum length of each line of text, including the <CRLF>, must not exceed 1,000 characters.

" + "documentation":"

The raw email message itself. The message has to meet the following criteria:

  • The message has to contain a header and a body, separated by a blank line.

  • All of the required header fields must be present in the message.

  • Each part of a multipart MIME message must be formatted properly.

  • Attachments must be of a content type that Amazon SES supports. For a list of unsupported content types, see Unsupported Attachment Types in the Amazon SES Developer Guide.

  • The entire message must be base64-encoded.

  • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, we highly recommend that you encode that content. For more information, see Sending Raw Email in the Amazon SES Developer Guide.

  • Per RFC 5321, the maximum length of each line of text, including the <CRLF>, must not exceed 1,000 characters.

" }, "FromArn":{ "shape":"AmazonResourceName", @@ -3941,7 +3941,7 @@ "members":{ "Identity":{ "shape":"Identity", - "documentation":"

The identity for which the Amazon SNS topic will be set. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

" + "documentation":"

The identity (email address or domain) that you want to set the Amazon SNS topic for.

You can only specify a verified identity for this parameter.

You can specify an identity by using its name or by using its Amazon Resource Name (ARN). The following examples are all valid identities: sender@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

" }, "NotificationType":{ "shape":"NotificationType", diff --git a/botocore/data/signer/2017-08-25/paginators-1.json b/botocore/data/signer/2017-08-25/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/signer/2017-08-25/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/signer/2017-08-25/service-2.json b/botocore/data/signer/2017-08-25/service-2.json new file mode 100644 index 00000000..c9df5c96 --- /dev/null +++ b/botocore/data/signer/2017-08-25/service-2.json @@ -0,0 +1,966 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-08-25", + "endpointPrefix":"signer", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"signer", + "serviceFullName":"AWS Signer", + "serviceId":"signer", + "signatureVersion":"v4", + "signingName":"signer", + "uid":"signer-2017-08-25" + }, + "operations":{ + "CancelSigningProfile":{ + "name":"CancelSigningProfile", + "http":{ + "method":"DELETE", + "requestUri":"/signing-profiles/{profileName}" + }, + "input":{"shape":"CancelSigningProfileRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Changes the state of an ACTIVE signing profile to CANCELED. A canceled profile is still viewable with the ListSigningProfiles operation, but it cannot perform new signing jobs, and is deleted two years after cancelation.

" + }, + "DescribeSigningJob":{ + "name":"DescribeSigningJob", + "http":{ + "method":"GET", + "requestUri":"/signing-jobs/{jobId}" + }, + "input":{"shape":"DescribeSigningJobRequest"}, + "output":{"shape":"DescribeSigningJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Returns information about a specific code signing job. You specify the job by using the jobId value that is returned by the StartSigningJob operation.

" + }, + "GetSigningPlatform":{ + "name":"GetSigningPlatform", + "http":{ + "method":"GET", + "requestUri":"/signing-platforms/{platformId}" + }, + "input":{"shape":"GetSigningPlatformRequest"}, + "output":{"shape":"GetSigningPlatformResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Returns information on a specific signing platform.

" + }, + "GetSigningProfile":{ + "name":"GetSigningProfile", + "http":{ + "method":"GET", + "requestUri":"/signing-profiles/{profileName}" + }, + "input":{"shape":"GetSigningProfileRequest"}, + "output":{"shape":"GetSigningProfileResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Returns information on a specific signing profile.

" + }, + "ListSigningJobs":{ + "name":"ListSigningJobs", + "http":{ + "method":"GET", + "requestUri":"/signing-jobs" + }, + "input":{"shape":"ListSigningJobsRequest"}, + "output":{"shape":"ListSigningJobsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Lists all your signing jobs. You can use the maxResults parameter to limit the number of signing jobs that are returned in the response. If additional jobs remain to be listed, AWS Signer returns a nextToken value. Use this value in subsequent calls to ListSigningJobs to fetch the remaining values. You can continue calling ListSigningJobs with your maxResults parameter and with new values that AWS Signer returns in the nextToken parameter until all of your signing jobs have been returned.

" + }, + "ListSigningPlatforms":{ + "name":"ListSigningPlatforms", + "http":{ + "method":"GET", + "requestUri":"/signing-platforms" + }, + "input":{"shape":"ListSigningPlatformsRequest"}, + "output":{"shape":"ListSigningPlatformsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Lists all signing platforms available in AWS Signer that match the request parameters. If additional platforms remain to be listed, AWS Signer returns a nextToken value. Use this value in subsequent calls to ListSigningPlatforms to fetch the remaining values. You can continue calling ListSigningPlatforms with your maxResults parameter and with new values that AWS Signer returns in the nextToken parameter until all of your signing platforms have been returned.

" + }, + "ListSigningProfiles":{ + "name":"ListSigningProfiles", + "http":{ + "method":"GET", + "requestUri":"/signing-profiles" + }, + "input":{"shape":"ListSigningProfilesRequest"}, + "output":{"shape":"ListSigningProfilesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Lists all available signing profiles in your AWS account. Returns only profiles with an ACTIVE status unless the includeCanceled request field is set to true. If additional profiles remain to be listed, AWS Signer returns a nextToken value. Use this value in subsequent calls to ListSigningProfiles to fetch the remaining values. You can continue calling ListSigningProfiles with your maxResults parameter and with new values that AWS Signer returns in the nextToken parameter until all of your signing profiles have been returned.

" + }, + "PutSigningProfile":{ + "name":"PutSigningProfile", + "http":{ + "method":"PUT", + "requestUri":"/signing-profiles/{profileName}" + }, + "input":{"shape":"PutSigningProfileRequest"}, + "output":{"shape":"PutSigningProfileResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Creates a signing profile. A signing profile is an AWS Signer template that can be used to carry out a pre-defined signing job. For more information, see http://docs.aws.amazon.com/signer/latest/developerguide/gs-profile.html

" + }, + "StartSigningJob":{ + "name":"StartSigningJob", + "http":{ + "method":"POST", + "requestUri":"/signing-jobs" + }, + "input":{"shape":"StartSigningJobRequest"}, + "output":{"shape":"StartSigningJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Initiates a signing job to be performed on the code provided. Signing jobs are viewable by the ListSigningJobs operation for two years after they are performed. Note the following requirements:

  • You must create an Amazon S3 source bucket. For more information, see Create a Bucket in the Amazon S3 Getting Started Guide.

  • Your S3 source bucket must be version enabled.

  • You must create an S3 destination bucket. AWS Signer uses your S3 destination bucket to write your signed code.

  • You specify the name of the source and destination buckets when calling the StartSigningJob operation.

  • You must also specify a request token that identifies your request to AWS Signer.

You can call the DescribeSigningJob and the ListSigningJobs actions after you call StartSigningJob.

For a Java example that shows how to use this action, see http://docs.aws.amazon.com/acm/latest/userguide/

" + } + }, + "shapes":{ + "key":{"type":"string"}, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "BucketName":{"type":"string"}, + "CancelSigningProfileRequest":{ + "type":"structure", + "required":["profileName"], + "members":{ + "profileName":{ + "shape":"ProfileName", + "documentation":"

The name of the signing profile to be canceled.

", + "location":"uri", + "locationName":"profileName" + } + } + }, + "Category":{ + "type":"string", + "enum":["AWSIoT"] + }, + "CertificateArn":{"type":"string"}, + "ClientRequestToken":{"type":"string"}, + "CompletedAt":{"type":"timestamp"}, + "CreatedAt":{"type":"timestamp"}, + "DescribeSigningJobRequest":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the signing job on input.

", + "location":"uri", + "locationName":"jobId" + } + } + }, + "DescribeSigningJobResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the signing job on output.

" + }, + "source":{ + "shape":"Source", + "documentation":"

The object that contains the name of your S3 bucket or your raw code.

" + }, + "signingMaterial":{ + "shape":"SigningMaterial", + "documentation":"

Amazon Resource Name (ARN) of your code signing certificate.

" + }, + "platformId":{ + "shape":"PlatformId", + "documentation":"

The microcontroller platform to which your signed code image will be distributed.

" + }, + "profileName":{ + "shape":"ProfileName", + "documentation":"

The name of the profile that initiated the signing operation.

" + }, + "overrides":{ + "shape":"SigningPlatformOverrides", + "documentation":"

A list of any overrides that were applied to the signing operation.

" + }, + "signingParameters":{ + "shape":"SigningParameters", + "documentation":"

Map of user-assigned key-value pairs used during signing. These values contain any information that you specified for use in your signing job.

" + }, + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

Date and time that the signing job was created.

" + }, + "completedAt":{ + "shape":"CompletedAt", + "documentation":"

Date and time that the signing job was completed.

" + }, + "requestedBy":{ + "shape":"RequestedBy", + "documentation":"

The IAM principal that requested the signing job.

" + }, + "status":{ + "shape":"SigningStatus", + "documentation":"

Status of the signing job.

" + }, + "statusReason":{ + "shape":"StatusReason", + "documentation":"

String value that contains the status reason.

" + }, + "signedObject":{ + "shape":"SignedObject", + "documentation":"

Name of the S3 bucket where the signed code image is saved by AWS Signer.

" + } + } + }, + "Destination":{ + "type":"structure", + "members":{ + "s3":{ + "shape":"S3Destination", + "documentation":"

The S3Destination object.

" + } + }, + "documentation":"

Points to an S3Destination object that contains information about your S3 bucket.

" + }, + "DisplayName":{"type":"string"}, + "EncryptionAlgorithm":{ + "type":"string", + "enum":[ + "RSA", + "ECDSA" + ] + }, + "EncryptionAlgorithmOptions":{ + "type":"structure", + "required":[ + "allowedValues", + "defaultValue" + ], + "members":{ + "allowedValues":{ + "shape":"EncryptionAlgorithms", + "documentation":"

The set of accepted encryption algorithms that are allowed in an AWS Signer job.

" + }, + "defaultValue":{ + "shape":"EncryptionAlgorithm", + "documentation":"

The default encryption algorithm that is used by an AWS Signer job.

" + } + }, + "documentation":"

The encryption algorithm options that are available to an AWS Signer job.

" + }, + "EncryptionAlgorithms":{ + "type":"list", + "member":{"shape":"EncryptionAlgorithm"} + }, + "ErrorMessage":{"type":"string"}, + "GetSigningPlatformRequest":{ + "type":"structure", + "required":["platformId"], + "members":{ + "platformId":{ + "shape":"PlatformId", + "documentation":"

The ID of the target signing platform.

", + "location":"uri", + "locationName":"platformId" + } + } + }, + "GetSigningPlatformResponse":{ + "type":"structure", + "members":{ + "platformId":{ + "shape":"PlatformId", + "documentation":"

The ID of the target signing platform.

" + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"

The display name of the target signing platform.

" + }, + "partner":{ + "shape":"String", + "documentation":"

A list of partner entities that use the target signing platform.

" + }, + "target":{ + "shape":"String", + "documentation":"

The validation template that is used by the target signing platform.

" + }, + "category":{ + "shape":"Category", + "documentation":"

The category type of the target signing platform.

" + }, + "signingConfiguration":{ + "shape":"SigningConfiguration", + "documentation":"

A list of configurations applied to the target platform at signing.

" + }, + "signingImageFormat":{ + "shape":"SigningImageFormat", + "documentation":"

The format of the target platform's signing image.

" + }, + "maxSizeInMB":{ + "shape":"MaxSizeInMB", + "documentation":"

The maximum size (in MB) of the payload that can be signed by the target platform.

" + } + } + }, + "GetSigningProfileRequest":{ + "type":"structure", + "required":["profileName"], + "members":{ + "profileName":{ + "shape":"ProfileName", + "documentation":"

The name of the target signing profile.

", + "location":"uri", + "locationName":"profileName" + } + } + }, + "GetSigningProfileResponse":{ + "type":"structure", + "members":{ + "profileName":{ + "shape":"ProfileName", + "documentation":"

The name of the target signing profile.

" + }, + "signingMaterial":{ + "shape":"SigningMaterial", + "documentation":"

The ARN of the certificate that the target profile uses for signing operations.

" + }, + "platformId":{ + "shape":"PlatformId", + "documentation":"

The ID of the platform that is used by the target signing profile.

" + }, + "overrides":{ + "shape":"SigningPlatformOverrides", + "documentation":"

A list of overrides applied by the target signing profile for signing operations.

" + }, + "signingParameters":{ + "shape":"SigningParameters", + "documentation":"

A map of key-value pairs for signing operations that is attached to the target signing profile.

" + }, + "status":{ + "shape":"SigningProfileStatus", + "documentation":"

The status of the target signing profile.

" + } + } + }, + "HashAlgorithm":{ + "type":"string", + "enum":[ + "SHA1", + "SHA256" + ] + }, + "HashAlgorithmOptions":{ + "type":"structure", + "required":[ + "allowedValues", + "defaultValue" + ], + "members":{ + "allowedValues":{ + "shape":"HashAlgorithms", + "documentation":"

The set of accepted hash algorithms allowed in an AWS Signer job.

" + }, + "defaultValue":{ + "shape":"HashAlgorithm", + "documentation":"

The default hash algorithm that is used in an AWS Signer job.

" + } + }, + "documentation":"

The hash algorithms that are available to an AWS Signer job.

" + }, + "HashAlgorithms":{ + "type":"list", + "member":{"shape":"HashAlgorithm"} + }, + "ImageFormat":{ + "type":"string", + "enum":["JSON"] + }, + "ImageFormats":{ + "type":"list", + "member":{"shape":"ImageFormat"} + }, + "InternalServiceErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

An internal error occurred.

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "JobId":{"type":"string"}, + "Key":{"type":"string"}, + "ListSigningJobsRequest":{ + "type":"structure", + "members":{ + "status":{ + "shape":"SigningStatus", + "documentation":"

A status value with which to filter your results.

", + "location":"querystring", + "locationName":"status" + }, + "platformId":{ + "shape":"PlatformId", + "documentation":"

The ID of microcontroller platform that you specified for the distribution of your code image.

", + "location":"querystring", + "locationName":"platformId" + }, + "requestedBy":{ + "shape":"RequestedBy", + "documentation":"

The IAM principal that requested the signing job.

", + "location":"querystring", + "locationName":"requestedBy" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

Specifies the maximum number of items to return in the response. Use this parameter when paginating results. If additional items exist beyond the number you specify, the nextToken element is set in the response. Use the nextToken value in a subsequent request to retrieve additional items.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

String for specifying the next set of paginated results to return. After you receive a response with truncated results, use this parameter in a subsequent request. Set it to the value of nextToken from the response that you just received.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSigningJobsResponse":{ + "type":"structure", + "members":{ + "jobs":{ + "shape":"SigningJobs", + "documentation":"

A list of your signing jobs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

String for specifying the next set of paginated results.

" + } + } + }, + "ListSigningPlatformsRequest":{ + "type":"structure", + "members":{ + "category":{ + "shape":"String", + "documentation":"

The category type of a signing platform.

", + "location":"querystring", + "locationName":"category" + }, + "partner":{ + "shape":"String", + "documentation":"

Any partner entities connected to a signing platform.

", + "location":"querystring", + "locationName":"partner" + }, + "target":{ + "shape":"String", + "documentation":"

The validation template that is used by the target signing platform.

", + "location":"querystring", + "locationName":"target" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned by this operation.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Value for specifying the next set of paginated results to return. After you receive a response with truncated results, use this parameter in a subsequent request. Set it to the value of nextToken from the response that you just received.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSigningPlatformsResponse":{ + "type":"structure", + "members":{ + "platforms":{ + "shape":"SigningPlatforms", + "documentation":"

A list of all platforms that match the request parameters.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

Value for specifying the next set of paginated results to return.

" + } + } + }, + "ListSigningProfilesRequest":{ + "type":"structure", + "members":{ + "includeCanceled":{ + "shape":"bool", + "documentation":"

Designates whether to include profiles with the status of CANCELED.

", + "location":"querystring", + "locationName":"includeCanceled" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of profiles to be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Value for specifying the next set of paginated results to return. After you receive a response with truncated results, use this parameter in a subsequent request. Set it to the value of nextToken from the response that you just received.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSigningProfilesResponse":{ + "type":"structure", + "members":{ + "profiles":{ + "shape":"SigningProfiles", + "documentation":"

A list of profiles that are available in the AWS account. This includes profiles with the status of CANCELED if the includeCanceled parameter is set to true.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Value for specifying the next set of paginated results to return.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "MaxSizeInMB":{"type":"integer"}, + "NextToken":{"type":"string"}, + "PlatformId":{"type":"string"}, + "Prefix":{"type":"string"}, + "ProfileName":{ + "type":"string", + "max":20, + "min":2, + "pattern":"^[a-zA-Z0-9_]{2,}" + }, + "PutSigningProfileRequest":{ + "type":"structure", + "required":[ + "profileName", + "signingMaterial", + "platformId" + ], + "members":{ + "profileName":{ + "shape":"ProfileName", + "documentation":"

The name of the signing profile to be created.

", + "location":"uri", + "locationName":"profileName" + }, + "signingMaterial":{ + "shape":"SigningMaterial", + "documentation":"

The AWS Certificate Manager certificate that will be used to sign code with the new signing profile.

" + }, + "platformId":{ + "shape":"PlatformId", + "documentation":"

The ID of the signing platform to be used to create the signing profile.

" + }, + "overrides":{ + "shape":"SigningPlatformOverrides", + "documentation":"

A subfield of platform. This specifies any different configuration options that you want to apply to the chosen platform (such as a different hash-algorithm or signing-algorithm).

" + }, + "signingParameters":{ + "shape":"SigningParameters", + "documentation":"

Map of key-value pairs for signing. These can include any information that you want to use during signing.

" + } + } + }, + "PutSigningProfileResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"string", + "documentation":"

The Amazon Resource Name (ARN) of the signing profile created.

" + } + } + }, + "RequestedBy":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

A specified resource could not be found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "S3Destination":{ + "type":"structure", + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

Name of the S3 bucket.

" + }, + "prefix":{ + "shape":"Prefix", + "documentation":"

An Amazon S3 prefix that you can use to limit responses to those that begin with the specified prefix.

" + } + }, + "documentation":"

The name and prefix of the S3 bucket where AWS Signer saves your signed objects.

" + }, + "S3SignedObject":{ + "type":"structure", + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

Name of the S3 bucket.

" + }, + "key":{ + "shape":"key", + "documentation":"

Key name that uniquely identifies a signed code image in your bucket.

" + } + }, + "documentation":"

The S3 bucket name and key where AWS Signer saved your signed code image.

" + }, + "S3Source":{ + "type":"structure", + "required":[ + "bucketName", + "key", + "version" + ], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

Name of the S3 bucket.

" + }, + "key":{ + "shape":"Key", + "documentation":"

Key name of the bucket object that contains your unsigned code.

" + }, + "version":{ + "shape":"Version", + "documentation":"

Version of your source image in your version enabled S3 bucket.

" + } + }, + "documentation":"

Information about the S3 bucket where you saved your unsigned code.

" + }, + "SignedObject":{ + "type":"structure", + "members":{ + "s3":{ + "shape":"S3SignedObject", + "documentation":"

The S3SignedObject.

" + } + }, + "documentation":"

Points to an S3SignedObject object that contains information about your signed code image.

" + }, + "SigningConfiguration":{ + "type":"structure", + "required":[ + "encryptionAlgorithmOptions", + "hashAlgorithmOptions" + ], + "members":{ + "encryptionAlgorithmOptions":{ + "shape":"EncryptionAlgorithmOptions", + "documentation":"

The encryption algorithm options that are available for an AWS Signer job.

" + }, + "hashAlgorithmOptions":{ + "shape":"HashAlgorithmOptions", + "documentation":"

The hash algorithm options that are available for an AWS Signer job.

" + } + }, + "documentation":"

The configuration of an AWS Signer operation.

" + }, + "SigningConfigurationOverrides":{ + "type":"structure", + "members":{ + "encryptionAlgorithm":{ + "shape":"EncryptionAlgorithm", + "documentation":"

A specified override of the default encryption algorithm that is used in an AWS Signer job.

" + }, + "hashAlgorithm":{ + "shape":"HashAlgorithm", + "documentation":"

A specified override of the default hash algorithm that is used in an AWS Signer job.

" + } + }, + "documentation":"

A signing configuration that overrides the default encryption or hash algorithm of a signing job.

" + }, + "SigningImageFormat":{ + "type":"structure", + "required":[ + "supportedFormats", + "defaultFormat" + ], + "members":{ + "supportedFormats":{ + "shape":"ImageFormats", + "documentation":"

The supported formats of an AWS Signer signing image.

" + }, + "defaultFormat":{ + "shape":"ImageFormat", + "documentation":"

The default format of an AWS Signer signing image.

" + } + }, + "documentation":"

The image format of an AWS Signer platform or profile.

" + }, + "SigningJob":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the signing job.

" + }, + "source":{ + "shape":"Source", + "documentation":"

A Source that contains information about a signing job's code image source.

" + }, + "signedObject":{ + "shape":"SignedObject", + "documentation":"

A SignedObject structure that contains information about a signing job's signed code image.

" + }, + "signingMaterial":{ + "shape":"SigningMaterial", + "documentation":"

A SigningMaterial object that contains the Amazon Resource Name (ARN) of the certificate used for the signing job.

" + }, + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The date and time that the signing job was created.

" + }, + "status":{ + "shape":"SigningStatus", + "documentation":"

The status of the signing job.

" + } + }, + "documentation":"

Contains information about a signing job.

" + }, + "SigningJobs":{ + "type":"list", + "member":{"shape":"SigningJob"} + }, + "SigningMaterial":{ + "type":"structure", + "required":["certificateArn"], + "members":{ + "certificateArn":{ + "shape":"CertificateArn", + "documentation":"

The Amazon Resource Name (ARN) of the certificate that is used to sign your code.

" + } + }, + "documentation":"

The ACM certificate that is used to sign your code.

" + }, + "SigningParameterKey":{"type":"string"}, + "SigningParameterValue":{"type":"string"}, + "SigningParameters":{ + "type":"map", + "key":{"shape":"SigningParameterKey"}, + "value":{"shape":"SigningParameterValue"} + }, + "SigningPlatform":{ + "type":"structure", + "members":{ + "platformId":{ + "shape":"String", + "documentation":"

The ID of an AWS Signer platform.

" + }, + "displayName":{ + "shape":"String", + "documentation":"

The display name of an AWS Signer platform.

" + }, + "partner":{ + "shape":"String", + "documentation":"

Any partner entities linked to an AWS Signer platform.

" + }, + "target":{ + "shape":"String", + "documentation":"

The types of targets that can be signed by an AWS Signer platform.

" + }, + "category":{ + "shape":"Category", + "documentation":"

The category of an AWS Signer platform.

" + }, + "signingConfiguration":{ + "shape":"SigningConfiguration", + "documentation":"

The configuration of an AWS Signer platform. This includes the designated hash algorithm and encryption algorithm of a signing platform.

" + }, + "signingImageFormat":{ + "shape":"SigningImageFormat", + "documentation":"

The signing image format that is used by an AWS Signer platform.

" + }, + "maxSizeInMB":{ + "shape":"MaxSizeInMB", + "documentation":"

The maximum size (in MB) of code that can be signed by an AWS Signer platform.

" + } + }, + "documentation":"

Contains information about the signing configurations and parameters that is used to perform an AWS Signer job.

" + }, + "SigningPlatformOverrides":{ + "type":"structure", + "members":{ + "signingConfiguration":{"shape":"SigningConfigurationOverrides"} + }, + "documentation":"

Any overrides that are applied to the signing configuration of an AWS Signer platform.

" + }, + "SigningPlatforms":{ + "type":"list", + "member":{"shape":"SigningPlatform"} + }, + "SigningProfile":{ + "type":"structure", + "members":{ + "profileName":{ + "shape":"ProfileName", + "documentation":"

The name of the AWS Signer profile.

" + }, + "signingMaterial":{ + "shape":"SigningMaterial", + "documentation":"

The ACM certificate that is available for use by a signing profile.

" + }, + "platformId":{ + "shape":"PlatformId", + "documentation":"

The ID of a platform that is available for use by a signing profile.

" + }, + "signingParameters":{ + "shape":"SigningParameters", + "documentation":"

The parameters that are available for use by an AWS Signer user.

" + }, + "status":{ + "shape":"SigningProfileStatus", + "documentation":"

The status of an AWS Signer profile.

" + } + }, + "documentation":"

Contains information about the ACM certificates and AWS Signer configuration parameters that can be used by a given AWS Signer user.

" + }, + "SigningProfileStatus":{ + "type":"string", + "enum":[ + "Active", + "Canceled" + ] + }, + "SigningProfiles":{ + "type":"list", + "member":{"shape":"SigningProfile"} + }, + "SigningStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Failed", + "Succeeded" + ] + }, + "Source":{ + "type":"structure", + "members":{ + "s3":{ + "shape":"S3Source", + "documentation":"

The S3Source object.

" + } + }, + "documentation":"

An S3Source object that contains information about the S3 bucket where you saved your unsigned code.

" + }, + "StartSigningJobRequest":{ + "type":"structure", + "required":[ + "source", + "destination", + "clientRequestToken" + ], + "members":{ + "source":{ + "shape":"Source", + "documentation":"

The S3 bucket that contains the object to sign or a BLOB that contains your raw code.

" + }, + "destination":{ + "shape":"Destination", + "documentation":"

The S3 bucket in which to save your signed object. The destination contains the name of your bucket and an optional prefix.

" + }, + "profileName":{ + "shape":"ProfileName", + "documentation":"

The name of the signing profile.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

String that identifies the signing request. All calls after the first that use this token return the same response as the first call.

", + "idempotencyToken":true + } + } + }, + "StartSigningJobResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of your signing job.

" + } + } + }, + "StatusReason":{"type":"string"}, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The signing job has been throttled.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

Your signing certificate could not be validated.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Version":{"type":"string"}, + "bool":{"type":"boolean"}, + "string":{"type":"string"} + }, + "documentation":"

You can use Code Signing for Amazon FreeRTOS (AWS Signer) to sign code that you created for any of the IoT devices that Amazon Web Services supports. AWS Signer is integrated with Amazon FreeRTOS, AWS Certificate Manager, and AWS CloudTrail. Amazon FreeRTOS customers can use AWS Signer to sign code images before making them available for microcontrollers. You can use ACM to import third-party certificates to be used by AWS Signer. For general information about using AWS Signer, see the Code Signing for Amazon FreeRTOS Developer Guide.

" +} diff --git a/botocore/data/signer/2017-08-25/waiters-2.json b/botocore/data/signer/2017-08-25/waiters-2.json new file mode 100644 index 00000000..a0890ade --- /dev/null +++ b/botocore/data/signer/2017-08-25/waiters-2.json @@ -0,0 +1,29 @@ +{ + "version": 2, + "waiters": { + "SuccessfulSigningJob": { + "delay": 20, + "operation": "DescribeSigningJob", + "maxAttempts": 25, + "acceptors": [ + { + "expected": "Succeeded", + "matcher": "path", + "state": "success", + "argument": "status" + }, + { + "expected": "Failed", + "matcher": "path", + "state": "failure", + "argument": "status" + }, + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "failure" + } + ] + } + } +} \ No newline at end of file diff --git a/botocore/data/snowball/2016-06-30/service-2.json b/botocore/data/snowball/2016-06-30/service-2.json index 910baa80..30a87ed5 100644 --- a/botocore/data/snowball/2016-06-30/service-2.json +++ b/botocore/data/snowball/2016-06-30/service-2.json @@ -1067,6 +1067,7 @@ "InTransitToCustomer", "WithCustomer", "InTransitToAWS", + "WithAWSSortingFacility", "WithAWS", "InProgress", "Complete", diff --git a/botocore/data/sqs/2012-11-05/service-2.json b/botocore/data/sqs/2012-11-05/service-2.json index 2301a77a..98acdb36 100644 --- a/botocore/data/sqs/2012-11-05/service-2.json +++ b/botocore/data/sqs/2012-11-05/service-2.json @@ -22,7 +22,7 @@ "errors":[ {"shape":"OverLimit"} ], - "documentation":"

Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Shared Queues in the Amazon Simple Queue Service Developer Guide.

AddPermission writes an Amazon-SQS-generated policy. If you want to write your own policy, use SetQueueAttributes to upload your policy. For more information about writing your own policy, see Using The Access Policy Language in the Amazon Simple Queue Service Developer Guide.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=this

&Attribute.2=that

" + "documentation":"

Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

AddPermission writes an Amazon-SQS-generated policy. If you want to write your own policy, use SetQueueAttributes to upload your policy. For more information about writing your own policy, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon Simple Queue Service Developer Guide.

An Amazon SQS policy can have a maximum of 7 actions.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "ChangeMessageVisibility":{ "name":"ChangeMessageVisibility", @@ -35,7 +35,7 @@ {"shape":"MessageNotInflight"}, {"shape":"ReceiptHandleIsInvalid"} ], - "documentation":"

Changes the visibility timeout of a specified message in a queue to a new value. The maximum allowed timeout value is 12 hours. Thus, you can't extend the timeout of a message in an existing queue to more than a total visibility timeout of 12 hours. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

For example, you have a message with a visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisiblity with a timeout of 10 minutes. At that time, the timeout for the message is extended by 10 minutes beyond the time of the ChangeMessageVisibility action. This results in a total visibility timeout of 13 minutes. You can continue to call the ChangeMessageVisibility to extend the visibility timeout to a maximum of 12 hours. If you try to extend the visibility timeout beyond 12 hours, your request is rejected.

A message is considered to be in flight after it's received from a queue by a consumer, but not yet deleted from the queue.

For standard queues, there can be a maximum of 120,000 inflight messages per queue. If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages.

For FIFO queues, there can be a maximum of 20,000 inflight messages per queue. If you reach this limit, Amazon SQS returns no error messages.

If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.

Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.

" + "documentation":"

Changes the visibility timeout of a specified message in a queue to a new value. The maximum allowed timeout value is 12 hours. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

For example, you have a message with a visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. You can continue to call ChangeMessageVisibility to extend the visibility timeout to a maximum of 12 hours. If you try to extend the visibility timeout beyond 12 hours, your request is rejected.

A message is considered to be in flight after it's received from a queue by a consumer, but not yet deleted from the queue.

For standard queues, there can be a maximum of 120,000 inflight messages per queue. If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages.

For FIFO queues, there can be a maximum of 20,000 inflight messages per queue. If you reach this limit, Amazon SQS returns no error messages.

If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.

Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.

" }, "ChangeMessageVisibilityBatch":{ "name":"ChangeMessageVisibilityBatch", @@ -54,7 +54,7 @@ {"shape":"BatchEntryIdsNotDistinct"}, {"shape":"InvalidBatchEntryId"} ], - "documentation":"

Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.

Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=this

&Attribute.2=that

" + "documentation":"

Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.

Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

" }, "CreateQueue":{ "name":"CreateQueue", @@ -71,7 +71,7 @@ {"shape":"QueueDeletedRecently"}, {"shape":"QueueNameExists"} ], - "documentation":"

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following caveats in mind:

  • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue.

    You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon Simple Queue Service Developer Guide.

  • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.

  • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. be aware of existing queue names:

  • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.

  • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=this

&Attribute.2=that

" + "documentation":"

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following caveats in mind:

  • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue.

    You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon Simple Queue Service Developer Guide.

  • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.

  • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter; be aware of existing queue names:

  • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.

  • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "DeleteMessage":{ "name":"DeleteMessage", @@ -84,7 +84,7 @@ {"shape":"InvalidIdFormat"}, {"shape":"ReceiptHandleIsInvalid"} ], - "documentation":"

Deletes the specified message from the specified queue. You specify the message by using the message's receipt handle and not the MessageId you receive when you send the message. Even if the message is locked by another reader due to the visibility timeout setting, it is still deleted from the queue. If you leave a message in the queue for longer than the queue's configured retention period, Amazon SQS automatically deletes the message.

The receipt handle is associated with a specific instance of receiving the message. If you receive a message more than once, the receipt handle you get each time you receive the message is different. If you don't provide the most recently received receipt handle for the message when you use the DeleteMessage action, the request succeeds, but the message might not be deleted.

For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers storing a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you on a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues.

" + "documentation":"

Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue.

The ReceiptHandle is associated with a specific instance of receiving a message. If you receive a message more than once, the ReceiptHandle is different each time you receive a message. When you use the DeleteMessage action, you must provide the most recently received ReceiptHandle for the message (otherwise, the request succeeds, but the message might not be deleted).

For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues.

" }, "DeleteMessageBatch":{ "name":"DeleteMessageBatch", @@ -103,7 +103,7 @@ {"shape":"BatchEntryIdsNotDistinct"}, {"shape":"InvalidBatchEntryId"} ], - "documentation":"

Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the action on each message is reported individually in the response.

Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=this

&Attribute.2=that

" + "documentation":"

Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the action on each message is reported individually in the response.

Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

" }, "DeleteQueue":{ "name":"DeleteQueue", @@ -112,7 +112,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteQueueRequest"}, - "documentation":"

Deletes the queue specified by the QueueUrl, regardless of the queue's contents. If the specified queue doesn't exist, Amazon SQS returns a successful response.

Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

" + "documentation":"

Deletes the queue specified by the QueueUrl, regardless of the queue's contents. If the specified queue doesn't exist, Amazon SQS returns a successful response.

Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "GetQueueAttributes":{ "name":"GetQueueAttributes", @@ -128,7 +128,7 @@ "errors":[ {"shape":"InvalidAttributeName"} ], - "documentation":"

Gets attributes for the specified queue.

To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=this

&Attribute.2=that

" + "documentation":"

Gets attributes for the specified queue.

To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

" }, "GetQueueUrl":{ "name":"GetQueueUrl", @@ -144,7 +144,7 @@ "errors":[ {"shape":"QueueDoesNotExist"} ], - "documentation":"

Returns the URL of an existing queue. This action provides a simple way to retrieve the URL of an Amazon SQS queue.

To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Shared Queues in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

Returns the URL of an existing Amazon SQS queue.

To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

" }, "ListDeadLetterSourceQueues":{ "name":"ListDeadLetterSourceQueues", @@ -173,7 +173,7 @@ "shape":"ListQueueTagsResult", "resultWrapper":"ListQueueTagsResult" }, - "documentation":"

List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging API actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "ListQueues":{ "name":"ListQueues", @@ -186,7 +186,7 @@ "shape":"ListQueuesResult", "resultWrapper":"ListQueuesResult" }, - "documentation":"

Returns a list of your queues. The maximum number of queues that can be returned is 1,000. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

" + "documentation":"

Returns a list of your queues. The maximum number of queues that can be returned is 1,000. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "PurgeQueue":{ "name":"PurgeQueue", @@ -199,7 +199,7 @@ {"shape":"QueueDoesNotExist"}, {"shape":"PurgeQueueInProgress"} ], - "documentation":"

Deletes the messages in a queue specified by the QueueURL parameter.

When you use the PurgeQueue action, you can't retrieve a message deleted from a queue.

When you purge a queue, the message deletion process takes up to 60 seconds. All messages sent to the queue before calling the PurgeQueue action are deleted. Messages sent to the queue while it is being purged might be deleted. While the queue is being purged, messages sent to the queue before PurgeQueue is called might be received, but are deleted within the next minute.

" + "documentation":"

Deletes the messages in a queue specified by the QueueURL parameter.

When you use the PurgeQueue action, you can't retrieve any messages deleted from a queue.

The message deletion process takes up to 60 seconds. We recommend waiting for 60 seconds regardless of your queue's size.

Messages sent to the queue before you call PurgeQueue might be received but are deleted within the next minute.

Messages sent to the queue after you call PurgeQueue might be deleted while the queue is being purged.

" }, "ReceiveMessage":{ "name":"ReceiveMessage", @@ -224,7 +224,7 @@ "requestUri":"/" }, "input":{"shape":"RemovePermissionRequest"}, - "documentation":"

Revokes any permissions in the queue policy that matches the specified Label parameter. Only the owner of the queue can remove permissions.

" + "documentation":"

Revokes any permissions in the queue policy that matches the specified Label parameter.

Only the owner of a queue can remove permissions from it.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "SendMessage":{ "name":"SendMessage", @@ -262,7 +262,7 @@ {"shape":"InvalidBatchEntryId"}, {"shape":"UnsupportedOperation"} ], - "documentation":"

Delivers up to ten messages to the specified queue. This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent.

The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KB (262,144 bytes).

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=this

&Attribute.2=that

" + "documentation":"

Delivers up to ten messages to the specified queue. This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent.

The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KB (262,144 bytes).

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

" }, "SetQueueAttributes":{ "name":"SetQueueAttributes", @@ -274,7 +274,7 @@ "errors":[ {"shape":"InvalidAttributeName"} ], - "documentation":"

Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

" + "documentation":"

Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "TagQueue":{ "name":"TagQueue", @@ -283,7 +283,7 @@ "requestUri":"/" }, "input":{"shape":"TagQueueRequest"}, - "documentation":"

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging API actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "UntagQueue":{ "name":"UntagQueue", @@ -292,7 +292,7 @@ "requestUri":"/" }, "input":{"shape":"UntagQueueRequest"}, - "documentation":"

Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging API actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

  • Adding more than 50 tags to a queue isn't recommended.

  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

  • Tags are case-sensitive.

  • A new tag with a key identical to that of an existing tag overwrites the existing tag.

  • Tagging actions are limited to 5 TPS per AWS account. If your application requires a higher throughput, file a technical support request.

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" } }, "shapes":{ @@ -323,7 +323,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue to which permissions are added.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue to which permissions are added.

Queue URLs and names are case-sensitive.

" }, "Label":{ "shape":"String", @@ -331,11 +331,11 @@ }, "AWSAccountIds":{ "shape":"AWSAccountIdList", - "documentation":"

The AWS account number of the principal who is given permission. The principal must have an AWS account, but does not need to be signed up for Amazon SQS. For information about locating the AWS account identification, see Your AWS Identifiers in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

The AWS account number of the principal who is given permission. The principal must have an AWS account, but does not need to be signed up for Amazon SQS. For information about locating the AWS account identification, see Your AWS Identifiers in the Amazon Simple Queue Service Developer Guide.

" }, "Actions":{ "shape":"ActionNameList", - "documentation":"

The action the client wants to allow for the specified principal. The following values are valid:

  • *

  • ChangeMessageVisibility

  • DeleteMessage

  • GetQueueAttributes

  • GetQueueUrl

  • ReceiveMessage

  • SendMessage

For more information about these actions, see Understanding Permissions in the Amazon Simple Queue Service Developer Guide.

Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n also grants permissions for the corresponding batch versions of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch.

" + "documentation":"

The action the client wants to allow for the specified principal. Valid values: the name of any action or *.

For more information about these actions, see Overview of Managing Access Permissions to Your Amazon Simple Queue Service Resource in the Amazon Simple Queue Service Developer Guide.

Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n also grants permissions for the corresponding batch versions of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch.

" } }, "documentation":"

" @@ -386,7 +386,7 @@ }, "SenderFault":{ "shape":"Boolean", - "documentation":"

Specifies whether the error happened due to the sender's fault.

" + "documentation":"

Specifies whether the error happened due to the producer.

" }, "Code":{ "shape":"String", @@ -397,7 +397,7 @@ "documentation":"

A message explaining why the action failed on this entry.

" } }, - "documentation":"

This is used in the responses of batch API to give a detailed description of the result of an action on each entry in the request.

" + "documentation":"

Gives a detailed description of the result of an action on each entry in the request.

" }, "BatchResultErrorEntryList":{ "type":"list", @@ -425,7 +425,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue whose messages' visibility is changed.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue whose messages' visibility is changed.

Queue URLs and names are case-sensitive.

" }, "Entries":{ "shape":"ChangeMessageVisibilityBatchRequestEntryList", @@ -454,7 +454,7 @@ "documentation":"

The new value (in seconds) for the message's visibility timeout.

" } }, - "documentation":"

Encloses a receipt handle and an entry id for each message in ChangeMessageVisibilityBatch.

All of the following list parameters must be prefixed with ChangeMessageVisibilityBatchRequestEntry.n, where n is an integer value starting with 1. For example, a parameter list for this action might look like this:

&amp;ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2

&amp;ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=<replaceable>Your_Receipt_Handle</replaceable>

&amp;ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45

" + "documentation":"

Encloses a receipt handle and an entry id for each message in ChangeMessageVisibilityBatch.

All of the following list parameters must be prefixed with ChangeMessageVisibilityBatchRequestEntry.n, where n is an integer value starting with 1. For example, a parameter list for this action might look like this:

&ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2

&ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=your_receipt_handle

&ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45

" }, "ChangeMessageVisibilityBatchRequestEntryList":{ "type":"list", @@ -511,7 +511,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue whose message's visibility is changed.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue whose message's visibility is changed.

Queue URLs and names are case-sensitive.

" }, "ReceiptHandle":{ "shape":"String", @@ -529,11 +529,11 @@ "members":{ "QueueName":{ "shape":"String", - "documentation":"

The name of the new queue. The following limits apply to this name:

  • A queue name can have up to 80 characters.

  • Valid values: alphanumeric characters, hyphens (-), and underscores (_).

  • A FIFO queue name must end with the .fifo suffix.

Queue names are case-sensitive.

" + "documentation":"

The name of the new queue. The following limits apply to this name:

  • A queue name can have up to 80 characters.

  • Valid values: alphanumeric characters, hyphens (-), and underscores (_).

  • A FIFO queue name must end with the .fifo suffix.

Queue URLs and names are case-sensitive.

" }, "Attributes":{ "shape":"QueueAttributeMap", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:

  • DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). The default is 0 (zero).

  • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). The default is 262,144 (256 KiB).

  • MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). The default is 345,600 (4 days).

  • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

  • ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). The default is 0 (zero).

  • RedrivePolicy - The string that includes the parameters for the dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue.

    The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

  • VisibilityTimeout - The visibility timeout for the queue. Valid values: An integer from 0 to 43,200 (12 hours). The default is 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the AWS Key Management Service API Reference.

  • KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

The following attributes apply only to FIFO (first-in-first-out) queues:

  • FifoQueue - Designates a queue as FIFO. Valid values: true, false. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly.

    For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide.

  • ContentBasedDeduplication - Enables content-based deduplication. Valid values: true, false. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

    • Every message must have a unique MessageDeduplicationId,

      • You may provide a MessageDeduplicationId explicitly.

      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

Any other valid special request parameters (such as the following) are ignored:

  • ApproximateNumberOfMessages

  • ApproximateNumberOfMessagesDelayed

  • ApproximateNumberOfMessagesNotVisible

  • CreatedTimestamp

  • LastModifiedTimestamp

  • QueueArn

", + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:

  • DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0.

  • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

  • MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days).

  • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

  • ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.

  • RedrivePolicy - The string that includes the parameters for the dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

    The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

  • VisibilityTimeout - The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the AWS Key Management Service API Reference.

  • KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

The following attributes apply only to FIFO (first-in-first-out) queues:

  • FifoQueue - Designates a queue as FIFO. Valid values: true, false. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly.

    For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide.

  • ContentBasedDeduplication - Enables content-based deduplication. Valid values: true, false. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

    • Every message must have a unique MessageDeduplicationId,

      • You may provide a MessageDeduplicationId explicitly.

      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

", "locationName":"Attribute" } }, @@ -558,7 +558,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue from which messages are deleted.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue from which messages are deleted.

Queue URLs and names are case-sensitive.

" }, "Entries":{ "shape":"DeleteMessageBatchRequestEntryList", @@ -639,7 +639,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue from which messages are deleted.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue from which messages are deleted.

Queue URLs and names are case-sensitive.

" }, "ReceiptHandle":{ "shape":"String", @@ -654,7 +654,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue to delete.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue to delete.

Queue URLs and names are case-sensitive.

" } }, "documentation":"

" @@ -677,11 +677,11 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue whose attribute information is retrieved.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue whose attribute information is retrieved.

Queue URLs and names are case-sensitive.

" }, "AttributeNames":{ "shape":"AttributeNameList", - "documentation":"

A list of attributes for which to retrieve information.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

The following attributes are supported:

  • All - Returns all values.

  • ApproximateNumberOfMessages - Returns the approximate number of visible messages in a queue. For more information, see Resources Required to Process Messages in the Amazon Simple Queue Service Developer Guide.

  • ApproximateNumberOfMessagesDelayed - Returns the approximate number of messages that are waiting to be added to the queue.

  • ApproximateNumberOfMessagesNotVisible - Returns the approximate number of messages that have not timed-out and aren't deleted. For more information, see Resources Required to Process Messages in the Amazon Simple Queue Service Developer Guide.

  • CreatedTimestamp - Returns the time when the queue was created in seconds (epoch time).

  • DelaySeconds - Returns the default delay on the queue in seconds.

  • LastModifiedTimestamp - Returns the time when the queue was last changed in seconds (epoch time).

  • MaximumMessageSize - Returns the limit of how many bytes a message can contain before Amazon SQS rejects it.

  • MessageRetentionPeriod - Returns the length of time, in seconds, for which Amazon SQS retains a message.

  • Policy - Returns the policy of the queue.

  • QueueArn - Returns the Amazon resource name (ARN) of the queue.

  • ReceiveMessageWaitTimeSeconds - Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive.

  • RedrivePolicy - Returns the string that includes the parameters for dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue.

  • VisibilityTimeout - Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - Returns the ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms.

  • KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. For more information, see How Does the Data Key Reuse Period Work?.

The following attributes apply only to FIFO (first-in-first-out) queues:

  • FifoQueue - Returns whether the queue is FIFO. For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide.

    To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

  • ContentBasedDeduplication - Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

A list of attributes for which to retrieve information.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

The following attributes are supported:

  • All - Returns all values.

  • ApproximateNumberOfMessages - Returns the approximate number of messages available for retrieval from the queue.

  • ApproximateNumberOfMessagesDelayed - Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter.

  • ApproximateNumberOfMessagesNotVisible - Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window.

  • CreatedTimestamp - Returns the time when the queue was created in seconds (epoch time).

  • DelaySeconds - Returns the default delay on the queue in seconds.

  • LastModifiedTimestamp - Returns the time when the queue was last changed in seconds (epoch time).

  • MaximumMessageSize - Returns the limit of how many bytes a message can contain before Amazon SQS rejects it.

  • MessageRetentionPeriod - Returns the length of time, in seconds, for which Amazon SQS retains a message.

  • Policy - Returns the policy of the queue.

  • QueueArn - Returns the Amazon resource name (ARN) of the queue.

  • ReceiveMessageWaitTimeSeconds - Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive.

  • RedrivePolicy - Returns the string that includes the parameters for dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

  • VisibilityTimeout - Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - Returns the ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms.

  • KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. For more information, see How Does the Data Key Reuse Period Work?.

The following attributes apply only to FIFO (first-in-first-out) queues:

  • FifoQueue - Returns whether the queue is FIFO. For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide.

    To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

  • ContentBasedDeduplication - Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

" } }, "documentation":"

" @@ -703,7 +703,7 @@ "members":{ "QueueName":{ "shape":"String", - "documentation":"

The name of the queue whose URL must be fetched. Maximum 80 characters. Valid values: alphanumeric characters, hyphens (-), and underscores (_).

Queue names are case-sensitive.

" + "documentation":"

The name of the queue whose URL must be fetched. Maximum 80 characters. Valid values: alphanumeric characters, hyphens (-), and underscores (_).

Queue URLs and names are case-sensitive.

" }, "QueueOwnerAWSAccountId":{ "shape":"String", @@ -720,14 +720,14 @@ "documentation":"

The URL of the queue.

" } }, - "documentation":"

For more information, see Responses in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

For more information, see Interpreting Responses in the Amazon Simple Queue Service Developer Guide.

" }, "Integer":{"type":"integer"}, "InvalidAttributeName":{ "type":"structure", "members":{ }, - "documentation":"

The attribute referred to doesn't exist.

", + "documentation":"

The specified attribute doesn't exist.

", "exception":true }, "InvalidBatchEntryId":{ @@ -746,7 +746,7 @@ "type":"structure", "members":{ }, - "documentation":"

The receipt handle isn't valid for the current version.

", + "documentation":"

The specified receipt handle isn't valid for the current version.

", "exception":true }, "InvalidMessageContents":{ @@ -762,7 +762,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of a dead-letter queue.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of a dead-letter queue.

Queue URLs and names are case-sensitive.

" } }, "documentation":"

" @@ -803,7 +803,7 @@ "members":{ "QueueNamePrefix":{ "shape":"String", - "documentation":"

A string to use for filtering the list results. Only those queues whose name begins with the specified string are returned.

Queue names are case-sensitive.

" + "documentation":"

A string to use for filtering the list results. Only those queues whose name begins with the specified string are returned.

Queue URLs and names are case-sensitive.

" } }, "documentation":"

" @@ -839,7 +839,7 @@ }, "Attributes":{ "shape":"MessageSystemAttributeMap", - "documentation":"

SenderId, SentTimestamp, ApproximateReceiveCount, and/or ApproximateFirstReceiveTimestamp. SentTimestamp and ApproximateFirstReceiveTimestamp are each returned as an integer representing the epoch time in milliseconds.

", + "documentation":"

A map of the attributes requested in ReceiveMessage to their respective values. Supported attributes:

  • ApproximateReceiveCount

  • ApproximateFirstReceiveTimestamp

  • MessageDeduplicationId

  • MessageGroupId

  • SenderId

  • SentTimestamp

  • SequenceNumber

ApproximateFirstReceiveTimestamp and SentTimestamp are each returned as an integer representing the epoch time in milliseconds.

", "locationName":"Attribute" }, "MD5OfMessageAttributes":{ @@ -848,7 +848,7 @@ }, "MessageAttributes":{ "shape":"MessageBodyAttributeMap", - "documentation":"

Each message attribute consists of a Name, Type, and Value. For more information, see Message Attribute Items and Validation in the Amazon Simple Queue Service Developer Guide.

", + "documentation":"

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

", "locationName":"MessageAttribute" } }, @@ -889,7 +889,7 @@ }, "DataType":{ "shape":"String", - "documentation":"

Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue.

You can also append custom labels. For more information, see Message Attribute Data Types and Validation in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue.

You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

" } }, "documentation":"

The user-specified message attribute value. For string data types, the Value attribute has the same restrictions on the content as the message body. For more information, see SendMessage.

Name, type, value and the message body must not be empty or null. All parts of the message attribute, including Name, Type, and Value, are part of the message size restriction (256 KB or 262,144 bytes).

" @@ -918,7 +918,7 @@ "type":"structure", "members":{ }, - "documentation":"

The message referred to isn't in flight.

", + "documentation":"

The specified message isn't in flight.

", "error":{ "code":"AWS.SimpleQueueService.MessageNotInflight", "httpStatusCode":400, @@ -955,7 +955,7 @@ "type":"structure", "members":{ }, - "documentation":"

The action that you requested would violate a limit. For example, ReceiveMessage returns this error if the maximum number of inflight messages is reached. AddPermission returns this error if the maximum number of permissions for the queue is reached.

", + "documentation":"

The specified action violates a limit. For example, ReceiveMessage returns this error if the maximum number of inflight messages is reached and AddPermission returns this error if the maximum number of permissions for the queue is reached.

", "error":{ "code":"OverLimit", "httpStatusCode":403, @@ -981,7 +981,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the queue from which the PurgeQueue action deletes messages.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the queue from which the PurgeQueue action deletes messages.

Queue URLs and names are case-sensitive.

" } }, "documentation":"

" @@ -1026,7 +1026,7 @@ "type":"structure", "members":{ }, - "documentation":"

You must wait 60 seconds after deleting a queue before you can create another one with the same name.

", + "documentation":"

You must wait 60 seconds after deleting a queue before you can create another queue with the same name.

", "error":{ "code":"AWS.SimpleQueueService.QueueDeletedRecently", "httpStatusCode":400, @@ -1038,7 +1038,7 @@ "type":"structure", "members":{ }, - "documentation":"

The queue referred to doesn't exist.

", + "documentation":"

The specified queue doesn't exist.

", "error":{ "code":"AWS.SimpleQueueService.NonExistentQueue", "httpStatusCode":400, @@ -1050,7 +1050,7 @@ "type":"structure", "members":{ }, - "documentation":"

A queue already exists with this name. Amazon SQS returns this error only if the request includes attributes whose values differ from those of the existing queue.

", + "documentation":"

A queue with this name already exists. Amazon SQS returns this error only if the request includes attributes whose values differ from those of the existing queue.

", "error":{ "code":"QueueAlreadyExists", "httpStatusCode":400, @@ -1070,7 +1070,7 @@ "type":"structure", "members":{ }, - "documentation":"

The receipt handle provided isn't valid.

", + "documentation":"

The specified receipt handle isn't valid.

", "exception":true }, "ReceiveMessageRequest":{ @@ -1079,11 +1079,11 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue from which messages are received.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue from which messages are received.

Queue URLs and names are case-sensitive.

" }, "AttributeNames":{ "shape":"AttributeNameList", - "documentation":"

A list of attributes that need to be returned along with each message. These attributes include:

  • All - Returns all values.

  • ApproximateFirstReceiveTimestamp - Returns the time the message was first received from the queue (epoch time in milliseconds).

  • ApproximateReceiveCount - Returns the number of times a message has been received from the queue but not deleted.

  • SenderId

    • For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R.

    • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

  • SentTimestamp - Returns the time the message was sent to the queue (epoch time in milliseconds).

  • MessageDeduplicationId - Returns the value provided by the sender that calls the SendMessage action.

  • MessageGroupId - Returns the value provided by the sender that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence.

  • SequenceNumber - Returns the value provided by Amazon SQS.

Any other valid special request parameters (such as the following) are ignored:

  • ApproximateNumberOfMessages

  • ApproximateNumberOfMessagesDelayed

  • ApproximateNumberOfMessagesNotVisible

  • CreatedTimestamp

  • ContentBasedDeduplication

  • DelaySeconds

  • FifoQueue

  • LastModifiedTimestamp

  • MaximumMessageSize

  • MessageRetentionPeriod

  • Policy

  • QueueArn,

  • ReceiveMessageWaitTimeSeconds

  • RedrivePolicy

  • VisibilityTimeout

" + "documentation":"

A list of attributes that need to be returned along with each message. These attributes include:

  • All - Returns all values.

  • ApproximateFirstReceiveTimestamp - Returns the time the message was first received from the queue (epoch time in milliseconds).

  • ApproximateReceiveCount - Returns the number of times a message has been received from the queue but not deleted.

  • SenderId

    • For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R.

    • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

  • SentTimestamp - Returns the time the message was sent to the queue (epoch time in milliseconds).

  • MessageDeduplicationId - Returns the value provided by the producer that calls the SendMessage action.

  • MessageGroupId - Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence.

  • SequenceNumber - Returns the value provided by Amazon SQS.

" }, "MessageAttributeNames":{ "shape":"MessageAttributeNameList", @@ -1091,7 +1091,7 @@ }, "MaxNumberOfMessages":{ "shape":"Integer", - "documentation":"

The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values are 1 to 10. Default is 1.

" + "documentation":"

The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values: 1 to 10. Default: 1.

" }, "VisibilityTimeout":{ "shape":"Integer", @@ -1103,7 +1103,7 @@ }, "ReceiveRequestAttemptId":{ "shape":"String", - "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, you can retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.

  • You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action.

  • When you set FifoQueue, a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly.

  • If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, Amazon SQS generates a ReceiveRequestAttemptId.

  • You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changes).

  • During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

    If a caller of the ReceiveMessage action is still processing messages when the visibility timeout expires and messages become visible, another worker reading from the same queue can receive the same messages and therefore process duplicates. Also, if a reader whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error.

    To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary.

  • While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible.

  • If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order.

The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, you can retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.

  • You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action.

  • When you set FifoQueue, a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly.

  • If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, Amazon SQS generates a ReceiveRequestAttemptId.

  • You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changes).

  • During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

    If a caller of the ReceiveMessage action still processes messages when the visibility timeout expires and messages become visible, another worker consuming from the same queue can receive the same messages and therefore process duplicates. Also, if a consumer whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error.

    To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary.

  • While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible.

  • If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order.

The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide.

" } }, "documentation":"

" @@ -1127,7 +1127,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue from which permissions are removed.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue from which permissions are removed.

Queue URLs and names are case-sensitive.

" }, "Label":{ "shape":"String", @@ -1145,7 +1145,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue to which batched messages are sent.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue to which batched messages are sent.

Queue URLs and names are case-sensitive.

" }, "Entries":{ "shape":"SendMessageBatchRequestEntryList", @@ -1163,7 +1163,7 @@ "members":{ "Id":{ "shape":"String", - "documentation":"

An identifier for a message in this batch used to communicate the result.

The Ids of a batch request need to be unique within a request

" + "documentation":"

An identifier for a message in this batch used to communicate the result.

The Ids of a batch request need to be unique within a request.

This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens(-), and underscores (_).

" }, "MessageBody":{ "shape":"String", @@ -1175,16 +1175,16 @@ }, "MessageAttributes":{ "shape":"MessageBodyAttributeMap", - "documentation":"

Each message attribute consists of a Name, Type, and Value. For more information, see Message Attribute Items and Validation in the Amazon Simple Queue Service Developer Guide.

", + "documentation":"

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

", "locationName":"MessageAttribute" }, "MessageDeduplicationId":{ "shape":"String", - "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

  • Every message must have a unique MessageDeduplicationId,

    • You may provide a MessageDeduplicationId explicitly.

    • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

    • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

    • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

  • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

  • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

The MessageDeduplicationId is available to the recipient of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

  • Every message must have a unique MessageDeduplicationId,

    • You may provide a MessageDeduplicationId explicitly.

    • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

    • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

    • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

  • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

  • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

" }, "MessageGroupId":{ "shape":"String", - "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple readers can process the queue, but the session data of each user is processed in a FIFO fashion.

  • You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails.

  • ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the messages are sorted by time sent. The caller can't specify a MessageGroupId.

The length of MessageGroupId is 128 characters. Valid values are alphanumeric characters and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide.

MessageGroupId is required for FIFO queues. You can't use it for Standard queues.

" + "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion.

  • You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails.

  • ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the messages are sorted by time sent. The caller can't specify a MessageGroupId.

The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide.

MessageGroupId is required for FIFO queues. You can't use it for Standard queues.

" } }, "documentation":"

Contains the details of a single Amazon SQS message along with an Id.

" @@ -1263,7 +1263,7 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue to which a message is sent.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue to which a message is sent.

Queue URLs and names are case-sensitive.

" }, "MessageBody":{ "shape":"String", @@ -1275,16 +1275,16 @@ }, "MessageAttributes":{ "shape":"MessageBodyAttributeMap", - "documentation":"

Each message attribute consists of a Name, Type, and Value. For more information, see Message Attribute Items and Validation in the Amazon Simple Queue Service Developer Guide.

", + "documentation":"

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

", "locationName":"MessageAttribute" }, "MessageDeduplicationId":{ "shape":"String", - "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

  • Every message must have a unique MessageDeduplicationId,

    • You may provide a MessageDeduplicationId explicitly.

    • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

    • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

    • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

  • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

  • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

The MessageDeduplicationId is available to the recipient of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

  • Every message must have a unique MessageDeduplicationId,

    • You may provide a MessageDeduplicationId explicitly.

    • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

    • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

    • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

  • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

  • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

" }, "MessageGroupId":{ "shape":"String", - "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple readers can process the queue, but the session data of each user is processed in a FIFO fashion.

  • You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails.

  • ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the messages are sorted by time sent. The caller can't specify a MessageGroupId.

The length of MessageGroupId is 128 characters. Valid values are alphanumeric characters and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide.

MessageGroupId is required for FIFO queues. You can't use it for Standard queues.

" + "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion.

  • You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails.

  • ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the messages are sorted by time sent. The caller can't specify a MessageGroupId.

The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide.

MessageGroupId is required for FIFO queues. You can't use it for Standard queues.

" } }, "documentation":"

" @@ -1320,11 +1320,11 @@ "members":{ "QueueUrl":{ "shape":"String", - "documentation":"

The URL of the Amazon SQS queue whose attributes are set.

Queue URLs are case-sensitive.

" + "documentation":"

The URL of the Amazon SQS queue whose attributes are set.

Queue URLs and names are case-sensitive.

" }, "Attributes":{ "shape":"QueueAttributeMap", - "documentation":"

A map of attributes to set.

The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:

  • DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). The default is 0 (zero).

  • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). The default is 262,144 (256 KiB).

  • MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). The default is 345,600 (4 days).

  • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

  • ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: an integer from 0 to 20 (seconds). The default is 0.

  • RedrivePolicy - The string that includes the parameters for the dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue.

    The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

  • VisibilityTimeout - The visibility timeout for the queue. Valid values: an integer from 0 to 43,200 (12 hours). The default is 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the AWS Key Management Service API Reference.

  • KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

The following attribute applies only to FIFO (first-in-first-out) queues:

  • ContentBasedDeduplication - Enables content-based deduplication. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

    • Every message must have a unique MessageDeduplicationId,

      • You may provide a MessageDeduplicationId explicitly.

      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

Any other valid special request parameters (such as the following) are ignored:

  • ApproximateNumberOfMessages

  • ApproximateNumberOfMessagesDelayed

  • ApproximateNumberOfMessagesNotVisible

  • CreatedTimestamp

  • LastModifiedTimestamp

  • QueueArn

", + "documentation":"

A map of attributes to set.

The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:

  • DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). Default: 0.

  • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

  • MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days).

  • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

  • ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: an integer from 0 to 20 (seconds). Default: 0.

  • RedrivePolicy - The string that includes the parameters for the dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

    The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

  • VisibilityTimeout - The visibility timeout for the queue, in seconds. Valid values: an integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the AWS Key Management Service API Reference.

  • KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

The following attribute applies only to FIFO (first-in-first-out) queues:

  • ContentBasedDeduplication - Enables content-based deduplication. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

    • Every message must have a unique MessageDeduplicationId,

      • You may provide a MessageDeduplicationId explicitly.

      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

", "locationName":"Attribute" } }, @@ -1420,5 +1420,5 @@ } } }, - "documentation":"

Welcome to the Amazon Simple Queue Service API Reference.

Amazon Simple Queue Service (Amazon SQS) is a reliable, highly-scalable hosted queue for storing messages as they travel between applications or microservices. Amazon SQS moves data between distributed application components and helps you decouple these components.

Standard queues are available in all regions. FIFO queues are available in the US East (N. Virginia), US East (Ohio), US West (Oregon), and EU (Ireland) regions.

You can use AWS SDKs to access Amazon SQS using your favorite programming language. The SDKs perform tasks such as the following automatically:

  • Cryptographically sign your service requests

  • Retry requests

  • Handle error responses

Additional Information

" + "documentation":"

Welcome to the Amazon Simple Queue Service API Reference.

Amazon Simple Queue Service (Amazon SQS) is a reliable, highly-scalable hosted queue for storing messages as they travel between applications or microservices. Amazon SQS moves data between distributed application components and helps you decouple these components.

Standard queues are available in all regions. FIFO queues are available in the US East (N. Virginia), US East (Ohio), US West (Oregon), and EU (Ireland) regions.

You can use AWS SDKs to access Amazon SQS using your favorite programming language. The SDKs perform tasks such as the following automatically:

  • Cryptographically sign your service requests

  • Retry requests

  • Handle error responses

Additional Information

" } diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index 1521383f..75ee7264 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -80,7 +80,7 @@ {"shape":"InvalidTarget"}, {"shape":"InvalidSchedule"} ], - "documentation":"

Associates the specified Systems Manager document with the specified instances or targets.

When you associate a document with one or more instances using instance IDs or tags, the SSM Agent running on the instance processes the document and configures the instance as specified.

If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.

" + "documentation":"

Associates the specified Systems Manager document with the specified instances or targets.

When you associate a document with one or more instances using instance IDs or tags, SSM Agent running on the instance processes the document and configures the instance as specified.

If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.

" }, "CreateAssociationBatch":{ "name":"CreateAssociationBatch", @@ -103,7 +103,7 @@ {"shape":"InvalidTarget"}, {"shape":"InvalidSchedule"} ], - "documentation":"

Associates the specified Systems Manager document with the specified instances or targets.

When you associate a document with one or more instances using instance IDs or tags, the SSM Agent running on the instance processes the document and configures the instance as specified.

If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.

" + "documentation":"

Associates the specified Systems Manager document with the specified instances or targets.

When you associate a document with one or more instances using instance IDs or tags, SSM Agent running on the instance processes the document and configures the instance as specified.

If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.

" }, "CreateDocument":{ "name":"CreateDocument", @@ -761,6 +761,21 @@ ], "documentation":"

Lists all patch groups that have been registered with patch baselines.

" }, + "DescribeSessions":{ + "name":"DescribeSessions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSessionsRequest"}, + "output":{"shape":"DescribeSessionsResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidFilterKey"}, + {"shape":"InvalidNextToken"} + ], + "documentation":"

Retrieves a list of all active sessions (both connected and disconnected) or terminated sessions from the past 30 days.

" + }, "GetAutomationExecution":{ "name":"GetAutomationExecution", "http":{ @@ -792,6 +807,19 @@ ], "documentation":"

Returns detailed information about command execution for an invocation or plugin.

" }, + "GetConnectionStatus":{ + "name":"GetConnectionStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConnectionStatusRequest"}, + "output":{"shape":"GetConnectionStatusResponse"}, + "errors":[ + {"shape":"InternalServerError"} + ], + "documentation":"

Retrieves the Session Manager connection status for an instance to determine whether it is connected and ready to receive Session Manager connections.

" + }, "GetDefaultPatchBaseline":{ "name":"GetDefaultPatchBaseline", "http":{ @@ -845,8 +873,10 @@ "errors":[ {"shape":"InternalServerError"}, {"shape":"InvalidFilter"}, + {"shape":"InvalidInventoryGroupException"}, {"shape":"InvalidNextToken"}, {"shape":"InvalidTypeNameException"}, + {"shape":"InvalidAggregatorException"}, {"shape":"InvalidResultAttributeException"} ], "documentation":"

Query inventory information.

" @@ -1395,6 +1425,20 @@ ], "documentation":"

Removes all tags from the specified resource.

" }, + "ResumeSession":{ + "name":"ResumeSession", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResumeSessionRequest"}, + "output":{"shape":"ResumeSessionResponse"}, + "errors":[ + {"shape":"DoesNotExistException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Reconnects a session to an instance after it has been disconnected. Connections can be resumed for disconnected sessions, but not terminated sessions.

This command is primarily for use by client machines to automatically reconnect during intermittent network issues. It is not intended for any other use.

" + }, "SendAutomationSignal":{ "name":"SendAutomationSignal", "http":{ @@ -1467,6 +1511,21 @@ ], "documentation":"

Initiates execution of an Automation document.

" }, + "StartSession":{ + "name":"StartSession", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartSessionRequest"}, + "output":{"shape":"StartSessionResponse"}, + "errors":[ + {"shape":"InvalidDocument"}, + {"shape":"TargetNotConnected"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Initiates a connection to a target (for example, an instance) for a Session Manager session. Returns a URL and token that can be used to open a WebSocket connection for sending input and receiving outputs.

AWS CLI usage: start-session is an interactive command that requires the Session Manager plugin to be installed on the client machine making the call. For information, see Install the Session Manager Plugin for the AWS CLI in the AWS Systems Manager User Guide.

" + }, "StopAutomationExecution":{ "name":"StopAutomationExecution", "http":{ @@ -1482,6 +1541,20 @@ ], "documentation":"

Stop an Automation that is currently executing.

" }, + "TerminateSession":{ + "name":"TerminateSession", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateSessionRequest"}, + "output":{"shape":"TerminateSessionResponse"}, + "errors":[ + {"shape":"DoesNotExistException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Permanently ends a session and closes the data connection between the Session Manager client and SSM Agent on the instance. A terminated session cannot be resumed.

" + }, "UpdateAssociation":{ "name":"UpdateAssociation", "http":{ @@ -2782,7 +2855,7 @@ }, "value":{ "shape":"CommandFilterValue", - "documentation":"

The filter value.

" + "documentation":"

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: A timestamp to limit your results. For example, specify 2018-07-07T00:00:00Z to see results occurring July 7, 2018, and later.

  • InvokedBefore: A timestamp to limit your results. For example, specify 2018-07-07T00:00:00Z to see results before July 7, 2018.

  • Status: Specify a valid command status to see a list of all command executions with that status. Status values you can specify include:

    • Pending

    • InProgress

    • Success

    • Cancelled

    • Failed

    • TimedOut

    • Cancelling

  • DocumentName: The name of the SSM document for which you want to see command results.

    For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on instances.

  • ExecutionStage: An enum whose value can be either Executing or Complete.

    • Specify Executing to see a list of command executions that are currently still running.

    • Specify Complete to see a list of command executions that have already completed.

" } }, "documentation":"

Describes a command filter.

" @@ -2792,17 +2865,20 @@ "enum":[ "InvokedAfter", "InvokedBefore", - "Status" + "Status", + "ExecutionStage", + "DocumentName" ] }, "CommandFilterList":{ "type":"list", "member":{"shape":"CommandFilter"}, - "max":3, + "max":5, "min":1 }, "CommandFilterValue":{ "type":"string", + "max":128, "min":1 }, "CommandId":{ @@ -3262,6 +3338,13 @@ "max":255, "min":1 }, + "ConnectionStatus":{ + "type":"string", + "enum":[ + "Connected", + "NotConnected" + ] + }, "CreateActivationRequest":{ "type":"structure", "required":["IamRole"], @@ -4317,7 +4400,7 @@ "members":{ "InstanceInformationFilterList":{ "shape":"InstanceInformationFilterList", - "documentation":"

One or more filters. Use a filter to return a more specific list of instances.

" + "documentation":"

This is a legacy method. We recommend that you don't use this method. Instead, use the InstanceInformationFilter action. The InstanceInformationFilter action enables you to return instance information by using tags that are specified as a key-value mapping.

If you do use this method, then you can't use the InstanceInformationFilter action. Using this method and the InstanceInformationFilter action causes an exception error.

" }, "Filters":{ "shape":"InstanceInformationStringFilterList", @@ -4836,6 +4919,42 @@ } } }, + "DescribeSessionsRequest":{ + "type":"structure", + "required":["State"], + "members":{ + "State":{ + "shape":"SessionState", + "documentation":"

The session status to retrieve a list of sessions for. For example, \"active\".

" + }, + "MaxResults":{ + "shape":"SessionMaxResults", + "documentation":"

The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

", + "box":true + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return. (You received this token from a previous call.)

" + }, + "Filters":{ + "shape":"SessionFilterList", + "documentation":"

One or more filters to limit the type of sessions returned by the request.

" + } + } + }, + "DescribeSessionsResponse":{ + "type":"structure", + "members":{ + "Sessions":{ + "shape":"SessionList", + "documentation":"

A list of sessions meeting the request parameters.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return. (You received this token from a previous call.)

" + } + } + }, "DescriptionInDocument":{"type":"string"}, "DocumentARN":{ "type":"string", @@ -5159,7 +5278,8 @@ "enum":[ "Command", "Policy", - "Automation" + "Automation", + "Session" ] }, "DocumentVersion":{ @@ -5434,6 +5554,29 @@ } } }, + "GetConnectionStatusRequest":{ + "type":"structure", + "required":["Target"], + "members":{ + "Target":{ + "shape":"SessionTarget", + "documentation":"

The ID of the instance.

" + } + } + }, + "GetConnectionStatusResponse":{ + "type":"structure", + "members":{ + "Target":{ + "shape":"SessionTarget", + "documentation":"

The ID of the instance to check connection status.

" + }, + "Status":{ + "shape":"ConnectionStatus", + "documentation":"

The status of the connection to the instance. For example, 'Connected' or 'Not Connected'.

" + } + } + }, "GetDefaultPatchBaselineRequest":{ "type":"structure", "members":{ @@ -6470,7 +6613,7 @@ "documentation":"

The filter values.

" } }, - "documentation":"

Describes a filter for a specific list of instances.

" + "documentation":"

Describes a filter for a specific list of instances. You can filter instances information by using tags. You specify tags by using a key-value mapping.

Use this action instead of the DescribeInstanceInformationRequest$InstanceInformationFilterList method. The InstanceInformationFilterList method is a legacy method and does not support tags.

" }, "InstanceInformationFilterKey":{ "type":"string", @@ -6687,6 +6830,14 @@ "documentation":"

The activation ID is not valid. Verify that you entered the correct ActivationId or ActivationCode and try again.

", "exception":true }, + "InvalidAggregatorException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The specified aggregator is not valid for inventory groups. Verify that the aggregator uses a valid inventory type such as AWS:Application or AWS:InstanceInformation.

", + "exception":true + }, "InvalidAllowedPatternException":{ "type":"structure", "members":{ @@ -6856,6 +7007,14 @@ "documentation":"

The specified filter value is not valid.

", "exception":true }, + "InvalidInventoryGroupException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The specified inventory group is not valid.

", + "exception":true + }, "InvalidInventoryItemContextException":{ "type":"structure", "members":{ @@ -7022,6 +7181,10 @@ "Aggregators":{ "shape":"InventoryAggregatorList", "documentation":"

Nested aggregators to further refine aggregation for an inventory type.

" + }, + "Groups":{ + "shape":"InventoryGroupList", + "documentation":"

A user-defined set of one or more filters on which to aggregate inventory data. Groups return a count of resources that match and don't match the specified criteria.

" } }, "documentation":"

Specifies the inventory type and attribute for the aggregation execution.

" @@ -7170,7 +7333,36 @@ "InventoryFilterValueList":{ "type":"list", "member":{"shape":"InventoryFilterValue"}, - "max":20, + "max":40, + "min":1 + }, + "InventoryGroup":{ + "type":"structure", + "required":[ + "Name", + "Filters" + ], + "members":{ + "Name":{ + "shape":"InventoryGroupName", + "documentation":"

The name of the group.

" + }, + "Filters":{ + "shape":"InventoryFilterList", + "documentation":"

Filters define the criteria for the group. The matchingCount field displays the number of resources that match the criteria. The notMatchingCount field displays the number of resources that don't match the criteria.

" + } + }, + "documentation":"

A user-defined set of one or more filters on which to aggregate inventory data. Groups return a count of resources that match and don't match the specified criteria.

" + }, + "InventoryGroupList":{ + "type":"list", + "member":{"shape":"InventoryGroup"}, + "max":10, + "min":1 + }, + "InventoryGroupName":{ + "type":"string", + "max":200, "min":1 }, "InventoryItem":{ @@ -7319,7 +7511,8 @@ "NotEqual", "BeginWith", "LessThan", - "GreaterThan" + "GreaterThan", + "Exists" ] }, "InventoryResultEntity":{ @@ -10050,6 +10243,33 @@ "max":1, "min":1 }, + "ResumeSessionRequest":{ + "type":"structure", + "required":["SessionId"], + "members":{ + "SessionId":{ + "shape":"SessionId", + "documentation":"

The ID of the disconnected session to resume.

" + } + } + }, + "ResumeSessionResponse":{ + "type":"structure", + "members":{ + "SessionId":{ + "shape":"SessionId", + "documentation":"

The ID of the session.

" + }, + "TokenValue":{ + "shape":"TokenValue", + "documentation":"

An encrypted token value containing session and caller information. Used to authenticate the connection to the instance.

" + }, + "StreamUrl":{ + "shape":"StreamUrl", + "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssm-messages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in the AWS Systems Manager table of regions and endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" + } + } + }, "S3BucketName":{ "type":"string", "max":63, @@ -10208,6 +10428,177 @@ } }, "ServiceRole":{"type":"string"}, + "Session":{ + "type":"structure", + "members":{ + "SessionId":{ + "shape":"SessionId", + "documentation":"

The ID of the session.

" + }, + "Target":{ + "shape":"SessionTarget", + "documentation":"

The instance that the Session Manager session connected to.

" + }, + "Status":{ + "shape":"SessionStatus", + "documentation":"

The status of the session. For example, \"Connected\" or \"Terminated\".

" + }, + "StartDate":{ + "shape":"DateTime", + "documentation":"

The date and time, in ISO-8601 Extended format, when the session began.

" + }, + "EndDate":{ + "shape":"DateTime", + "documentation":"

The date and time, in ISO-8601 Extended format, when the session was terminated.

" + }, + "DocumentName":{ + "shape":"DocumentName", + "documentation":"

The name of the Session Manager SSM document used to define the parameters and plugin settings for the session. For example, SSM-SessionManagerRunShell.

" + }, + "Owner":{ + "shape":"SessionOwner", + "documentation":"

The ID of the AWS user account that started the session.

" + }, + "Details":{ + "shape":"SessionDetails", + "documentation":"

Reserved for future use.

" + }, + "OutputUrl":{ + "shape":"SessionManagerOutputUrl", + "documentation":"

Reserved for future use.

" + } + }, + "documentation":"

Information about a Session Manager connection to an instance.

" + }, + "SessionDetails":{ + "type":"string", + "max":1024, + "min":1 + }, + "SessionFilter":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"SessionFilterKey", + "documentation":"

The name of the filter.

" + }, + "value":{ + "shape":"SessionFilterValue", + "documentation":"

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later.

  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.

  • Target: Specify an instance to which session connections have been made.

  • Owner: Specify an AWS user account to see a list of sessions started by that user.

  • Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include:

    • Connected

    • Connecting

    • Disconnected

    • Terminated

    • Terminating

    • Failed

" + } + }, + "documentation":"

Describes a filter for Session Manager information.

" + }, + "SessionFilterKey":{ + "type":"string", + "enum":[ + "InvokedAfter", + "InvokedBefore", + "Target", + "Owner", + "Status" + ] + }, + "SessionFilterList":{ + "type":"list", + "member":{"shape":"SessionFilter"}, + "max":5, + "min":1 + }, + "SessionFilterValue":{ + "type":"string", + "max":200, + "min":1 + }, + "SessionId":{ + "type":"string", + "max":96, + "min":1 + }, + "SessionList":{ + "type":"list", + "member":{"shape":"Session"} + }, + "SessionManagerCloudWatchOutputUrl":{ + "type":"string", + "max":2083, + "min":1 + }, + "SessionManagerOutputUrl":{ + "type":"structure", + "members":{ + "S3OutputUrl":{ + "shape":"SessionManagerS3OutputUrl", + "documentation":"

Reserved for future use.

" + }, + "CloudWatchOutputUrl":{ + "shape":"SessionManagerCloudWatchOutputUrl", + "documentation":"

Reserved for future use.

" + } + }, + "documentation":"

Reserved for future use.

" + }, + "SessionManagerParameterName":{ + "type":"string", + "max":255, + "min":1 + }, + "SessionManagerParameterValue":{ + "type":"string", + "max":65535, + "min":1 + }, + "SessionManagerParameterValueList":{ + "type":"list", + "member":{"shape":"SessionManagerParameterValue"} + }, + "SessionManagerParameters":{ + "type":"map", + "key":{"shape":"SessionManagerParameterName"}, + "value":{"shape":"SessionManagerParameterValueList"} + }, + "SessionManagerS3OutputUrl":{ + "type":"string", + "max":2083, + "min":1 + }, + "SessionMaxResults":{ + "type":"integer", + "max":200, + "min":1 + }, + "SessionOwner":{ + "type":"string", + "max":256, + "min":1 + }, + "SessionState":{ + "type":"string", + "enum":[ + "Active", + "History" + ] + }, + "SessionStatus":{ + "type":"string", + "enum":[ + "Connected", + "Connecting", + "Disconnected", + "Terminated", + "Terminating", + "Failed" + ] + }, + "SessionTarget":{ + "type":"string", + "max":50, + "min":1 + }, "SeveritySummary":{ "type":"structure", "members":{ @@ -10334,6 +10725,41 @@ } } }, + "StartSessionRequest":{ + "type":"structure", + "required":["Target"], + "members":{ + "Target":{ + "shape":"SessionTarget", + "documentation":"

The instance to connect to for the session.

" + }, + "DocumentName":{ + "shape":"DocumentARN", + "documentation":"

The name of the SSM document to define the parameters and plugin settings for the session. For example, SSM-SessionManagerRunShell. If no document name is provided, a shell to the instance is launched by default.

" + }, + "Parameters":{ + "shape":"SessionManagerParameters", + "documentation":"

Reserved for future use.

" + } + } + }, + "StartSessionResponse":{ + "type":"structure", + "members":{ + "SessionId":{ + "shape":"SessionId", + "documentation":"

The ID of the session.

" + }, + "TokenValue":{ + "shape":"TokenValue", + "documentation":"

An encrypted token value containing session and caller information. Used to authenticate the connection to the instance.

" + }, + "StreamUrl":{ + "shape":"StreamUrl", + "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssm-messages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in the AWS Systems Manager table of regions and endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" + } + } + }, "StatusAdditionalInfo":{ "type":"string", "max":1024 @@ -10523,6 +10949,7 @@ "Cancel" ] }, + "StreamUrl":{"type":"string"}, "String":{"type":"string"}, "StringDateTime":{ "type":"string", @@ -10632,6 +11059,14 @@ "max":300, "min":0 }, + "TargetNotConnected":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The specified target instance for the session is not fully configured for use with Session Manager. For more information, see Getting Started with Session Manager in the AWS Systems Manager User Guide.

", + "exception":true + }, "TargetParameterList":{ "type":"list", "member":{"shape":"ParameterValue"} @@ -10654,11 +11089,35 @@ "max":5, "min":0 }, + "TerminateSessionRequest":{ + "type":"structure", + "required":["SessionId"], + "members":{ + "SessionId":{ + "shape":"SessionId", + "documentation":"

The ID of the session to terminate.

" + } + } + }, + "TerminateSessionResponse":{ + "type":"structure", + "members":{ + "SessionId":{ + "shape":"SessionId", + "documentation":"

The ID of the session that has been terminated.

" + } + } + }, "TimeoutSeconds":{ "type":"integer", "max":2592000, "min":30 }, + "TokenValue":{ + "type":"string", + "max":300, + "min":0 + }, "TooManyTagsError":{ "type":"structure", "members":{ diff --git a/botocore/data/waf-regional/2016-11-28/service-2.json b/botocore/data/waf-regional/2016-11-28/service-2.json index 611bdb63..7e2c98a7 100644 --- a/botocore/data/waf-regional/2016-11-28/service-2.json +++ b/botocore/data/waf-regional/2016-11-28/service-2.json @@ -292,6 +292,21 @@ ], "documentation":"

Permanently deletes an IPSet. You can't delete an IPSet if it's still used in any Rules or if it still includes any IP addresses.

If you just want to remove an IPSet from a Rule, use UpdateRule.

To permanently delete an IPSet from AWS WAF, perform the following steps:

  1. Update the IPSet to remove IP address ranges, if any. For more information, see UpdateIPSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteIPSet request.

  3. Submit a DeleteIPSet request.

" }, + "DeleteLoggingConfiguration":{ + "name":"DeleteLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLoggingConfigurationRequest"}, + "output":{"shape":"DeleteLoggingConfigurationResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFStaleDataException"} + ], + "documentation":"

Permanently deletes the LoggingConfiguration from the specified web ACL.

" + }, "DeletePermissionPolicy":{ "name":"DeletePermissionPolicy", "http":{ @@ -392,7 +407,8 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFNonexistentItemException"}, {"shape":"WAFReferencedItemException"}, - {"shape":"WAFNonEmptyEntityException"} + {"shape":"WAFNonEmptyEntityException"}, + {"shape":"WAFInvalidOperationException"} ], "documentation":"

Permanently deletes a RuleGroup. You can't delete a RuleGroup if it's still used in any WebACL objects or if it still includes any rules.

If you just want to remove a RuleGroup from a WebACL, use UpdateWebACL.

To permanently delete a RuleGroup from AWS WAF, perform the following steps:

  1. Update the RuleGroup to remove rules, if any. For more information, see UpdateRuleGroup.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRuleGroup request.

  3. Submit a DeleteRuleGroup request.

" }, @@ -556,6 +572,20 @@ ], "documentation":"

Returns the IPSet that is specified by IPSetId.

" }, + "GetLoggingConfiguration":{ + "name":"GetLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLoggingConfigurationRequest"}, + "output":{"shape":"GetLoggingConfigurationResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"} + ], + "documentation":"

Returns the LoggingConfiguration for the specified web ACL.

" + }, "GetPermissionPolicy":{ "name":"GetPermissionPolicy", "http":{ @@ -808,6 +838,21 @@ ], "documentation":"

Returns an array of IPSetSummary objects in the response.

" }, + "ListLoggingConfigurations":{ + "name":"ListLoggingConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLoggingConfigurationsRequest"}, + "output":{"shape":"ListLoggingConfigurationsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidParameterException"} + ], + "documentation":"

Returns an array of LoggingConfiguration objects.

" + }, "ListRateBasedRules":{ "name":"ListRateBasedRules", "http":{ @@ -962,6 +1007,21 @@ ], "documentation":"

Returns an array of XssMatchSet objects.

" }, + "PutLoggingConfiguration":{ + "name":"PutLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutLoggingConfigurationRequest"}, + "output":{"shape":"PutLoggingConfigurationResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFStaleDataException"} + ], + "documentation":"

Associates a LoggingConfiguration with a specified web ACL.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose delivery stream. For more information, see Creating an Amazon Kinesis Data Firehose Delivery Stream.

  2. Associate that delivery stream to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose delivery stream. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" + }, "PutPermissionPolicy":{ "name":"PutPermissionPolicy", "http":{ @@ -976,7 +1036,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidPermissionPolicyException"} ], - "documentation":"

Attaches a IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.

The PutPermissionPolicy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL and waf-regional:UpdateWebACL. Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

For more information, see IAM Policies.

An example of a valid policy parameter is shown in the Examples section below.

" + "documentation":"

Attaches an IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.

The PutPermissionPolicy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, waf:GetRuleGroup and waf-regional:GetRuleGroup. Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

For more information, see IAM Policies.

An example of a valid policy parameter is shown in the Examples section below.

" }, "UpdateByteMatchSet":{ "name":"UpdateByteMatchSet", @@ -1038,7 +1098,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change an IPSetDescriptor object, you delete the existing object and add a new one.

  • The IP address version, IPv4 or IPv6.

  • The IP address in CIDR notation, for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44).

AWS WAF supports /8, /16, /24, and /32 IP address ranges for IPv4, and /24, /32, /48, /56, /64 and /128 for IPv6. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

IPv6 addresses can be represented using any of the following formats:

  • 1111:0000:0000:0000:0000:0000:0000:0111/128

  • 1111:0:0:0:0:0:0:0111/128

  • 1111::0111/128

  • 1111::111/128

You use an IPSet to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an IPSet that specifies those IP addresses, and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Submit a CreateIPSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

When you update an IPSet, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change an IPSetDescriptor object, you delete the existing object and add a new one.

  • The IP address version, IPv4 or IPv6.

  • The IP address in CIDR notation, for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44).

AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /16, /24, /32, /48, /56, /64, and /128. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

IPv6 addresses can be represented using any of the following formats:

  • 1111:0000:0000:0000:0000:0000:0000:0111/128

  • 1111:0:0:0:0:0:0:0111/128

  • 1111::0111/128

  • 1111::111/128

You use an IPSet to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an IPSet that specifies those IP addresses, and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Submit a CreateIPSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

When you update an IPSet, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.

You can insert a maximum of 1000 addresses in a single request.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateRateBasedRule":{ "name":"UpdateRateBasedRule", @@ -1160,7 +1220,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. For each SizeConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a SizeConstraintSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the User-Agent header.

  • Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first 8192 bytes of your request to AWS WAF.

  • A ComparisonOperator used for evaluating the selected part of the request against the specified Size, such as equals, greater than, less than, and so on.

  • The length, in bytes, that you want AWS WAF to watch for in selected part of the request. The length is computed after applying the transformation.

For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  3. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. For each SizeConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a SizeConstraintSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the User-Agent header.

  • Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first 8192 bytes of your request to AWS WAF.

    You can only specify a single type of TextTransformation.

  • A ComparisonOperator used for evaluating the selected part of the request against the specified Size, such as equals, greater than, less than, and so on.

  • The length, in bytes, that you want AWS WAF to watch for in selected part of the request. The length is computed after applying the transformation.

For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  3. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateSqlInjectionMatchSet":{ "name":"UpdateSqlInjectionMatchSet", @@ -1180,7 +1240,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a SqlInjectionMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header, the name of the header.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.

You use SqlInjectionMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Submit a CreateSqlInjectionMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a SqlInjectionMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.

    You can only specify a single type of TextTransformation.

You use SqlInjectionMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Submit a CreateSqlInjectionMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateWebACL":{ "name":"UpdateWebACL", @@ -1222,7 +1282,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For each XssMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a XssMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header, the name of the header.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks.

You use XssMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain cross-site scripting attacks in the request body and you want to block the requests, you can create an XssMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure an XssMatchSet, perform the following steps:

  1. Submit a CreateXssMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateXssMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For each XssMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a XssMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks.

    You can only specify a single type of TextTransformation.

You use XssMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain cross-site scripting attacks in the request body and you want to block the requests, you can create an XssMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure an XssMatchSet, perform the following steps:

  1. Submit a CreateXssMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateXssMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" } }, "shapes":{ @@ -1366,11 +1426,11 @@ }, "TargetString":{ "shape":"ByteMatchTargetString", - "documentation":"

The value that you want AWS WAF to search for. AWS WAF searches for the specified string in the part of web requests that you specified in FieldToMatch. The maximum length of the value is 50 bytes.

Valid values depend on the values that you specified for FieldToMatch:

  • HEADER: The value that you want AWS WAF to search for in the request header that you specified in FieldToMatch, for example, the value of the User-Agent or Referer header.

  • METHOD: The HTTP method, which indicates the type of operation specified in the request. CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.

  • QUERY_STRING: The value that you want AWS WAF to search for in the query string, which is the part of a URL that appears after a ? character.

  • URI: The value that you want AWS WAF to search for in the part of a URL that identifies a resource, for example, /images/daily-ad.jpg.

  • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.

If TargetString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

If you're using the AWS WAF API

Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.

For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64 encoding and include the resulting value, QmFkQm90, in the value of TargetString.

If you're using the AWS CLI or one of the AWS SDKs

The value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.

" + "documentation":"

The value that you want AWS WAF to search for. AWS WAF searches for the specified string in the part of web requests that you specified in FieldToMatch. The maximum length of the value is 50 bytes.

Valid values depend on the values that you specified for FieldToMatch:

  • HEADER: The value that you want AWS WAF to search for in the request header that you specified in FieldToMatch, for example, the value of the User-Agent or Referer header.

  • METHOD: The HTTP method, which indicates the type of operation specified in the request. CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.

  • QUERY_STRING: The value that you want AWS WAF to search for in the query string, which is the part of a URL that appears after a ? character.

  • URI: The value that you want AWS WAF to search for in the part of a URL that identifies a resource, for example, /images/daily-ad.jpg.

  • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.

  • SINGLE_QUERY_ARG: The parameter in the query string that you will inspect, such as UserName or SalesRegion. The maximum length for SINGLE_QUERY_ARG is 30 characters.

  • ALL_QUERY_ARGS: Similar to SINGLE_QUERY_ARG, but instead of inspecting a single parameter, AWS WAF inspects all parameters within the query string for the value or regex pattern that you specify in TargetString.

If TargetString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

If you're using the AWS WAF API

Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.

For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64 encoding and include the resulting value, QmFkQm90, in the value of TargetString.

If you're using the AWS CLI or one of the AWS SDKs

The value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.

" }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on TargetString before inspecting a request for a match.

CMD_LINE

When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on TargetString before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" }, "PositionalConstraint":{ "shape":"PositionalConstraint", @@ -1891,6 +1951,21 @@ } } }, + "DeleteLoggingConfigurationRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the web ACL from which you want to delete the LoggingConfiguration.

" + } + } + }, + "DeleteLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "DeletePermissionPolicyRequest":{ "type":"structure", "required":["ResourceArn"], @@ -2165,11 +2240,11 @@ "members":{ "Type":{ "shape":"MatchFieldType", - "documentation":"

The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:

  • HEADER: A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data.

  • METHOD: The HTTP method, which indicated the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.

  • QUERY_STRING: A query string, which is the part of a URL that appears after a ? character, if any.

  • URI: The part of a web request that identifies a resource, for example, /images/daily-ad.jpg.

  • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.

" + "documentation":"

The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:

  • HEADER: A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data.

  • METHOD: The HTTP method, which indicated the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.

  • QUERY_STRING: A query string, which is the part of a URL that appears after a ? character, if any.

  • URI: The part of a web request that identifies a resource, for example, /images/daily-ad.jpg.

  • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.

  • SINGLE_QUERY_ARG: The parameter in the query string that you will inspect, such as UserName or SalesRegion. The maximum length for SINGLE_QUERY_ARG is 30 characters.

  • ALL_QUERY_ARGS: Similar to SINGLE_QUERY_ARG, but rather than inspecting a single parameter, AWS WAF will inspect all parameters within the query for the value or regex pattern that you specify in TargetString.

" }, "Data":{ "shape":"MatchFieldData", - "documentation":"

When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. If the value of Type is any other value, omit Data.

The name of the header is not case sensitive.

" + "documentation":"

When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. The name of the header is not case sensitive.

When the value of Type is SINGLE_QUERY_ARG, enter the name of the parameter that you want AWS WAF to search, for example, UserName or SalesRegion. The parameter name is not case sensitive.

If the value of Type is any other value, omit Data.

" } }, "documentation":"

Specifies where in a web request to look for TargetString.

" @@ -2611,6 +2686,25 @@ } } }, + "GetLoggingConfigurationRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the web ACL for which you want to get the LoggingConfiguration.

" + } + } + }, + "GetLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

The LoggingConfiguration for the specified web ACL.

" + } + } + }, "GetPermissionPolicyRequest":{ "type":"structure", "required":["ResourceArn"], @@ -2972,7 +3066,7 @@ "documentation":"

The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) that web requests originate from. If the WebACL is associated with a CloudFront distribution and the viewer did not use an HTTP proxy or a load balancer to send the request, this is the value of the c-ip field in the CloudFront access logs.

" } }, - "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports /8, /16, /24, and /32 IP address ranges for IPv4, and /24, /32, /48, /56, /64 and /128 for IPv6.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/31. To block a range of IP addresses, you can specify a /128, /64, /56, /48, /32, /24, /16, or /8 CIDR. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /16, /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/31. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /16, /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" }, "IPSetDescriptor":{ "type":"structure", @@ -3158,6 +3252,32 @@ } } }, + "ListLoggingConfigurationsRequest":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

If you specify a value for Limit and you have more LoggingConfigurations than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of LoggingConfigurations. For the second and subsequent ListLoggingConfigurations requests, specify the value of NextMarker from the previous response to get information about another batch of ListLoggingConfigurations.

" + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

Specifies the number of LoggingConfigurations that you want AWS WAF to return for this request. If you have more LoggingConfigurations than the number that you specify for Limit, the response includes a NextMarker value that you can use to get another batch of LoggingConfigurations.

" + } + } + }, + "ListLoggingConfigurationsResponse":{ + "type":"structure", + "members":{ + "LoggingConfigurations":{ + "shape":"LoggingConfigurations", + "documentation":"

An array of LoggingConfiguration objects.

" + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

If you have more LoggingConfigurations than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more LoggingConfigurations, submit another ListLoggingConfigurations request, and specify the NextMarker value from the response in the NextMarker value in the next request.

" + } + } + }, "ListRateBasedRulesRequest":{ "type":"structure", "members":{ @@ -3441,6 +3561,38 @@ }, "documentation":"

The response to a ListXssMatchSets request.

" }, + "LogDestinationConfigs":{ + "type":"list", + "member":{"shape":"ResourceArn"}, + "max":1, + "min":1 + }, + "LoggingConfiguration":{ + "type":"structure", + "required":[ + "ResourceArn", + "LogDestinationConfigs" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the web ACL that you want to associate with LogDestinationConfigs.

" + }, + "LogDestinationConfigs":{ + "shape":"LogDestinationConfigs", + "documentation":"

An array of Amazon Kinesis Data Firehose delivery stream ARNs.

" + }, + "RedactedFields":{ + "shape":"RedactedFields", + "documentation":"

The parts of the request that you want redacted from the logs. For example, if you redact the cookie field, the cookie field in the delivery stream will be xxx.

" + } + }, + "documentation":"

The Amazon Kinesis Data Firehose delivery streams, RedactedFields information, and the web ACL Amazon Resource Name (ARN).

" + }, + "LoggingConfigurations":{ + "type":"list", + "member":{"shape":"LoggingConfiguration"} + }, "ManagedKey":{"type":"string"}, "ManagedKeys":{ "type":"list", @@ -3454,7 +3606,9 @@ "QUERY_STRING", "HEADER", "METHOD", - "BODY" + "BODY", + "SINGLE_QUERY_ARG", + "ALL_QUERY_ARGS" ] }, "MetricName":{"type":"string"}, @@ -3528,7 +3682,7 @@ }, "Type":{ "shape":"PredicateType", - "documentation":"

The type of predicate in a Rule, such as ByteMatchSet or IPSet.

" + "documentation":"

The type of predicate in a Rule, such as ByteMatch or IPSet.

" }, "DataId":{ "shape":"ResourceId", @@ -3553,6 +3707,25 @@ "type":"list", "member":{"shape":"Predicate"} }, + "PutLoggingConfigurationRequest":{ + "type":"structure", + "required":["LoggingConfiguration"], + "members":{ + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

The Amazon Kinesis Data Firehose delivery streams that contains the inspected traffic information, the redacted fields details, and the Amazon Resource Name (ARN) of the web ACL to monitor.

" + } + } + }, + "PutLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

The LoggingConfiguration that you submitted in the request.

" + } + } + }, "PutPermissionPolicyRequest":{ "type":"structure", "required":[ @@ -3617,8 +3790,13 @@ }, "RateLimit":{ "type":"long", + "max":2000000000, "min":2000 }, + "RedactedFields":{ + "type":"list", + "member":{"shape":"FieldToMatch"} + }, "RegexMatchSet":{ "type":"structure", "members":{ @@ -3696,7 +3874,7 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on RegexPatternSet before inspecting a request for a match.

CMD_LINE

When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on RegexPatternSet before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" }, "RegexPatternSetId":{ "shape":"ResourceId", @@ -3997,7 +4175,7 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

Note that if you choose BODY for the value of Type, you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.

NONE

Specify NONE if you don't want to perform any text transformations.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

Note that if you choose BODY for the value of Type, you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.

NONE

Specify NONE if you don't want to perform any text transformations.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

" }, "ComparisonOperator":{ "shape":"ComparisonOperator", @@ -4161,7 +4339,7 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

CMD_LINE

When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" } }, "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

" @@ -4308,7 +4486,7 @@ }, "Updates":{ "shape":"IPSetUpdates", - "documentation":"

An array of IPSetUpdate objects that you want to insert into or delete from an IPSet. For more information, see the applicable data types:

" + "documentation":"

An array of IPSetUpdate objects that you want to insert into or delete from an IPSet. For more information, see the applicable data types:

You can insert a maximum of 1000 addresses in a single request.

" } } }, @@ -4641,7 +4819,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

The operation failed because there was nothing to do. For example:

  • You tried to remove a Rule from a WebACL, but the Rule isn't in the specified WebACL.

  • You tried to remove an IP address from an IPSet, but the IP address isn't in the specified IPSet.

  • You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple isn't in the specified WebACL.

  • You tried to add a Rule to a WebACL, but the Rule already exists in the specified WebACL.

  • You tried to add an IP address to an IPSet, but the IP address already exists in the specified IPSet.

  • You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple already exists in the specified WebACL.

", + "documentation":"

The operation failed because there was nothing to do. For example:

  • You tried to remove a Rule from a WebACL, but the Rule isn't in the specified WebACL.

  • You tried to remove an IP address from an IPSet, but the IP address isn't in the specified IPSet.

  • You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple isn't in the specified WebACL.

  • You tried to add a Rule to a WebACL, but the Rule already exists in the specified WebACL.

  • You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple already exists in the specified WebACL.

", "exception":true }, "WAFInvalidParameterException":{ @@ -4659,7 +4837,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

The operation failed because the specified policy is not in the proper format.

The policy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL or waf-regional:UpdateWebACL. Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

", + "documentation":"

The operation failed because the specified policy is not in the proper format.

The policy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, waf:GetRuleGroup and waf-regional:GetRuleGroup . Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

", "exception":true }, "WAFInvalidRegexPatternException":{ @@ -4934,7 +5112,7 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

CMD_LINE

When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" } }, "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header.

" diff --git a/botocore/data/waf/2015-08-24/service-2.json b/botocore/data/waf/2015-08-24/service-2.json index d14a4a7a..2f1bd094 100644 --- a/botocore/data/waf/2015-08-24/service-2.json +++ b/botocore/data/waf/2015-08-24/service-2.json @@ -275,6 +275,21 @@ ], "documentation":"

Permanently deletes an IPSet. You can't delete an IPSet if it's still used in any Rules or if it still includes any IP addresses.

If you just want to remove an IPSet from a Rule, use UpdateRule.

To permanently delete an IPSet from AWS WAF, perform the following steps:

  1. Update the IPSet to remove IP address ranges, if any. For more information, see UpdateIPSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteIPSet request.

  3. Submit a DeleteIPSet request.

" }, + "DeleteLoggingConfiguration":{ + "name":"DeleteLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLoggingConfigurationRequest"}, + "output":{"shape":"DeleteLoggingConfigurationResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFStaleDataException"} + ], + "documentation":"

Permanently deletes the LoggingConfiguration from the specified web ACL.

" + }, "DeletePermissionPolicy":{ "name":"DeletePermissionPolicy", "http":{ @@ -375,7 +390,8 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFNonexistentItemException"}, {"shape":"WAFReferencedItemException"}, - {"shape":"WAFNonEmptyEntityException"} + {"shape":"WAFNonEmptyEntityException"}, + {"shape":"WAFInvalidOperationException"} ], "documentation":"

Permanently deletes a RuleGroup. You can't delete a RuleGroup if it's still used in any WebACL objects or if it still includes any rules.

If you just want to remove a RuleGroup from a WebACL, use UpdateWebACL.

To permanently delete a RuleGroup from AWS WAF, perform the following steps:

  1. Update the RuleGroup to remove rules, if any. For more information, see UpdateRuleGroup.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRuleGroup request.

  3. Submit a DeleteRuleGroup request.

" }, @@ -523,6 +539,20 @@ ], "documentation":"

Returns the IPSet that is specified by IPSetId.

" }, + "GetLoggingConfiguration":{ + "name":"GetLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLoggingConfigurationRequest"}, + "output":{"shape":"GetLoggingConfigurationResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"} + ], + "documentation":"

Returns the LoggingConfiguration for the specified web ACL.

" + }, "GetPermissionPolicy":{ "name":"GetPermissionPolicy", "http":{ @@ -758,6 +788,21 @@ ], "documentation":"

Returns an array of IPSetSummary objects in the response.

" }, + "ListLoggingConfigurations":{ + "name":"ListLoggingConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLoggingConfigurationsRequest"}, + "output":{"shape":"ListLoggingConfigurationsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidParameterException"} + ], + "documentation":"

Returns an array of LoggingConfiguration objects.

" + }, "ListRateBasedRules":{ "name":"ListRateBasedRules", "http":{ @@ -897,6 +942,21 @@ ], "documentation":"

Returns an array of XssMatchSet objects.

" }, + "PutLoggingConfiguration":{ + "name":"PutLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutLoggingConfigurationRequest"}, + "output":{"shape":"PutLoggingConfigurationResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFStaleDataException"} + ], + "documentation":"

Associates a LoggingConfiguration with a specified web ACL.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose delivery stream. For more information, see Creating an Amazon Kinesis Data Firehose Delivery Stream.

  2. Associate that delivery stream to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose delivery stream. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" + }, "PutPermissionPolicy":{ "name":"PutPermissionPolicy", "http":{ @@ -911,7 +971,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidPermissionPolicyException"} ], - "documentation":"

Attaches a IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.

The PutPermissionPolicy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL and waf-regional:UpdateWebACL. Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

For more information, see IAM Policies.

An example of a valid policy parameter is shown in the Examples section below.

" + "documentation":"

Attaches a IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.

The PutPermissionPolicy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, waf:GetRuleGroup and waf-regional:GetRuleGroup . Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

For more information, see IAM Policies.

An example of a valid policy parameter is shown in the Examples section below.

" }, "UpdateByteMatchSet":{ "name":"UpdateByteMatchSet", @@ -973,7 +1033,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change an IPSetDescriptor object, you delete the existing object and add a new one.

  • The IP address version, IPv4 or IPv6.

  • The IP address in CIDR notation, for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44).

AWS WAF supports /8, /16, /24, and /32 IP address ranges for IPv4, and /24, /32, /48, /56, /64 and /128 for IPv6. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

IPv6 addresses can be represented using any of the following formats:

  • 1111:0000:0000:0000:0000:0000:0000:0111/128

  • 1111:0:0:0:0:0:0:0111/128

  • 1111::0111/128

  • 1111::111/128

You use an IPSet to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an IPSet that specifies those IP addresses, and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Submit a CreateIPSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

When you update an IPSet, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change an IPSetDescriptor object, you delete the existing object and add a new one.

  • The IP address version, IPv4 or IPv6.

  • The IP address in CIDR notation, for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44).

AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /16, /24, /32, /48, /56, /64, and /128. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

IPv6 addresses can be represented using any of the following formats:

  • 1111:0000:0000:0000:0000:0000:0000:0111/128

  • 1111:0:0:0:0:0:0:0111/128

  • 1111::0111/128

  • 1111::111/128

You use an IPSet to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an IPSet that specifies those IP addresses, and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Submit a CreateIPSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

When you update an IPSet, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.

You can insert a maximum of 1000 addresses in a single request.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateRateBasedRule":{ "name":"UpdateRateBasedRule", @@ -1095,7 +1155,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. For each SizeConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a SizeConstraintSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the User-Agent header.

  • Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first 8192 bytes of your request to AWS WAF.

  • A ComparisonOperator used for evaluating the selected part of the request against the specified Size, such as equals, greater than, less than, and so on.

  • The length, in bytes, that you want AWS WAF to watch for in selected part of the request. The length is computed after applying the transformation.

For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  3. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. For each SizeConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a SizeConstraintSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the User-Agent header.

  • Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first 8192 bytes of your request to AWS WAF.

    You can only specify a single type of TextTransformation.

  • A ComparisonOperator used for evaluating the selected part of the request against the specified Size, such as equals, greater than, less than, and so on.

  • The length, in bytes, that you want AWS WAF to watch for in selected part of the request. The length is computed after applying the transformation.

For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  3. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateSqlInjectionMatchSet":{ "name":"UpdateSqlInjectionMatchSet", @@ -1115,7 +1175,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a SqlInjectionMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header, the name of the header.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.

You use SqlInjectionMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Submit a CreateSqlInjectionMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a SqlInjectionMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.

    You can only specify a single type of TextTransformation.

You use SqlInjectionMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Submit a CreateSqlInjectionMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateWebACL":{ "name":"UpdateWebACL", @@ -1157,7 +1217,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For each XssMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a XssMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header, the name of the header.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks.

You use XssMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain cross-site scripting attacks in the request body and you want to block the requests, you can create an XssMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure an XssMatchSet, perform the following steps:

  1. Submit a CreateXssMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateXssMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For each XssMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a XssMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks.

    You can only specify a single type of TextTransformation.

You use XssMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain cross-site scripting attacks in the request body and you want to block the requests, you can create an XssMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure an XssMatchSet, perform the following steps:

  1. Submit a CreateXssMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateXssMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" } }, "shapes":{ @@ -1279,11 +1339,11 @@ }, "TargetString":{ "shape":"ByteMatchTargetString", - "documentation":"

The value that you want AWS WAF to search for. AWS WAF searches for the specified string in the part of web requests that you specified in FieldToMatch. The maximum length of the value is 50 bytes.

Valid values depend on the values that you specified for FieldToMatch:

  • HEADER: The value that you want AWS WAF to search for in the request header that you specified in FieldToMatch, for example, the value of the User-Agent or Referer header.

  • METHOD: The HTTP method, which indicates the type of operation specified in the request. CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.

  • QUERY_STRING: The value that you want AWS WAF to search for in the query string, which is the part of a URL that appears after a ? character.

  • URI: The value that you want AWS WAF to search for in the part of a URL that identifies a resource, for example, /images/daily-ad.jpg.

  • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.

If TargetString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

If you're using the AWS WAF API

Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.

For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64 encoding and include the resulting value, QmFkQm90, in the value of TargetString.

If you're using the AWS CLI or one of the AWS SDKs

The value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.

" + "documentation":"

The value that you want AWS WAF to search for. AWS WAF searches for the specified string in the part of web requests that you specified in FieldToMatch. The maximum length of the value is 50 bytes.

Valid values depend on the values that you specified for FieldToMatch:

  • HEADER: The value that you want AWS WAF to search for in the request header that you specified in FieldToMatch, for example, the value of the User-Agent or Referer header.

  • METHOD: The HTTP method, which indicates the type of operation specified in the request. CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.

  • QUERY_STRING: The value that you want AWS WAF to search for in the query string, which is the part of a URL that appears after a ? character.

  • URI: The value that you want AWS WAF to search for in the part of a URL that identifies a resource, for example, /images/daily-ad.jpg.

  • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.

  • SINGLE_QUERY_ARG: The parameter in the query string that you will inspect, such as UserName or SalesRegion. The maximum length for SINGLE_QUERY_ARG is 30 characters.

  • ALL_QUERY_ARGS: Similar to SINGLE_QUERY_ARG, but instead of inspecting a single parameter, AWS WAF inspects all parameters within the query string for the value or regex pattern that you specify in TargetString.

If TargetString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

If you're using the AWS WAF API

Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.

For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64 encoding and include the resulting value, QmFkQm90, in the value of TargetString.

If you're using the AWS CLI or one of the AWS SDKs

The value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.

" }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on TargetString before inspecting a request for a match.

CMD_LINE

When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on TargetString before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" }, "PositionalConstraint":{ "shape":"PositionalConstraint", @@ -1804,6 +1864,21 @@ } } }, + "DeleteLoggingConfigurationRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the web ACL from which you want to delete the LoggingConfiguration.

" + } + } + }, + "DeleteLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "DeletePermissionPolicyRequest":{ "type":"structure", "required":["ResourceArn"], @@ -2063,11 +2138,11 @@ "members":{ "Type":{ "shape":"MatchFieldType", - "documentation":"

The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:

  • HEADER: A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data.

  • METHOD: The HTTP method, which indicated the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.

  • QUERY_STRING: A query string, which is the part of a URL that appears after a ? character, if any.

  • URI: The part of a web request that identifies a resource, for example, /images/daily-ad.jpg.

  • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.

" + "documentation":"

The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:

  • HEADER: A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data.

  • METHOD: The HTTP method, which indicates the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.

  • QUERY_STRING: A query string, which is the part of a URL that appears after a ? character, if any.

  • URI: The part of a web request that identifies a resource, for example, /images/daily-ad.jpg.

  • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.

  • SINGLE_QUERY_ARG: The parameter in the query string that you will inspect, such as UserName or SalesRegion. The maximum length for SINGLE_QUERY_ARG is 30 characters.

  • ALL_QUERY_ARGS: Similar to SINGLE_QUERY_ARG, but rather than inspecting a single parameter, AWS WAF will inspect all parameters within the query for the value or regex pattern that you specify in TargetString.

" }, "Data":{ "shape":"MatchFieldData", - "documentation":"

When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. If the value of Type is any other value, omit Data.

The name of the header is not case sensitive.

" + "documentation":"

When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. The name of the header is not case sensitive.

When the value of Type is SINGLE_QUERY_ARG, enter the name of the parameter that you want AWS WAF to search, for example, UserName or SalesRegion. The parameter name is not case sensitive.

If the value of Type is any other value, omit Data.

" } }, "documentation":"

Specifies where in a web request to look for TargetString.

" @@ -2509,6 +2584,25 @@ } } }, + "GetLoggingConfigurationRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the web ACL for which you want to get the LoggingConfiguration.

" + } + } + }, + "GetLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

The LoggingConfiguration for the specified web ACL.

" + } + } + }, "GetPermissionPolicyRequest":{ "type":"structure", "required":["ResourceArn"], @@ -2851,7 +2945,7 @@ "documentation":"

The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) that web requests originate from. If the WebACL is associated with a CloudFront distribution and the viewer did not use an HTTP proxy or a load balancer to send the request, this is the value of the c-ip field in the CloudFront access logs.

" } }, - "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports /8, /16, /24, and /32 IP address ranges for IPv4, and /24, /32, /48, /56, /64 and /128 for IPv6.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/31. To block a range of IP addresses, you can specify a /128, /64, /56, /48, /32, /24, /16, or /8 CIDR. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /16, /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /16, /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" }, "IPSetDescriptor":{ "type":"structure", @@ -3037,6 +3131,32 @@ } } }, + "ListLoggingConfigurationsRequest":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

If you specify a value for Limit and you have more LoggingConfigurations than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of LoggingConfigurations. For the second and subsequent ListLoggingConfigurations requests, specify the value of NextMarker from the previous response to get information about another batch of ListLoggingConfigurations.

" + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

Specifies the number of LoggingConfigurations that you want AWS WAF to return for this request. If you have more LoggingConfigurations than the number that you specify for Limit, the response includes a NextMarker value that you can use to get another batch of LoggingConfigurations.

" + } + } + }, + "ListLoggingConfigurationsResponse":{ + "type":"structure", + "members":{ + "LoggingConfigurations":{ + "shape":"LoggingConfigurations", + "documentation":"

An array of LoggingConfiguration objects.

" + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

If you have more LoggingConfigurations than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more LoggingConfigurations, submit another ListLoggingConfigurations request, and specify the NextMarker value from the response in the NextMarker value in the next request.

" + } + } + }, "ListRateBasedRulesRequest":{ "type":"structure", "members":{ @@ -3301,6 +3421,38 @@ }, "documentation":"

The response to a ListXssMatchSets request.

" }, + "LogDestinationConfigs":{ + "type":"list", + "member":{"shape":"ResourceArn"}, + "max":1, + "min":1 + }, + "LoggingConfiguration":{ + "type":"structure", + "required":[ + "ResourceArn", + "LogDestinationConfigs" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the web ACL that you want to associate with LogDestinationConfigs.

" + }, + "LogDestinationConfigs":{ + "shape":"LogDestinationConfigs", + "documentation":"

An array of Amazon Kinesis Data Firehose delivery stream ARNs.

" + }, + "RedactedFields":{ + "shape":"RedactedFields", + "documentation":"

The parts of the request that you want redacted from the logs. For example, if you redact the cookie field, the cookie field in the delivery stream will be xxx.

" + } + }, + "documentation":"

The Amazon Kinesis Data Firehose delivery streams, RedactedFields information, and the web ACL Amazon Resource Name (ARN).

" + }, + "LoggingConfigurations":{ + "type":"list", + "member":{"shape":"LoggingConfiguration"} + }, "ManagedKey":{"type":"string"}, "ManagedKeys":{ "type":"list", @@ -3314,7 +3466,9 @@ "QUERY_STRING", "HEADER", "METHOD", - "BODY" + "BODY", + "SINGLE_QUERY_ARG", + "ALL_QUERY_ARGS" ] }, "MetricName":{"type":"string"}, @@ -3388,7 +3542,7 @@ }, "Type":{ "shape":"PredicateType", - "documentation":"

The type of predicate in a Rule, such as ByteMatchSet or IPSet.

" + "documentation":"

The type of predicate in a Rule, such as ByteMatch or IPSet.

" }, "DataId":{ "shape":"ResourceId", @@ -3413,6 +3567,25 @@ "type":"list", "member":{"shape":"Predicate"} }, + "PutLoggingConfigurationRequest":{ + "type":"structure", + "required":["LoggingConfiguration"], + "members":{ + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

The Amazon Kinesis Data Firehose delivery streams that contains the inspected traffic information, the redacted fields details, and the Amazon Resource Name (ARN) of the web ACL to monitor.

" + } + } + }, + "PutLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

The LoggingConfiguration that you submitted in the request.

" + } + } + }, "PutPermissionPolicyRequest":{ "type":"structure", "required":[ @@ -3477,8 +3650,13 @@ }, "RateLimit":{ "type":"long", + "max":2000000000, "min":2000 }, + "RedactedFields":{ + "type":"list", + "member":{"shape":"FieldToMatch"} + }, "RegexMatchSet":{ "type":"structure", "members":{ @@ -3556,7 +3734,7 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on RegexPatternSet before inspecting a request for a match.

CMD_LINE

When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on RegexPatternSet before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" }, "RegexPatternSetId":{ "shape":"ResourceId", @@ -3853,7 +4031,7 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

Note that if you choose BODY for the value of Type, you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.

NONE

Specify NONE if you don't want to perform any text transformations.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

Note that if you choose BODY for the value of Type, you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.

NONE

Specify NONE if you don't want to perform any text transformations.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

" }, "ComparisonOperator":{ "shape":"ComparisonOperator", @@ -4017,7 +4195,7 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

CMD_LINE

When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" } }, "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

" @@ -4164,7 +4342,7 @@ }, "Updates":{ "shape":"IPSetUpdates", - "documentation":"

An array of IPSetUpdate objects that you want to insert into or delete from an IPSet. For more information, see the applicable data types:

" + "documentation":"

An array of IPSetUpdate objects that you want to insert into or delete from an IPSet. For more information, see the applicable data types:

You can insert a maximum of 1000 addresses in a single request.

" } } }, @@ -4497,7 +4675,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

The operation failed because there was nothing to do. For example:

  • You tried to remove a Rule from a WebACL, but the Rule isn't in the specified WebACL.

  • You tried to remove an IP address from an IPSet, but the IP address isn't in the specified IPSet.

  • You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple isn't in the specified WebACL.

  • You tried to add a Rule to a WebACL, but the Rule already exists in the specified WebACL.

  • You tried to add an IP address to an IPSet, but the IP address already exists in the specified IPSet.

  • You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple already exists in the specified WebACL.

", + "documentation":"

The operation failed because there was nothing to do. For example:

  • You tried to remove a Rule from a WebACL, but the Rule isn't in the specified WebACL.

  • You tried to remove an IP address from an IPSet, but the IP address isn't in the specified IPSet.

  • You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple isn't in the specified WebACL.

  • You tried to add a Rule to a WebACL, but the Rule already exists in the specified WebACL.

  • You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple already exists in the specified WebACL.

", "exception":true }, "WAFInvalidParameterException":{ @@ -4515,7 +4693,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

The operation failed because the specified policy is not in the proper format.

The policy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL or waf-regional:UpdateWebACL. Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

", + "documentation":"

The operation failed because the specified policy is not in the proper format.

The policy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, waf:GetRuleGroup, and waf-regional:GetRuleGroup. Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

", "exception":true }, "WAFInvalidRegexPatternException":{ @@ -4782,7 +4960,7 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

CMD_LINE

When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" } }, "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header.

" diff --git a/botocore/data/xray/2016-04-12/service-2.json b/botocore/data/xray/2016-04-12/service-2.json index 292a35d8..9ac5bc9a 100644 --- a/botocore/data/xray/2016-04-12/service-2.json +++ b/botocore/data/xray/2016-04-12/service-2.json @@ -24,6 +24,35 @@ ], "documentation":"

Retrieves a list of traces specified by ID. Each trace is a collection of segment documents that originates from a single request. Use GetTraceSummaries to get a list of trace IDs.

" }, + "CreateSamplingRule":{ + "name":"CreateSamplingRule", + "http":{ + "method":"POST", + "requestUri":"/CreateSamplingRule" + }, + "input":{"shape":"CreateSamplingRuleRequest"}, + "output":{"shape":"CreateSamplingRuleResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"}, + {"shape":"RuleLimitExceededException"} + ], + "documentation":"

Creates a rule to control sampling behavior for instrumented applications. Services retrieve rules with GetSamplingRules, and evaluate each rule in ascending order of priority for each request. If a rule matches, the service records a trace, borrowing it from the reservoir size. After 10 seconds, the service reports back to X-Ray with GetSamplingTargets to get updated versions of each in-use rule. The updated rule contains a trace quota that the service can use instead of borrowing from the reservoir.

" + }, + "DeleteSamplingRule":{ + "name":"DeleteSamplingRule", + "http":{ + "method":"POST", + "requestUri":"/DeleteSamplingRule" + }, + "input":{"shape":"DeleteSamplingRuleRequest"}, + "output":{"shape":"DeleteSamplingRuleResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":"

Deletes a sampling rule.

" + }, "GetEncryptionConfig":{ "name":"GetEncryptionConfig", "http":{ @@ -38,6 +67,48 @@ ], "documentation":"

Retrieves the current encryption configuration for X-Ray data.

" }, + "GetSamplingRules":{ + "name":"GetSamplingRules", + "http":{ + "method":"POST", + "requestUri":"/GetSamplingRules" + }, + "input":{"shape":"GetSamplingRulesRequest"}, + "output":{"shape":"GetSamplingRulesResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":"

Retrieves all sampling rules.

" + }, + "GetSamplingStatisticSummaries":{ + "name":"GetSamplingStatisticSummaries", + "http":{ + "method":"POST", + "requestUri":"/SamplingStatisticSummaries" + }, + "input":{"shape":"GetSamplingStatisticSummariesRequest"}, + "output":{"shape":"GetSamplingStatisticSummariesResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":"

Retrieves information about recent sampling results for all sampling rules.

" + }, + "GetSamplingTargets":{ + "name":"GetSamplingTargets", + "http":{ + "method":"POST", + "requestUri":"/SamplingTargets" + }, + "input":{"shape":"GetSamplingTargetsRequest"}, + "output":{"shape":"GetSamplingTargetsResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":"

Requests a sampling quota for rules that the service is using to sample requests.

" + }, "GetServiceGraph":{ "name":"GetServiceGraph", "http":{ @@ -121,6 +192,20 @@ {"shape":"ThrottledException"} ], "documentation":"

Uploads segment documents to AWS X-Ray. The X-Ray SDK generates segment documents and sends them to the X-Ray daemon, which uploads them in batches. A segment document can be a completed segment, an in-progress segment, or an array of subsegments.

Segments must include the following fields. For the full segment document schema, see AWS X-Ray Segment Documents in the AWS X-Ray Developer Guide.

Required Segment Document Fields

  • name - The name of the service that handled the request.

  • id - A 64-bit identifier for the segment, unique among segments in the same trace, in 16 hexadecimal digits.

  • trace_id - A unique identifier that connects all segments and subsegments originating from a single client request.

  • start_time - Time the segment or subsegment was created, in floating point seconds in epoch time, accurate to milliseconds. For example, 1480615200.010 or 1.480615200010E9.

  • end_time - Time the segment or subsegment was closed. For example, 1480615200.090 or 1.480615200090E9. Specify either an end_time or in_progress.

  • in_progress - Set to true instead of specifying an end_time to record that a segment has been started, but is not complete. Send an in progress segment when your application receives a request that will take a long time to serve, to trace the fact that the request was received. When the response is sent, send the complete segment to overwrite the in-progress segment.

A trace_id consists of three numbers separated by hyphens. For example, 1-58406520-a006649127e371903a2de979. This includes:

Trace ID Format

  • The version number, i.e., 1.

  • The time of the original request, in Unix epoch time, in 8 hexadecimal digits. For example, 10:00AM December 2nd, 2016 PST in epoch time is 1480615200 seconds, or 58406520 in hexadecimal.

  • A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits.

" + }, + "UpdateSamplingRule":{ + "name":"UpdateSamplingRule", + "http":{ + "method":"POST", + "requestUri":"/UpdateSamplingRule" + }, + "input":{"shape":"UpdateSamplingRuleRequest"}, + "output":{"shape":"UpdateSamplingRuleResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":"

Modifies a sampling rule's configuration.

" } }, "shapes":{ @@ -174,6 +259,22 @@ "key":{"shape":"AnnotationKey"}, "value":{"shape":"ValuesWithServiceIds"} }, + "AttributeKey":{ + "type":"string", + "max":32, + "min":1 + }, + "AttributeMap":{ + "type":"map", + "key":{"shape":"AttributeKey"}, + "value":{"shape":"AttributeValue"}, + "max":5 + }, + "AttributeValue":{ + "type":"string", + "max":32, + "min":1 + }, "BackendConnectionErrors":{ "type":"structure", "members":{ @@ -235,6 +336,56 @@ } } }, + "BorrowCount":{ + "type":"integer", + "min":0 + }, + "ClientID":{ + "type":"string", + "max":24, + "min":24 + }, + "CreateSamplingRuleRequest":{ + "type":"structure", + "required":["SamplingRule"], + "members":{ + "SamplingRule":{ + "shape":"SamplingRule", + "documentation":"

The rule definition.

" + } + } + }, + "CreateSamplingRuleResult":{ + "type":"structure", + "members":{ + "SamplingRuleRecord":{ + "shape":"SamplingRuleRecord", + "documentation":"

The saved rule definition and metadata.

" + } + } + }, + "DeleteSamplingRuleRequest":{ + "type":"structure", + "members":{ + "RuleName":{ + "shape":"String", + "documentation":"

The name of the sampling rule. Specify a rule by either name or ARN, but not both.

" + }, + "RuleARN":{ + "shape":"String", + "documentation":"

The ARN of the sampling rule. Specify a rule by either name or ARN, but not both.

" + } + } + }, + "DeleteSamplingRuleResult":{ + "type":"structure", + "members":{ + "SamplingRuleRecord":{ + "shape":"SamplingRuleRecord", + "documentation":"

The deleted rule definition and metadata.

" + } + } + }, "Double":{"type":"double"}, "EC2InstanceId":{ "type":"string", @@ -309,7 +460,7 @@ }, "Status":{ "shape":"EncryptionStatus", - "documentation":"

The encryption status. After modifying encryption configuration with PutEncryptionConfig, the status can be UPDATING for up to one hour before X-Ray starts encrypting data with the new key.

" + "documentation":"

The encryption status. While the status is UPDATING, X-Ray may encrypt data with a combination of the new and old settings.

" }, "Type":{ "shape":"EncryptionType", @@ -375,6 +526,11 @@ "max":2000, "min":1 }, + "FixedRate":{ + "type":"double", + "max":1, + "min":0 + }, "GetEncryptionConfigRequest":{ "type":"structure", "members":{ @@ -389,6 +545,77 @@ } } }, + "GetSamplingRulesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"

Pagination token. Not used.

" + } + } + }, + "GetSamplingRulesResult":{ + "type":"structure", + "members":{ + "SamplingRuleRecords":{ + "shape":"SamplingRuleRecordList", + "documentation":"

Rule definitions and metadata.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Pagination token. Not used.

" + } + } + }, + "GetSamplingStatisticSummariesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"

Pagination token. Not used.

" + } + } + }, + "GetSamplingStatisticSummariesResult":{ + "type":"structure", + "members":{ + "SamplingStatisticSummaries":{ + "shape":"SamplingStatisticSummaryList", + "documentation":"

Information about the number of requests instrumented for each sampling rule.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Pagination token. Not used.

" + } + } + }, + "GetSamplingTargetsRequest":{ + "type":"structure", + "required":["SamplingStatisticsDocuments"], + "members":{ + "SamplingStatisticsDocuments":{ + "shape":"SamplingStatisticsDocumentList", + "documentation":"

Information about rules that the service is using to sample requests.

" + } + } + }, + "GetSamplingTargetsResult":{ + "type":"structure", + "members":{ + "SamplingTargetDocuments":{ + "shape":"SamplingTargetDocumentList", + "documentation":"

Updated rules that the service should use to sample requests.

" + }, + "LastRuleModification":{ + "shape":"Timestamp", + "documentation":"

The last time a user changed the sampling rule configuration. If the sampling rule configuration changed since the service last retrieved it, the service should call GetSamplingRules to get the latest version.

" + }, + "UnprocessedStatistics":{ + "shape":"UnprocessedStatisticsList", + "documentation":"

Information about SamplingStatisticsDocument that X-Ray could not process.

" + } + } + }, "GetServiceGraphRequest":{ "type":"structure", "required":[ @@ -508,6 +735,10 @@ } } }, + "HTTPMethod":{ + "type":"string", + "max":10 + }, "Histogram":{ "type":"list", "member":{"shape":"HistogramEntry"} @@ -526,6 +757,10 @@ }, "documentation":"

An entry in a histogram for a statistic. A histogram maps the range of observed values on the X axis, and the prevalence of each value on the Y axis.

" }, + "Host":{ + "type":"string", + "max":64 + }, "Hostname":{ "type":"string", "max":255 @@ -569,6 +804,11 @@ "NullableDouble":{"type":"double"}, "NullableInteger":{"type":"integer"}, "NullableLong":{"type":"long"}, + "Priority":{ + "type":"integer", + "max":9999, + "min":1 + }, "PutEncryptionConfigRequest":{ "type":"structure", "required":["Type"], @@ -638,10 +878,283 @@ } } }, + "RequestCount":{ + "type":"integer", + "min":0 + }, + "ReservoirSize":{ + "type":"integer", + "min":0 + }, "ResourceARN":{ "type":"string", "max":500 }, + "RuleLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

You have reached the maximum number of sampling rules.

", + "exception":true + }, + "RuleName":{ + "type":"string", + "max":32, + "min":1 + }, + "SampledCount":{ + "type":"integer", + "min":0 + }, + "SamplingRule":{ + "type":"structure", + "required":[ + "ResourceARN", + "Priority", + "FixedRate", + "ReservoirSize", + "ServiceName", + "ServiceType", + "Host", + "HTTPMethod", + "URLPath", + "Version" + ], + "members":{ + "RuleName":{ + "shape":"RuleName", + "documentation":"

The name of the sampling rule. Specify a rule by either name or ARN, but not both.

" + }, + "RuleARN":{ + "shape":"String", + "documentation":"

The ARN of the sampling rule. Specify a rule by either name or ARN, but not both.

" + }, + "ResourceARN":{ + "shape":"ResourceARN", + "documentation":"

Matches the ARN of the AWS resource on which the service runs.

" + }, + "Priority":{ + "shape":"Priority", + "documentation":"

The priority of the sampling rule.

" + }, + "FixedRate":{ + "shape":"FixedRate", + "documentation":"

The percentage of matching requests to instrument, after the reservoir is exhausted.

" + }, + "ReservoirSize":{ + "shape":"ReservoirSize", + "documentation":"

A fixed number of matching requests to instrument per second, prior to applying the fixed rate. The reservoir is not used directly by services, but applies to all services using the rule collectively.

" + }, + "ServiceName":{ + "shape":"ServiceName", + "documentation":"

Matches the name that the service uses to identify itself in segments.

" + }, + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

Matches the origin that the service uses to identify its type in segments.

" + }, + "Host":{ + "shape":"Host", + "documentation":"

Matches the hostname from a request URL.

" + }, + "HTTPMethod":{ + "shape":"HTTPMethod", + "documentation":"

Matches the HTTP method of a request.

" + }, + "URLPath":{ + "shape":"URLPath", + "documentation":"

Matches the path from a request URL.

" + }, + "Version":{ + "shape":"Version", + "documentation":"

The version of the sampling rule format (1).

" + }, + "Attributes":{ + "shape":"AttributeMap", + "documentation":"

Matches attributes derived from the request.

" + } + }, + "documentation":"

A sampling rule that services use to decide whether to instrument a request. Rule fields can match properties of the service, or properties of a request. The service can ignore rules that don't match its properties.

" + }, + "SamplingRuleRecord":{ + "type":"structure", + "members":{ + "SamplingRule":{ + "shape":"SamplingRule", + "documentation":"

The sampling rule.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

When the rule was created.

" + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

When the rule was last modified.

" + } + }, + "documentation":"

A SamplingRule and its metadata.

" + }, + "SamplingRuleRecordList":{ + "type":"list", + "member":{"shape":"SamplingRuleRecord"} + }, + "SamplingRuleUpdate":{ + "type":"structure", + "members":{ + "RuleName":{ + "shape":"RuleName", + "documentation":"

The name of the sampling rule. Specify a rule by either name or ARN, but not both.

" + }, + "RuleARN":{ + "shape":"String", + "documentation":"

The ARN of the sampling rule. Specify a rule by either name or ARN, but not both.

" + }, + "ResourceARN":{ + "shape":"ResourceARN", + "documentation":"

Matches the ARN of the AWS resource on which the service runs.

" + }, + "Priority":{ + "shape":"NullableInteger", + "documentation":"

The priority of the sampling rule.

" + }, + "FixedRate":{ + "shape":"NullableDouble", + "documentation":"

The percentage of matching requests to instrument, after the reservoir is exhausted.

" + }, + "ReservoirSize":{ + "shape":"NullableInteger", + "documentation":"

A fixed number of matching requests to instrument per second, prior to applying the fixed rate. The reservoir is not used directly by services, but applies to all services using the rule collectively.

" + }, + "Host":{ + "shape":"Host", + "documentation":"

Matches the hostname from a request URL.

" + }, + "ServiceName":{ + "shape":"ServiceName", + "documentation":"

Matches the name that the service uses to identify itself in segments.

" + }, + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

Matches the origin that the service uses to identify its type in segments.

" + }, + "HTTPMethod":{ + "shape":"HTTPMethod", + "documentation":"

Matches the HTTP method of a request.

" + }, + "URLPath":{ + "shape":"URLPath", + "documentation":"

Matches the path from a request URL.

" + }, + "Attributes":{ + "shape":"AttributeMap", + "documentation":"

Matches attributes derived from the request.

" + } + }, + "documentation":"

A document specifying changes to a sampling rule's configuration.

" + }, + "SamplingStatisticSummary":{ + "type":"structure", + "members":{ + "RuleName":{ + "shape":"String", + "documentation":"

The name of the sampling rule.

" + }, + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

The start time of the reporting window.

" + }, + "RequestCount":{ + "shape":"Integer", + "documentation":"

The number of requests that matched the rule.

" + }, + "BorrowCount":{ + "shape":"Integer", + "documentation":"

The number of requests recorded with borrowed reservoir quota.

" + }, + "SampledCount":{ + "shape":"Integer", + "documentation":"

The number of requests recorded.

" + } + }, + "documentation":"

Aggregated request sampling data for a sampling rule across all services for a 10 second window.

" + }, + "SamplingStatisticSummaryList":{ + "type":"list", + "member":{"shape":"SamplingStatisticSummary"} + }, + "SamplingStatisticsDocument":{ + "type":"structure", + "required":[ + "RuleName", + "ClientID", + "Timestamp", + "RequestCount", + "SampledCount" + ], + "members":{ + "RuleName":{ + "shape":"RuleName", + "documentation":"

The name of the sampling rule.

" + }, + "ClientID":{ + "shape":"ClientID", + "documentation":"

A unique identifier for the service in hexadecimal.

" + }, + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

The current time.

" + }, + "RequestCount":{ + "shape":"RequestCount", + "documentation":"

The number of requests that matched the rule.

" + }, + "SampledCount":{ + "shape":"SampledCount", + "documentation":"

The number of requests recorded.

" + }, + "BorrowCount":{ + "shape":"BorrowCount", + "documentation":"

The number of requests recorded with borrowed reservoir quota.

" + } + }, + "documentation":"

Request sampling results for a single rule from a service. Results are for the last 10 seconds unless the service has been assigned a longer reporting interval after a previous call to GetSamplingTargets.

" + }, + "SamplingStatisticsDocumentList":{ + "type":"list", + "member":{"shape":"SamplingStatisticsDocument"}, + "max":25 + }, + "SamplingTargetDocument":{ + "type":"structure", + "members":{ + "RuleName":{ + "shape":"String", + "documentation":"

The name of the sampling rule.

" + }, + "FixedRate":{ + "shape":"Double", + "documentation":"

The percentage of matching requests to instrument, after the reservoir is exhausted.

" + }, + "ReservoirQuota":{ + "shape":"NullableInteger", + "documentation":"

The number of requests per second that X-Ray allocated this service.

" + }, + "ReservoirQuotaTTL":{ + "shape":"Timestamp", + "documentation":"

When the reservoir quota expires.

" + }, + "Interval":{ + "shape":"NullableInteger", + "documentation":"

The number of seconds for the service to wait before getting sampling targets again.

" + } + }, + "documentation":"

Temporary changes to a sampling rule configuration. To meet the global sampling target for a rule, X-Ray calculates a new reservoir for each service based on the recent sampling results of all services that called GetSamplingTargets.

" + }, + "SamplingTargetDocumentList":{ + "type":"list", + "member":{"shape":"SamplingTargetDocument"} + }, "Segment":{ "type":"structure", "members":{ @@ -753,6 +1266,10 @@ "type":"list", "member":{"shape":"Service"} }, + "ServiceName":{ + "type":"string", + "max":64 + }, "ServiceNames":{ "type":"list", "member":{"shape":"String"} @@ -783,6 +1300,10 @@ }, "documentation":"

Response statistics for a service.

" }, + "ServiceType":{ + "type":"string", + "max":64 + }, "String":{"type":"string"}, "TelemetryRecord":{ "type":"structure", @@ -937,6 +1458,32 @@ "type":"list", "member":{"shape":"TraceUser"} }, + "URLPath":{ + "type":"string", + "max":128 + }, + "UnprocessedStatistics":{ + "type":"structure", + "members":{ + "RuleName":{ + "shape":"String", + "documentation":"

The name of the sampling rule.

" + }, + "ErrorCode":{ + "shape":"String", + "documentation":"

The error code.

" + }, + "Message":{ + "shape":"String", + "documentation":"

The error message.

" + } + }, + "documentation":"

Sampling statistics from a call to GetSamplingTargets that X-Ray could not process.

" + }, + "UnprocessedStatisticsList":{ + "type":"list", + "member":{"shape":"UnprocessedStatistics"} + }, "UnprocessedTraceIdList":{ "type":"list", "member":{"shape":"TraceId"} @@ -963,6 +1510,25 @@ "type":"list", "member":{"shape":"UnprocessedTraceSegment"} }, + "UpdateSamplingRuleRequest":{ + "type":"structure", + "required":["SamplingRuleUpdate"], + "members":{ + "SamplingRuleUpdate":{ + "shape":"SamplingRuleUpdate", + "documentation":"

The rule and fields to change.

" + } + } + }, + "UpdateSamplingRuleResult":{ + "type":"structure", + "members":{ + "SamplingRuleRecord":{ + "shape":"SamplingRuleRecord", + "documentation":"

The updated rule definition and metadata.

" + } + } + }, "ValueWithServiceIds":{ "type":"structure", "members":{ @@ -980,6 +1546,10 @@ "ValuesWithServiceIds":{ "type":"list", "member":{"shape":"ValueWithServiceIds"} + }, + "Version":{ + "type":"integer", + "min":1 } }, "documentation":"

AWS X-Ray provides APIs for managing debug traces and retrieving service maps and other data created by processing those traces.

" diff --git a/botocore/endpoint.py b/botocore/endpoint.py index ec335177..efa6be02 100644 --- a/botocore/endpoint.py +++ b/botocore/endpoint.py @@ -17,18 +17,12 @@ import logging import time import threading -from botocore.vendored.requests.adapters import HTTPAdapter -from botocore.vendored.requests.sessions import Session -from botocore.vendored.requests.utils import get_environ_proxies -from botocore.vendored.requests.exceptions import ConnectionError from botocore.vendored import six from botocore.awsrequest import create_request_object -from botocore.exceptions import UnknownEndpointError -from botocore.exceptions import EndpointConnectionError -from botocore.exceptions import ConnectionClosedError -from botocore.compat import filter_ssl_warnings -from botocore.utils import is_valid_endpoint_url +from botocore.exceptions import HTTPClientError +from botocore.httpsession import URLLib3Session +from botocore.utils import is_valid_endpoint_url, get_environ_proxies from botocore.hooks import first_non_none_response from botocore.history import get_global_history_recorder from botocore.response import StreamingBody @@ -39,13 +33,6 @@ logger = logging.getLogger(__name__) history_recorder = get_global_history_recorder() DEFAULT_TIMEOUT = 60 MAX_POOL_CONNECTIONS = 10 -filter_ssl_warnings() - -try: - from botocore.vendored.requests.packages.urllib3.contrib import pyopenssl - pyopenssl.extract_from_urllib3() -except ImportError: - pass def convert_to_response_dict(http_response, operation_model): @@ -83,30 +70,6 @@ def convert_to_response_dict(http_response, operation_model): return response_dict -class BotocoreHTTPSession(Session): - """Internal session class used to workaround requests behavior. - - This class is intended to be used only by the Endpoint class. 
- - """ - def __init__(self, max_pool_connections=MAX_POOL_CONNECTIONS, - http_adapter_cls=HTTPAdapter): - super(BotocoreHTTPSession, self).__init__() - # In order to support a user provided "max_pool_connections", we need - # to recreate the HTTPAdapter and pass in our max_pool_connections - # value. - adapter = http_adapter_cls(pool_maxsize=max_pool_connections) - # requests uses an HTTPAdapter for mounting both http:// and https:// - self.mount('https://', adapter) - self.mount('http://', adapter) - - def rebuild_auth(self, prepared_request, response): - # Keep the existing auth information from the original prepared request. - # Normally this method would be where auth is regenerated as needed. - # By making this a noop, we're keeping the existing auth info. - pass - - class Endpoint(object): """ Represents an endpoint for a particular service in a specific @@ -117,41 +80,37 @@ class Endpoint(object): :ivar host: The fully qualified endpoint hostname. :ivar session: The session object. 
""" - - def __init__(self, host, endpoint_prefix, - event_emitter, proxies=None, verify=True, - timeout=DEFAULT_TIMEOUT, response_parser_factory=None, - max_pool_connections=MAX_POOL_CONNECTIONS): + def __init__(self, host, endpoint_prefix, event_emitter, + response_parser_factory=None, http_session=None): self._endpoint_prefix = endpoint_prefix self._event_emitter = event_emitter self.host = host - self.verify = verify - if proxies is None: - proxies = {} - self.proxies = proxies - self.http_session = BotocoreHTTPSession( - max_pool_connections=max_pool_connections) - self.timeout = timeout - self.max_pool_connections = max_pool_connections - logger.debug('Setting %s timeout as %s', endpoint_prefix, self.timeout) self._lock = threading.Lock() if response_parser_factory is None: response_parser_factory = parsers.ResponseParserFactory() self._response_parser_factory = response_parser_factory + self.http_session = http_session + if self.http_session is None: + self.http_session = URLLib3Session() def __repr__(self): return '%s(%s)' % (self._endpoint_prefix, self.host) def make_request(self, operation_model, request_dict): - logger.debug("Making request for %s (verify_ssl=%s) with params: %s", - operation_model, self.verify, request_dict) + logger.debug("Making request for %s with params: %s", + operation_model, request_dict) return self._send_request(request_dict, operation_model) def create_request(self, params, operation_model=None): request = create_request_object(params) if operation_model: - event_name = 'request-created.{endpoint_prefix}.{op_name}'.format( - endpoint_prefix=self._endpoint_prefix, + request.stream_output = any([ + operation_model.has_streaming_output, + operation_model.has_event_stream_output + ]) + service_id = operation_model.service_model.service_id.hyphenize() + event_name = 'request-created.{service_id}.{op_name}'.format( + service_id=service_id, op_name=operation_model.name) self._event_emitter.emit(event_name, request=request, 
operation_name=operation_model.name) @@ -212,32 +171,14 @@ class Endpoint(object): 'url': request.url, 'body': request.body }) - streaming = any([ - operation_model.has_streaming_output, - operation_model.has_event_stream_output - ]) - http_response = self.http_session.send( - request, verify=self.verify, - stream=streaming, - proxies=self.proxies, timeout=self.timeout) - except ConnectionError as e: - # For a connection error, if it looks like it's a DNS - # lookup issue, 99% of the time this is due to a misconfigured - # region/endpoint so we'll raise a more specific error message - # to help users. - logger.debug("ConnectionError received when sending HTTP request.", - exc_info=True) - if self._looks_like_dns_error(e): - endpoint_url = e.request.url - better_exception = EndpointConnectionError( - endpoint_url=endpoint_url, error=e) - return (None, better_exception) - elif self._looks_like_bad_status_line(e): - better_exception = ConnectionClosedError( - endpoint_url=e.request.url, request=e.request) - return (None, better_exception) - else: - return (None, e) + service_id = operation_model.service_model.service_id.hyphenize() + event_name = 'before-send.%s.%s' % (service_id, operation_model.name) + responses = self._event_emitter.emit(event_name, request=request) + http_response = first_non_none_response(responses) + if http_response is None: + http_response = self._send(request) + except HTTPClientError as e: + return (None, e) except Exception as e: logger.debug("Exception received when sending HTTP request.", exc_info=True) @@ -257,16 +198,12 @@ class Endpoint(object): history_recorder.record('PARSED_RESPONSE', parsed_response) return (http_response, parsed_response), None - def _looks_like_dns_error(self, e): - return 'gaierror' in str(e) and e.request is not None - - def _looks_like_bad_status_line(self, e): - return 'BadStatusLine' in str(e) and e.request is not None - def _needs_retry(self, attempts, operation_model, request_dict, response=None, 
caught_exception=None): - event_name = 'needs-retry.%s.%s' % (self._endpoint_prefix, - operation_model.name) + service_id = operation_model.service_model.service_id.hyphenize() + event_name = 'needs-retry.%s.%s' % ( + service_id, + operation_model.name) responses = self._event_emitter.emit( event_name, response=response, endpoint=self, operation=operation_model, attempts=attempts, @@ -282,6 +219,9 @@ class Endpoint(object): time.sleep(handler_response) return True + def _send(self, request): + return self.http_session.send(request) + class EndpointCreator(object): def __init__(self, event_emitter): @@ -291,21 +231,30 @@ class EndpointCreator(object): verify=None, response_parser_factory=None, timeout=DEFAULT_TIMEOUT, max_pool_connections=MAX_POOL_CONNECTIONS, + http_session_cls=URLLib3Session, proxies=None): if not is_valid_endpoint_url(endpoint_url): raise ValueError("Invalid endpoint: %s" % endpoint_url) if proxies is None: proxies = self._get_proxies(endpoint_url) - return Endpoint( - endpoint_url, - endpoint_prefix=service_model.endpoint_prefix, - event_emitter=self._event_emitter, + endpoint_prefix = service_model.endpoint_prefix + + logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout) + http_session = http_session_cls( + timeout=timeout, proxies=proxies, verify=self._get_verify_value(verify), - timeout=timeout, max_pool_connections=max_pool_connections, - response_parser_factory=response_parser_factory) + ) + + return Endpoint( + endpoint_url, + endpoint_prefix=endpoint_prefix, + event_emitter=self._event_emitter, + response_parser_factory=response_parser_factory, + http_session=http_session + ) def _get_proxies(self, url): # We could also support getting proxies from a config file, diff --git a/botocore/exceptions.py b/botocore/exceptions.py index b5eb012a..36f5867a 100644 --- a/botocore/exceptions.py +++ b/botocore/exceptions.py @@ -12,7 +12,8 @@ # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. from __future__ import unicode_literals -from botocore.vendored.requests.exceptions import ConnectionError +from botocore.vendored import requests +from botocore.vendored.requests.packages import urllib3 class BotoCoreError(Exception): @@ -60,20 +61,43 @@ class ApiVersionNotFoundError(BotoCoreError): fmt = 'Unable to load data {data_path} for: {api_version}' -class EndpointConnectionError(BotoCoreError): - fmt = ( - 'Could not connect to the endpoint URL: "{endpoint_url}"') +class HTTPClientError(BotoCoreError): + fmt = 'An HTTP Client raised and unhandled exception: {error}' + def __init__(self, request=None, response=None, **kwargs): + self.request = request + self.response = response + super(HTTPClientError, self).__init__(**kwargs) -class ConnectionClosedError(ConnectionError): +class ConnectionError(BotoCoreError): + fmt = 'An HTTP Client failed to establish a connection: {error}' + + +class EndpointConnectionError(ConnectionError): + fmt = 'Could not connect to the endpoint URL: "{endpoint_url}"' + + +class SSLError(ConnectionError, requests.exceptions.SSLError): + fmt = 'SSL validation failed for {endpoint_url} {error}' + + +class ConnectionClosedError(HTTPClientError): fmt = ( 'Connection was closed before we received a valid response ' 'from endpoint URL: "{endpoint_url}".') - def __init__(self, **kwargs): - msg = self.fmt.format(**kwargs) - kwargs.pop('endpoint_url') - super(ConnectionClosedError, self).__init__(msg, **kwargs) + +class ReadTimeoutError(HTTPClientError, requests.exceptions.ReadTimeout, + urllib3.exceptions.ReadTimeoutError): + fmt = 'Read timeout on endpoint URL: "{endpoint_url}"' + + +class ConnectTimeoutError(ConnectionError, requests.exceptions.ConnectTimeout): + fmt = 'Connect timeout on endpoint URL: "{endpoint_url}"' + + +class ProxyConnectionError(ConnectionError, requests.exceptions.ProxyError): + fmt = 'Failed to connect to proxy 
URL: "{proxy_url}"' class NoCredentialsError(BotoCoreError): diff --git a/botocore/handlers.py b/botocore/handlers.py index 02604aaf..2bf7f709 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -37,6 +37,7 @@ from botocore.exceptions import AliasConflictParameterError from botocore.exceptions import UnsupportedTLSVersionWarning from botocore.utils import percent_encode, SAFE_CHARS from botocore.utils import switch_host_with_param +from botocore.utils import hyphenize_service_id from botocore import retryhandler from botocore import utils @@ -278,17 +279,18 @@ def register_retries_for_service(service_data, session, logger.debug("Not registering retry handlers, could not endpoint " "prefix from model for service %s", service_name) return + service_id = service_data.get('metadata', {}).get('serviceId') + service_event_name = hyphenize_service_id(service_id) config = _load_retry_config(loader, endpoint_prefix) if not config: return logger.debug("Registering retry handlers for service: %s", service_name) handler = retryhandler.create_retry_handler( config, endpoint_prefix) - unique_id = 'retry-config-%s' % endpoint_prefix - session.register('needs-retry.%s' % endpoint_prefix, + unique_id = 'retry-config-%s' % service_event_name + session.register('needs-retry.%s' % service_event_name, handler, unique_id=unique_id) - _register_for_operations(config, session, - service_name=endpoint_prefix) + _register_for_operations(config, session, service_event_name) def _load_retry_config(loader, endpoint_prefix): @@ -299,7 +301,7 @@ def _load_retry_config(loader, endpoint_prefix): return retry_config -def _register_for_operations(config, session, service_name): +def _register_for_operations(config, session, service_event_name): # There's certainly a tradeoff for registering the retry config # for the operations when the service is created. 
In practice, # there aren't a whole lot of per operation retry configs so @@ -308,8 +310,8 @@ def _register_for_operations(config, session, service_name): if key == '__default__': continue handler = retryhandler.create_retry_handler(config, key) - unique_id = 'retry-config-%s-%s' % (service_name, key) - session.register('needs-retry.%s.%s' % (service_name, key), + unique_id = 'retry-config-%s-%s' % (service_event_name, key) + session.register('needs-retry.%s.%s' % (service_event_name, key), handler, unique_id=unique_id) @@ -728,21 +730,38 @@ def decode_list_object(parsed, context, **kwargs): # Amazon S3 includes this element in the response, and returns encoded key # name values in the following response elements: # Delimiter, Marker, Prefix, NextMarker, Key. + _decode_list_object( + top_level_keys=['Delimiter', 'Marker', 'NextMarker'], + nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')], + parsed=parsed, + context=context + ) + +def decode_list_object_v2(parsed, context, **kwargs): + # From the documentation: If you specify encoding-type request parameter, + # Amazon S3 includes this element in the response, and returns encoded key + # name values in the following response elements: + # Delimiter, Prefix, ContinuationToken, Key, and StartAfter. + _decode_list_object( + top_level_keys=['Delimiter', 'Prefix', 'StartAfter'], + nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')], + parsed=parsed, + context=context + ) + +def _decode_list_object(top_level_keys, nested_keys, parsed, context): if parsed.get('EncodingType') == 'url' and \ context.get('encoding_type_auto_set'): # URL decode top-level keys in the response if present. - top_level_keys = ['Delimiter', 'Marker', 'NextMarker'] for key in top_level_keys: if key in parsed: parsed[key] = unquote_str(parsed[key]) # URL decode nested keys from the response if present. 
- nested_keys = [('Contents', 'Key'), ('CommonPrefixes', 'Prefix')] for (top_key, child_key) in nested_keys: if top_key in parsed: for member in parsed[top_key]: member[child_key] = unquote_str(member[child_key]) - def convert_body_to_file_like_object(params, **kwargs): if 'Body' in params: if isinstance(params['Body'], six.string_types): @@ -865,7 +884,6 @@ BUILTIN_HANDLERS = [ ('creating-client-class.kinesis', remove_subscribe_to_shard), ('creating-client-class', add_generate_presigned_url), ('creating-client-class.s3', add_generate_presigned_post), - ('creating-client-class.rds', add_generate_db_auth_token), ('creating-client-class.iot-data', check_openssl_supports_tls_version_1_2), ('after-call.iam', json_decode_policies), @@ -879,6 +897,8 @@ BUILTIN_HANDLERS = [ ('before-parameter-build.s3.ListObjects', set_list_objects_encoding_type_url), + ('before-parameter-build.s3.ListObjectsV2', + set_list_objects_encoding_type_url), ('before-call.s3.PutBucketTagging', calculate_md5), ('before-call.s3.PutBucketLifecycle', calculate_md5), ('before-call.s3.PutBucketLifecycleConfiguration', calculate_md5), @@ -913,14 +933,6 @@ BUILTIN_HANDLERS = [ ('before-call.glacier.UploadArchive', add_glacier_checksums), ('before-call.glacier.UploadMultipartPart', add_glacier_checksums), ('before-call.ec2.CopySnapshot', inject_presigned_url_ec2), - ('before-call.rds.CopyDBClusterSnapshot', - inject_presigned_url_rds), - ('before-call.rds.CreateDBCluster', - inject_presigned_url_rds), - ('before-call.rds.CopyDBSnapshot', - inject_presigned_url_rds), - ('before-call.rds.CreateDBInstanceReadReplica', - inject_presigned_url_rds), ('request-created.machinelearning.Predict', switch_host_machinelearning), ('needs-retry.s3.UploadPartCopy', check_for_200_error, REGISTER_FIRST), ('needs-retry.s3.CopyObject', check_for_200_error, REGISTER_FIRST), @@ -950,6 +962,7 @@ BUILTIN_HANDLERS = [ ('before-parameter-build.route53', fix_route53_ids), ('before-parameter-build.glacier', inject_account_id), 
('after-call.s3.ListObjects', decode_list_object), + ('after-call.s3.ListObjectsV2', decode_list_object_v2), # Cloudsearchdomain search operation will be sent by HTTP POST ('request-created.cloudsearchdomain.Search', @@ -975,16 +988,6 @@ BUILTIN_HANDLERS = [ ('docs.*.autoscaling.CreateLaunchConfiguration.complete-section', document_base64_encoding('UserData')), - # RDS PresignedUrl documentation customizations - ('docs.*.rds.CopyDBClusterSnapshot.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - ('docs.*.rds.CreateDBCluster.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - ('docs.*.rds.CopyDBSnapshot.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - ('docs.*.rds.CreateDBInstanceReadReplica.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - # EC2 CopySnapshot documentation customizations ('docs.*.ec2.CopySnapshot.complete-section', AutoPopulatedParam('PresignedUrl').document_auto_populated_param), @@ -1008,6 +1011,44 @@ BUILTIN_HANDLERS = [ 'PutBucketLifecycle', 'PutBucketLogging', 'PutBucketNotification', 'PutBucketPolicy', 'PutBucketReplication', 'PutBucketRequestPayment', 'PutBucketTagging', 'PutBucketVersioning', 'PutBucketWebsite', - 'PutObjectAcl']).hide_param) + 'PutObjectAcl']).hide_param), + + ############# + # RDS + ############# + ('creating-client-class.rds', add_generate_db_auth_token), + + ('before-call.rds.CopyDBClusterSnapshot', + inject_presigned_url_rds), + ('before-call.rds.CreateDBCluster', + inject_presigned_url_rds), + ('before-call.rds.CopyDBSnapshot', + inject_presigned_url_rds), + ('before-call.rds.CreateDBInstanceReadReplica', + inject_presigned_url_rds), + + # RDS PresignedUrl documentation customizations + ('docs.*.rds.CopyDBClusterSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), + ('docs.*.rds.CreateDBCluster.complete-section', + 
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), + ('docs.*.rds.CopyDBSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), + ('docs.*.rds.CreateDBInstanceReadReplica.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), + + ############# + # Neptune + ############# + ('before-call.neptune.CopyDBClusterSnapshot', + inject_presigned_url_rds), + ('before-call.neptune.CreateDBCluster', + inject_presigned_url_rds), + + # RDS PresignedUrl documentation customizations + ('docs.*.neptune.CopyDBClusterSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), + ('docs.*.neptune.CreateDBCluster.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), ] _add_parameter_aliases(BUILTIN_HANDLERS) diff --git a/botocore/hooks.py b/botocore/hooks.py index cd817a47..a5a297df 100644 --- a/botocore/hooks.py +++ b/botocore/hooks.py @@ -14,6 +14,7 @@ import copy import logging from collections import defaultdict, deque, namedtuple from botocore.compat import accepts_kwargs, six +from botocore.utils import EVENT_ALIASES logger = logging.getLogger(__name__) @@ -343,48 +344,94 @@ class HierarchicalEmitter(BaseEventHooks): return new_instance -class AliasedEventEmitter(HierarchicalEmitter): - EVENT_ALIASES = { - 'api.sagemaker': 'sagemaker' - } - - def __init__(self, event_aliases=None): - super(AliasedEventEmitter, self).__init__() +class EventAliaser(BaseEventHooks): + def __init__(self, event_emitter, event_aliases=None): self._event_aliases = event_aliases if event_aliases is None: - self._event_aliases = self.EVENT_ALIASES + self._event_aliases = EVENT_ALIASES + self._emitter = event_emitter - def _emit(self, event_name, kwargs, stop_on_response=False): + def emit(self, event_name, **kwargs): aliased_event_name = self._alias_event_name(event_name) - return super(AliasedEventEmitter, self)._emit( - aliased_event_name, 
kwargs, stop_on_response + return self._emitter.emit(aliased_event_name, **kwargs) + + def emit_until_response(self, event_name, **kwargs): + aliased_event_name = self._alias_event_name(event_name) + return self._emitter.emit_until_response(aliased_event_name, **kwargs) + + def register(self, event_name, handler, unique_id=None, + unique_id_uses_count=False): + aliased_event_name = self._alias_event_name(event_name) + return self._emitter.register( + aliased_event_name, handler, unique_id, unique_id_uses_count ) - def _verify_and_register(self, event_name, handler, unique_id, - register_method, unique_id_uses_count): + def register_first(self, event_name, handler, unique_id=None, + unique_id_uses_count=False): aliased_event_name = self._alias_event_name(event_name) - super(AliasedEventEmitter, self)._verify_and_register( - aliased_event_name, handler, unique_id, register_method, - unique_id_uses_count + return self._emitter.register_first( + aliased_event_name, handler, unique_id, unique_id_uses_count + ) + + def register_last(self, event_name, handler, unique_id=None, + unique_id_uses_count=False): + aliased_event_name = self._alias_event_name(event_name) + return self._emitter.register_last( + aliased_event_name, handler, unique_id, unique_id_uses_count ) def unregister(self, event_name, handler=None, unique_id=None, unique_id_uses_count=False): aliased_event_name = self._alias_event_name(event_name) - super(AliasedEventEmitter, self).unregister( + return self._emitter.unregister( aliased_event_name, handler, unique_id, unique_id_uses_count ) def _alias_event_name(self, event_name): for old_part, new_part in self._event_aliases.items(): - if old_part in event_name: - new_name = event_name.replace(old_part, new_part) - logger.debug("Changing event name from %s to %s" % ( - event_name, new_name - )) - return new_name + + # We can't simply do a string replace for everything, otherwise we + # might end up translating substrings that we never intended to + # 
translate. When there aren't any dots in the old event name + # part, then we can quickly replace the item in the list if it's + # there. + event_parts = event_name.split('.') + if '.' not in old_part: + try: + # Theoretically a given event name could have the same part + # repeated, but in practice this doesn't happen + event_parts[event_parts.index(old_part)] = new_part + except ValueError: + continue + + # If there's dots in the name, it gets more complicated. Now we + # have to replace multiple sections of the original event. + elif old_part in event_name: + old_parts = old_part.split('.') + self._replace_subsection(event_parts, old_parts, new_part) + else: + continue + + new_name = '.'.join(event_parts) + logger.debug("Changing event name from %s to %s" % ( + event_name, new_name + )) + return new_name return event_name + def _replace_subsection(self, sections, old_parts, new_part): + for i in range(len(sections)): + if sections[i] == old_parts[0] and \ + sections[i:i+len(old_parts)] == old_parts: + sections[i:i+len(old_parts)] = [new_part] + return + + def __copy__(self): + return self.__class__( + copy.copy(self._emitter), + copy.copy(self._event_aliases) + ) + class _PrefixTrie(object): """Specialized prefix trie that handles wildcards. 
diff --git a/botocore/httpsession.py b/botocore/httpsession.py new file mode 100644 index 00000000..28ad8667 --- /dev/null +++ b/botocore/httpsession.py @@ -0,0 +1,278 @@ +import os.path +import logging +import socket +from base64 import b64encode + +from urllib3 import PoolManager, ProxyManager, proxy_from_url, Timeout +from urllib3.util.ssl_ import ( + ssl, OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, DEFAULT_CIPHERS, +) +from urllib3.exceptions import SSLError as URLLib3SSLError +from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError +from urllib3.exceptions import ConnectTimeoutError as URLLib3ConnectTimeoutError +from urllib3.exceptions import NewConnectionError, ProtocolError, ProxyError +try: + # Always import the original SSLContext, even if it has been patched + from urllib3.contrib.pyopenssl import orig_util_SSLContext as SSLContext +except ImportError: + from urllib3.util.ssl_ import SSLContext + +import botocore.awsrequest +from botocore.vendored import six +from botocore.vendored.six.moves.urllib_parse import unquote +from botocore.compat import filter_ssl_warnings, urlparse +from botocore.exceptions import ( + ConnectionClosedError, EndpointConnectionError, HTTPClientError, + ReadTimeoutError, ProxyConnectionError, ConnectTimeoutError, SSLError +) + +filter_ssl_warnings() +logger = logging.getLogger(__name__) +DEFAULT_TIMEOUT = 60 +MAX_POOL_CONNECTIONS = 10 +DEFAULT_CA_BUNDLE = os.path.join(os.path.dirname(__file__), 'cacert.pem') + +try: + from certifi import where +except ImportError: + def where(): + return DEFAULT_CA_BUNDLE + + +def get_cert_path(verify): + if verify is not True: + return verify + + return where() + + +def create_urllib3_context(ssl_version=None, cert_reqs=None, + options=None, ciphers=None): + """ This function is a vendored version of the same function in urllib3 + + We vendor this function to ensure that the SSL contexts we construct + always use the std lib SSLContext instead of pyopenssl. 
+ """ + context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) + + # Setting the default here, as we may have no ssl module on import + cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs + + if options is None: + options = 0 + # SSLv2 is easily broken and is considered harmful and dangerous + options |= OP_NO_SSLv2 + # SSLv3 has several problems and is now dangerous + options |= OP_NO_SSLv3 + # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ + # (issue urllib3#309) + options |= OP_NO_COMPRESSION + + context.options |= options + + if getattr(context, 'supports_set_ciphers', True): + # Platform-specific: Python 2.6 + context.set_ciphers(ciphers or DEFAULT_CIPHERS) + + context.verify_mode = cert_reqs + if getattr(context, 'check_hostname', None) is not None: + # Platform-specific: Python 3.2 + # We do our own verification, including fingerprints and alternative + # hostnames. So disable it here + context.check_hostname = False + return context + + +class ProxyConfiguration(object): + """Represents a proxy configuration dictionary. + + This class represents a proxy configuration dictionary and provides utility + functions to retreive well structured proxy urls and proxy headers from the + proxy configuration dictionary. + """ + def __init__(self, proxies=None): + if proxies is None: + proxies = {} + self._proxies = proxies + + def proxy_url_for(self, url): + """Retrirves the corresponding proxy url for a given url. """ + parsed_url = urlparse(url) + proxy = self._proxies.get(parsed_url.scheme) + if proxy: + proxy = self._fix_proxy_url(proxy) + return proxy + + def proxy_headers_for(self, proxy_url): + """Retrirves the corresponding proxy headers for a given proxy url. 
""" + headers = {} + username, password = self._get_auth_from_url(proxy_url) + if username and password: + basic_auth = self._construct_basic_auth(username, password) + headers['Proxy-Authorization'] = basic_auth + return headers + + def _fix_proxy_url(self, proxy_url): + if proxy_url.startswith('http:') or proxy_url.startswith('https:'): + return proxy_url + elif proxy_url.startswith('//'): + return 'http:' + proxy_url + else: + return 'http://' + proxy_url + + def _construct_basic_auth(self, username, password): + auth_str = '{0}:{1}'.format(username, password) + encoded_str = b64encode(auth_str.encode('ascii')).strip().decode() + return 'Basic {0}'.format(encoded_str) + + def _get_auth_from_url(self, url): + parsed_url = urlparse(url) + try: + return unquote(parsed_url.username), unquote(parsed_url.password) + except (AttributeError, TypeError): + return None, None + + +class URLLib3Session(object): + """A basic HTTP client that supports connection pooling and proxies. + + This class is inspired by requests.adapters.HTTPAdapter, but has been + boiled down to meet the use cases needed by botocore. For the most part + this classes matches the functionality of HTTPAdapter in requests v2.7.0 + (the same as our vendored version). The only major difference of note is + that we currently do not support sending chunked requests. While requests + v2.7.0 implemented this themselves, later version urllib3 support this + directly via a flag to urlopen so enabling it if needed should be trivial. 
+ """ + def __init__(self, + verify=True, + proxies=None, + timeout=None, + max_pool_connections=MAX_POOL_CONNECTIONS, + ): + self._verify = verify + self._proxy_config = ProxyConfiguration(proxies=proxies) + self._pool_classes_by_scheme = { + 'http': botocore.awsrequest.AWSHTTPConnectionPool, + 'https': botocore.awsrequest.AWSHTTPSConnectionPool, + } + if timeout is None: + timeout = DEFAULT_TIMEOUT + if not isinstance(timeout, (int, float)): + timeout = Timeout(connect=timeout[0], read=timeout[1]) + self._timeout = timeout + self._max_pool_connections = max_pool_connections + self._proxy_managers = {} + self._manager = PoolManager( + strict=True, + timeout=self._timeout, + maxsize=self._max_pool_connections, + ssl_context=self._get_ssl_context(), + ) + self._manager.pool_classes_by_scheme = self._pool_classes_by_scheme + + def _get_ssl_context(self): + return create_urllib3_context() + + def _get_proxy_manager(self, proxy_url): + if proxy_url not in self._proxy_managers: + proxy_headers = self._proxy_config.proxy_headers_for(proxy_url) + proxy_manager = proxy_from_url( + proxy_url, + strict=True, + timeout=self._timeout, + proxy_headers=proxy_headers, + maxsize=self._max_pool_connections, + ssl_context=self._get_ssl_context(), + ) + proxy_manager.pool_classes_by_scheme = self._pool_classes_by_scheme + self._proxy_managers[proxy_url] = proxy_manager + + return self._proxy_managers[proxy_url] + + def _path_url(self, url): + parsed_url = urlparse(url) + path = parsed_url.path + if not path: + path = '/' + if parsed_url.query: + path = path + '?' 
+ parsed_url.query + return path + + def _setup_ssl_cert(self, conn, url, verify): + if url.lower().startswith('https') and verify: + conn.cert_reqs = 'CERT_REQUIRED' + conn.ca_certs = get_cert_path(verify) + else: + conn.cert_reqs = 'CERT_NONE' + conn.ca_certs = None + + def _get_connection_manager(self, url, proxy_url=None): + if proxy_url: + manager = self._get_proxy_manager(proxy_url) + else: + manager = self._manager + return manager + + def _get_request_target(self, url, proxy_url): + if proxy_url and url.startswith('http:'): + # HTTP proxies expect the request_target to be the absolute url to + # know which host to establish a connection to + return url + else: + # otherwise just set the request target to the url path + return self._path_url(url) + + def send(self, request): + try: + proxy_url = self._proxy_config.proxy_url_for(request.url) + manager = self._get_connection_manager(request.url, proxy_url) + conn = manager.connection_from_url(request.url) + self._setup_ssl_cert(conn, request.url, self._verify) + + request_target = self._get_request_target(request.url, proxy_url) + urllib_response = conn.urlopen( + method=request.method, + url=request_target, + body=request.body, + headers=request.headers, + retries=False, + assert_same_host=False, + preload_content=False, + decode_content=False, + ) + + http_response = botocore.awsrequest.AWSResponse( + request.url, + urllib_response.status, + urllib_response.headers, + urllib_response, + ) + + if not request.stream_output: + # Cause the raw stream to be exhausted immediately. 
We do it + # this way instead of using preload_content because + # preload_content will never buffer chunked responses + http_response.content + + return http_response + except URLLib3SSLError as e: + raise SSLError(endpoint_url=request.url, error=e) + except (NewConnectionError, socket.gaierror) as e: + raise EndpointConnectionError(endpoint_url=request.url, error=e) + except ProxyError as e: + raise ProxyConnectionError(proxy_url=proxy_url, error=e) + except URLLib3ConnectTimeoutError as e: + raise ConnectTimeoutError(endpoint_url=request.url, error=e) + except URLLib3ReadTimeoutError as e: + raise ReadTimeoutError(endpoint_url=request.url, error=e) + except ProtocolError as e: + raise ConnectionClosedError( + error=e, + request=request, + endpoint_url=request.url + ) + except Exception as e: + message = 'Exception received when sending urllib3 HTTP request' + logger.debug(message, exc_info=True) + raise HTTPClientError(error=e) diff --git a/botocore/model.py b/botocore/model.py index 295dd47c..91e2edb4 100644 --- a/botocore/model.py +++ b/botocore/model.py @@ -13,7 +13,7 @@ """Abstractions to interact with service models.""" from collections import defaultdict -from botocore.utils import CachedProperty, instance_cache +from botocore.utils import CachedProperty, instance_cache, hyphenize_service_id from botocore.compat import OrderedDict @@ -40,6 +40,11 @@ class UndefinedModelAttributeError(Exception): pass +class ServiceId(str): + def hyphenize(self): + return hyphenize_service_id(self) + + class Shape(object): """Object representing a shape from the service model.""" # To simplify serialization logic, all shape params that are @@ -286,7 +291,7 @@ class ServiceModel(object): @CachedProperty def service_id(self): - return self._get_metadata_property('serviceId') + return ServiceId(self._get_metadata_property('serviceId')) @CachedProperty def signing_name(self): @@ -334,6 +339,10 @@ class ServiceModel(object): def signature_version(self, value): 
self._signature_version = value + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self.service_name) + + class OperationModel(object): def __init__(self, operation_model, service_model, name=None): diff --git a/botocore/parsers.py b/botocore/parsers.py index 6273c89c..9c1a44da 100644 --- a/botocore/parsers.py +++ b/botocore/parsers.py @@ -124,7 +124,7 @@ from botocore.compat import six, XMLParseError from botocore.eventstream import EventStream from botocore.utils import parse_timestamp, merge_dicts, \ - is_json_value_header + is_json_value_header, lowercase_dict LOG = logging.getLogger(__name__) @@ -250,7 +250,11 @@ class ResponseParser(object): if isinstance(parsed, dict): response_metadata = parsed.get('ResponseMetadata', {}) response_metadata['HTTPStatusCode'] = response['status_code'] - response_metadata['HTTPHeaders'] = dict(response['headers']) + # Ensure that the http header keys are all lower cased. Older + # versions of urllib3 (< 1.11) would unintentionally do this for us + # (see urllib3#633). We need to do this conversion manually now. + headers = response['headers'] + response_metadata['HTTPHeaders'] = lowercase_dict(headers) parsed['ResponseMetadata'] = response_metadata return parsed diff --git a/botocore/response.py b/botocore/response.py index 7b58c9e7..f3c5bbaa 100644 --- a/botocore/response.py +++ b/botocore/response.py @@ -19,7 +19,8 @@ import logging from botocore import ScalarTypes from botocore.hooks import first_non_none_response from botocore.compat import json, set_socket_timeout, XMLParseError -from botocore.exceptions import IncompleteReadError +from botocore.exceptions import IncompleteReadError, ReadTimeoutError +from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError from botocore import parsers @@ -73,7 +74,11 @@ class StreamingBody(object): If the amt argument is omitted, read all data. 
""" - chunk = self._raw_stream.read(amt) + try: + chunk = self._raw_stream.read(amt) + except URLLib3ReadTimeoutError as e: + # TODO: the url will be None as urllib3 isn't setting it yet + raise ReadTimeoutError(endpoint_url=e.url, error=e) self._amount_read += len(chunk) if amt is None or (not chunk and amt > 0): # If the server sends empty contents or diff --git a/botocore/retryhandler.py b/botocore/retryhandler.py index 09b59e55..d7385b20 100644 --- a/botocore/retryhandler.py +++ b/botocore/retryhandler.py @@ -17,10 +17,10 @@ import functools import logging from binascii import crc32 -from botocore.vendored.requests import ConnectionError, Timeout -from botocore.vendored.requests.packages.urllib3.exceptions import ClosedPoolError - -from botocore.exceptions import ChecksumError, EndpointConnectionError +from botocore.exceptions import ( + ChecksumError, EndpointConnectionError, ReadTimeoutError, + ConnectionError, ConnectionClosedError, +) logger = logging.getLogger(__name__) @@ -30,7 +30,7 @@ logger = logging.getLogger(__name__) # this mapping with more specific exceptions. 
EXCEPTION_MAP = { 'GENERAL_CONNECTION_ERROR': [ - ConnectionError, ClosedPoolError, Timeout, + ConnectionError, ConnectionClosedError, ReadTimeoutError, EndpointConnectionError ], } diff --git a/botocore/session.py b/botocore/session.py index facb16d1..35677a7e 100644 --- a/botocore/session.py +++ b/botocore/session.py @@ -30,7 +30,8 @@ from botocore.exceptions import ConfigNotFound, ProfileNotFound from botocore.exceptions import UnknownServiceError, PartialCredentialsError from botocore.errorfactory import ClientExceptionsFactory from botocore import handlers -from botocore.hooks import AliasedEventEmitter, first_non_none_response +from botocore.hooks import HierarchicalEmitter, first_non_none_response +from botocore.hooks import EventAliaser from botocore.loaders import create_loader from botocore.parsers import ResponseParserFactory from botocore.regions import EndpointResolver @@ -38,6 +39,7 @@ from botocore.model import ServiceModel from botocore import paginate from botocore import waiter from botocore import retryhandler, translate +from botocore.utils import EVENT_ALIASES logger = logging.getLogger(__name__) @@ -139,9 +141,10 @@ class Session(object): if session_vars: self.session_var_map.update(session_vars) if event_hooks is None: - self._events = AliasedEventEmitter() + self._original_handler = HierarchicalEmitter() else: - self._events = event_hooks + self._original_handler = event_hooks + self._events = EventAliaser(self._original_handler) if include_builtin_handlers: self._register_builtin_handlers(self._events) self.user_agent_name = 'Botocore' @@ -559,7 +562,8 @@ class Session(object): type_name='service-2', api_version=api_version ) - self._events.emit('service-data-loaded.%s' % service_name, + service_id = EVENT_ALIASES.get(service_name, service_name) + self._events.emit('service-data-loaded.%s' % service_id, service_data=service_data, service_name=service_name, session=self) return service_data diff --git a/botocore/signers.py 
b/botocore/signers.py index be755849..50689549 100644 --- a/botocore/signers.py +++ b/botocore/signers.py @@ -38,8 +38,9 @@ class RequestSigner(object): signing pipeline, including overrides, request path manipulation, and disabling signing per operation. - :type service_name: string - :param service_name: Name of the service, e.g. ``S3`` + + :type service_id: botocore.model.ServiceId + :param service_id: The service id for the service, e.g. ``S3`` :type region_name: string :param region_name: Name of the service region, e.g. ``us-east-1`` @@ -57,15 +58,14 @@ class RequestSigner(object): :type event_emitter: :py:class:`~botocore.hooks.BaseEventHooks` :param event_emitter: Extension mechanism to fire events. - """ - def __init__(self, service_name, region_name, signing_name, + def __init__(self, service_id, region_name, signing_name, signature_version, credentials, event_emitter): - self._service_name = service_name self._region_name = region_name self._signing_name = signing_name self._signature_version = signature_version self._credentials = credentials + self._service_id = service_id # We need weakref to prevent leaking memory in Python 2.6 on Linux 2.6 self._event_emitter = weakref.proxy(event_emitter) @@ -128,7 +128,8 @@ class RequestSigner(object): # Allow mutating request before signing self._event_emitter.emit( - 'before-sign.{0}.{1}'.format(self._service_name, operation_name), + 'before-sign.{0}.{1}'.format( + self._service_id.hyphenize(), operation_name), request=request, signing_name=signing_name, region_name=self._region_name, signature_version=signature_version, request_signer=self, @@ -177,7 +178,8 @@ class RequestSigner(object): signature_version += suffix handler, response = self._event_emitter.emit_until_response( - 'choose-signer.{0}.{1}'.format(self._service_name, operation_name), + 'choose-signer.{0}.{1}'.format( + self._service_id.hyphenize(), operation_name), signing_name=self._signing_name, region_name=self._region_name, 
signature_version=signature_version, context=context) diff --git a/botocore/stub.py b/botocore/stub.py index 312a6cef..5c421b09 100644 --- a/botocore/stub.py +++ b/botocore/stub.py @@ -17,7 +17,7 @@ from pprint import pformat from botocore.validate import validate_parameters from botocore.exceptions import ParamValidationError, \ StubResponseError, StubAssertionError, UnStubbedResponseError -from botocore.vendored.requests.models import Response +from botocore.awsrequest import AWSResponse class _ANY(object): @@ -233,9 +233,7 @@ class Stubber(object): % (self.client.meta.service_model.service_name, method)) # Create a successful http response - http_response = Response() - http_response.status_code = 200 - http_response.reason = 'OK' + http_response = AWSResponse(None, 200, {}, None) operation_name = self.client.meta.method_to_api_mapping.get(method) self._validate_response(operation_name, service_response) @@ -285,8 +283,7 @@ class Stubber(object): :type response_meta: dict """ - http_response = Response() - http_response.status_code = http_status_code + http_response = AWSResponse(None, http_status_code, {}, None) # We don't look to the model to build this because the caller would # need to know the details of what the HTTP body would need to diff --git a/botocore/utils.py b/botocore/utils.py index 48b1b451..74b41078 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -20,18 +20,23 @@ import functools import weakref import random import os +import socket +import cgi import dateutil.parser from dateutil.tz import tzlocal, tzutc import botocore -from botocore.exceptions import InvalidExpressionError, ConfigNotFound -from botocore.exceptions import InvalidDNSNameError, ClientError -from botocore.exceptions import MetadataRetrievalError +import botocore.awsrequest +import botocore.httpsession from botocore.compat import json, quote, zip_longest, urlsplit, urlunsplit -from botocore.vendored import requests -from botocore.compat import OrderedDict, six - +from 
botocore.compat import OrderedDict, six, urlparse +from botocore.vendored.six.moves.urllib.request import getproxies, proxy_bypass +from botocore.exceptions import ( + InvalidExpressionError, ConfigNotFound, InvalidDNSNameError, ClientError, + MetadataRetrievalError, EndpointConnectionError, ReadTimeoutError, + ConnectionClosedError, ConnectTimeoutError, +) logger = logging.getLogger(__name__) DEFAULT_METADATA_SERVICE_TIMEOUT = 1 @@ -42,8 +47,88 @@ METADATA_SECURITY_CREDENTIALS_URL = ( # Based on rfc2986, section 2.3 SAFE_CHARS = '-._~' LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]') -RETRYABLE_HTTP_ERRORS = (requests.Timeout, requests.ConnectionError) +RETRYABLE_HTTP_ERRORS = ( + ReadTimeoutError, EndpointConnectionError, ConnectionClosedError, + ConnectTimeoutError, +) S3_ACCELERATE_WHITELIST = ['dualstack'] +# In switching events from using service name / endpoint prefix to service +# id, we have to preserve compatibility. This maps the instances where either +# is different than the transformed service id. 
+EVENT_ALIASES = { + "a4b": "alexa-for-business", + "alexaforbusiness": "alexa-for-business", + "api.mediatailor": "mediatailor", + "api.pricing": "pricing", + "api.sagemaker": "sagemaker", + "apigateway": "api-gateway", + "application-autoscaling": "application-auto-scaling", + "appstream2": "appstream", + "autoscaling": "auto-scaling", + "autoscaling-plans": "auto-scaling-plans", + "ce": "cost-explorer", + "cloudhsmv2": "cloudhsm-v2", + "cloudsearchdomain": "cloudsearch-domain", + "cognito-idp": "cognito-identity-provider", + "config": "config-service", + "cur": "cost-and-usage-report-service", + "data.iot": "iot-data-plane", + "data.jobs.iot": "iot-jobs-data-plane", + "data.mediastore": "mediastore-data", + "datapipeline": "data-pipeline", + "devicefarm": "device-farm", + "devices.iot1click": "iot-1click-devices-service", + "directconnect": "direct-connect", + "discovery": "application-discovery-service", + "dms": "database-migration-service", + "ds": "directory-service", + "dynamodbstreams": "dynamodb-streams", + "elasticbeanstalk": "elastic-beanstalk", + "elasticfilesystem": "efs", + "elasticloadbalancing": "elastic-load-balancing", + "elasticmapreduce": "emr", + "elastictranscoder": "elastic-transcoder", + "elb": "elastic-load-balancing", + "elbv2": "elastic-load-balancing-v2", + "email": "ses", + "entitlement.marketplace": "marketplace-entitlement-service", + "es": "elasticsearch-service", + "events": "cloudwatch-events", + "iot-data": "iot-data-plane", + "iot-jobs-data": "iot-jobs-data-plane", + "iot1click-devices": "iot-1click-devices-service", + "iot1click-projects": "iot-1click-projects", + "kinesisanalytics": "kinesis-analytics", + "kinesisvideo": "kinesis-video", + "lex-models": "lex-model-building-service", + "lex-runtime": "lex-runtime-service", + "logs": "cloudwatch-logs", + "machinelearning": "machine-learning", + "marketplace-entitlement": "marketplace-entitlement-service", + "marketplacecommerceanalytics": "marketplace-commerce-analytics", + 
"metering.marketplace": "marketplace-metering", + "meteringmarketplace": "marketplace-metering", + "mgh": "migration-hub", + "models.lex": "lex-model-building-service", + "monitoring": "cloudwatch", + "mturk-requester": "mturk", + "opsworks-cm": "opsworkscm", + "projects.iot1click": "iot-1click-projects", + "resourcegroupstaggingapi": "resource-groups-tagging-api", + "route53": "route-53", + "route53domains": "route-53-domains", + "runtime.lex": "lex-runtime-service", + "runtime.sagemaker": "sagemaker-runtime", + "sdb": "simpledb", + "secretsmanager": "secrets-manager", + "serverlessrepo": "serverlessapplicationrepository", + "servicecatalog": "service-catalog", + "states": "sfn", + "stepfunctions": "sfn", + "storagegateway": "storage-gateway", + "streams.dynamodb": "dynamodb-streams", + "tagging": "resource-groups-tagging-api" +} class _RetriesExceededError(Exception): @@ -170,6 +255,10 @@ class InstanceMetadataFetcher(object): self._disabled = env.get('AWS_EC2_METADATA_DISABLED', 'false').lower() self._disabled = self._disabled == 'true' self._user_agent = user_agent + self._session = botocore.httpsession.URLLib3Session( + timeout=self._timeout, + proxies=get_environ_proxies(self._url), + ) def _get_request(self, url, timeout, num_attempts=1): if self._disabled: @@ -182,7 +271,9 @@ class InstanceMetadataFetcher(object): for i in range(num_attempts): try: - response = requests.get(url, timeout=timeout, headers=headers) + AWSRequest = botocore.awsrequest.AWSRequest + request = AWSRequest(method='GET', url=url, headers=headers) + response = self._session.send(request.prepare()) except RETRYABLE_HTTP_ERRORS as e: logger.debug("Caught exception while trying to retrieve " "credentials: %s", e, exc_info=True) @@ -264,6 +355,14 @@ def merge_dicts(dict1, dict2, append_lists=False): dict1[key] = dict2[key] +def lowercase_dict(original): + """Copies the given dictionary ensuring all keys are lowercase strings. 
""" + copy = {} + for key in original: + copy[key.lower()] = original[key] + return copy + + def parse_key_val_file(filename, _open=open): try: with _open(filename) as f: @@ -874,6 +973,14 @@ def deep_merge(base, extra): base[key] = extra[key] +def hyphenize_service_id(service_id): + """Translate the form used for event emitters. + + :param service_id: The service_id to convert. + """ + return service_id.replace(' ', '-').lower() + + class S3RegionRedirector(object): def __init__(self, endpoint_bridge, client, cache=None): self._endpoint_resolver = endpoint_bridge @@ -930,9 +1037,12 @@ class S3RegionRedirector(object): error_code == 'AuthorizationHeaderMalformed' and 'Region' in error ) + is_redirect_status = response[0] is not None and \ + response[0].status_code in [301, 302, 307] is_permanent_redirect = error_code == 'PermanentRedirect' if not any([is_special_head_object, is_wrong_signing_region, - is_permanent_redirect, is_special_head_bucket]): + is_permanent_redirect, is_special_head_bucket, + is_redirect_status]): return bucket = request_dict['context']['signing']['bucket'] @@ -1030,9 +1140,9 @@ class ContainerMetadataFetcher(object): def __init__(self, session=None, sleep=time.sleep): if session is None: - session = requests.Session() - session.trust_env = False - session.proxies = {} + session = botocore.httpsession.URLLib3Session( + timeout=self.TIMEOUT_SECONDS + ) self._session = session self._sleep = sleep @@ -1093,18 +1203,20 @@ class ContainerMetadataFetcher(object): def _get_response(self, full_url, headers, timeout): try: - response = self._session.get(full_url, headers=headers, - timeout=timeout) + AWSRequest = botocore.awsrequest.AWSRequest + request = AWSRequest(method='GET', url=full_url, headers=headers) + response = self._session.send(request.prepare()) + response_text = response.content.decode('utf-8') if response.status_code != 200: raise MetadataRetrievalError( error_msg="Received non 200 response (%s) from ECS metadata: %s" - % 
(response.status_code, response_text)) try: - return json.loads(response.text) + return json.loads(response_text) except ValueError: raise MetadataRetrievalError( error_msg=("Unable to parse JSON returned from " - "ECS metadata: %s" % response.text)) + "ECS metadata: %s" % response_text)) except RETRYABLE_HTTP_ERRORS as e: error_msg = ("Received error when attempting to retrieve " "ECS metadata: %s" % e) @@ -1112,3 +1224,52 @@ class ContainerMetadataFetcher(object): def full_url(self, relative_uri): return 'http://%s%s' % (self.IP_ADDRESS, relative_uri) + + +def get_environ_proxies(url): + if should_bypass_proxies(url): + return {} + else: + return getproxies() + + +def should_bypass_proxies(url): + """ + Returns whether we should bypass proxies or not. + """ + # NOTE: requests allowed ip/cidr entries in the no_proxy env var, which we + # don't currently support, as urllib only checks the DNS suffix + # If the system proxy settings indicate that this URL should be bypassed, + # don't proxy. + # The proxy_bypass function is incredibly buggy on OS X in early versions + # of Python 2.6, so allow this call to fail. Only catch the specific + # exceptions we've seen, though: this call failing in other ways can reveal + # legitimate problems. + try: + if proxy_bypass(urlparse(url).netloc): + return True + except (TypeError, socket.gaierror): + pass + + return False + + +def get_encoding_from_headers(headers, default='ISO-8859-1'): + """Returns encodings from given HTTP Header Dict. + + :param headers: dictionary to extract encoding from. 
+ :param default: default encoding if the content-type is text + """ + + content_type = headers.get('content-type') + + if not content_type: + return None + + content_type, params = cgi.parse_header(content_type) + + if 'charset' in params: + return params['charset'].strip("'\"") + + if 'text' in content_type: + return default diff --git a/docs/source/_static/404.html b/docs/source/_static/404.html new file mode 100644 index 00000000..6e056fd6 --- /dev/null +++ b/docs/source/_static/404.html @@ -0,0 +1,34 @@ + + + + + Page Not Found + + + +

Page Not Found

+

Sorry, the page you requested could not be found.

+ diff --git a/docs/source/conf.py b/docs/source/conf.py index 2938d824..30cc9fc5 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -52,9 +52,9 @@ copyright = u'2013, Mitch Garnaat' # built documents. # # The short X.Y version. -version = '1.10.' +version = '1.12.' # The full version, including alpha/beta/rc tags. -release = '1.10.78' +release = '1.12.16' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/source/index.rst b/docs/source/index.rst index 07b0446d..f6fbe722 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -25,6 +25,129 @@ Contents: Upgrade Notes ============= +Upgrading to 1.12.0 +------------------- + +What Changed +~~~~~~~~~~~~ + +The botocore event system was changed to emit events based on the service id +rather than the endpoint prefix or service name. + +Why Was The Change Made +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This was done to handle several issues that were becoming increasingly +problematic: + +* Services changing their endpoint prefix would cause some registered events to + no longer fire (but not all). +* New services that launch using an endpoint that another service is using + won't be able to be uniquely selected. There are a number of cases of this + already. +* Services whose client name and endpoint prefix differed would require two + different strings if you want to register against all events. + +How Do I Know If I'm Impacted +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Any users relying on registering an event against one service impacting other +services are impacted. You can consult the following table to see if you are +impacted. If you are registering an event using one of the event parts in the +leftmost column with the intention of impacting an unintended target service +in the rightmost column, then you are impacted and will need to update. 
+ ++----------------------+-------------------------+---------------------------------------------------+ +| Event Part | Intended Target Service | Unintended Target Services | ++----------------------+-------------------------+---------------------------------------------------+ +| rds | rds | neptune | ++----------------------+-------------------------+---------------------------------------------------+ +| autoscaling | autoscaling | application-autoscaling, autoscaling-plans | ++----------------------+-------------------------+---------------------------------------------------+ +| kinesisvideo | kinesisvideo | kinesis-video-media, kinesis-video-archived-media | ++----------------------+-------------------------+---------------------------------------------------+ +| elasticloadbalancing | elb | elbv2 | ++----------------------+-------------------------+---------------------------------------------------+ + +For example, if you are registering an event against +``before-call.elasticloadbalancing`` expecting it to run when making calls with +an ``elbv2`` client, you will be impacted. + +If you are registering an event against one of the services in the Unintended +Targets column, you may be impacted if you were relying on those events not +firing. + +If you are registering events using ``*`` in the service place, or are +registering against any service not in this table, you will not need a code +change. In many cases the actual event name will have changed, but for services +without shared endpoints we do the work of translating the event name at +registration and emission time. In future versions of botocore we will remove +this translation, so you may wish to update your code anyway. + +How Do I Update My Code +~~~~~~~~~~~~~~~~~~~~~~~ + +You will need to look at the events you are registering against and determine +which services you wish to impact with your handler. 
If you only wish to +impact the intended target service (as defined in the above table), then you +don't need to change the event. If you wish to impact another service in +addition to the intended target service, you will need to register a new event +using that service's event name. Similarly, if you wish to impact another +service instead you will simply need to change the event you are registered +against. + +To get the new event name, consult this table: + ++------------------------------+----------------------+------------------------------+ +| Service | Old Event Name | New Event Name | ++------------------------------+----------------------+------------------------------+ +| application-autoscaling | autoscaling | application-auto-scaling | ++------------------------------+----------------------+------------------------------+ +| autoscaling-plans | autoscaling | auto-scaling-plans | ++------------------------------+----------------------+------------------------------+ +| elbv2 | elasticloadbalancing | elastic-load-balancing | ++------------------------------+----------------------+------------------------------+ +| kinesis-video-archived-media | kinesisvideo | kinesis-video-archived-media | ++------------------------------+----------------------+------------------------------+ +| kinesis-video-media | kinesisvideo | kinesis-video-media | ++------------------------------+----------------------+------------------------------+ +| neptune | rds | neptune | ++------------------------------+----------------------+------------------------------+ + +Additionally, you can get the new event name in code like so:: + + from botocore.session import Session + + session = Session() + client = session.create_client('elbv2') + service_event_name = client.meta.service_model.service_id.hyphenize() + +Armed with the service event name, simply replace the old service name in the +handler with the new service event name. 
If you were registering an event +against ``before-call.autoscaling`` intending to impact ``autoscaling-plans`` +for example, you would instead register against +``before-call.auto-scaling-plans``. + +If you are registering an event against one of the services in the Unintended +Targets column, you will now see those events getting fired where previously +they were not. While this is enabling that expected behavior, this still +represents a change in actual behavior. You should not need to update your +code, but you should test to ensure that you are seeing the behavior you want. + +Upgrading to 1.11.0 +--------------------- +* The vendored versions of ``requests`` and ``urllib3`` are no longer being + used and have been replaced with a direct dependency on upstream ``urllib3`` + and ``requests`` is no longer a dependency of ``botocore``. While these + vendored dependencies are still in the ``botocore`` package they should not + be used as they will be removed in the future. Any code that imports from + ``botocore.vendored.requests.*`` should be updated accordingly. Specifically, + the use of ``botocore.vendored.requests.exceptions.*`` or + ``botocore.vendored.requests.packages.urllib3.exceptions.*`` must be updated + to the corresponding exception classes in ``botocore.exceptions``. +* The version of ``urllib3`` used to make HTTP requests has been updated from + v1.10.4 to the range >=1.20,<1.24. + Upgrading to 1.0.0rc1 --------------------- diff --git a/docs/source/reference/awsrequest.rst b/docs/source/reference/awsrequest.rst new file mode 100644 index 00000000..5c41fd16 --- /dev/null +++ b/docs/source/reference/awsrequest.rst @@ -0,0 +1,15 @@ +.. _ref-awsrequest: + +================================ +AWS Request Reference +================================ + +botocore.awsrequest +------------------- + +.. autoclass:: botocore.awsrequest.AWSPreparedRequest + :members: + + +.. 
autoclass:: botocore.awsrequest.AWSResponse + :members: diff --git a/docs/source/topics/events.rst b/docs/source/topics/events.rst index 3a15c6c6..f3366087 100644 --- a/docs/source/topics/events.rst +++ b/docs/source/topics/events.rst @@ -18,33 +18,33 @@ to events. Event Types ----------- -The table below shows all of the events emitted by botocore. In some cases, -the events are listed as ``..bar``, in which ```` -and ```` are replaced with a specific service and operation, for -example ``s3.ListObjects.bar``. +The list below shows all of the events emitted by botocore. In some cases, the +events are listed as ``event-name..``, in which +```` and ```` are replaced with a specific service +identifier and operation, for example ``event-name.s3.ListObjects``. -.. list-table:: Events -  :header-rows: 1 +* ``'before-send..'`` - * - Event Name - - Occurance - - Arguments - - Return Value - * - **service-created** - - Whenever a service is created via the Sessions ``get_service`` - method. - - ``service`` - The newly created :class:`botocore.service.Service` - object. - - Ignored. - * - **before-call..** - - When an operation is being called (``Operation.call``). - - ``operation`` - The newly created :class:`botocore.operation.Operation` - object. - - Ignored. - * - **after-call..** - - After an operation has been called, but before the response is parsed. - - ``http_response`` - The HTTP response, ``parsed`` - The parsed data. - - Ignored. + +before-send +~~~~~~~~~~~~~~~~~~~~~ + +:Full Event Name: + ``'before-send..'`` + +:Description: + This event is emitted when the operation has been fully serialized, signed, + and is ready to be sent across the wire. This event allows the finalized + request to be inspected and allows a response to be returned that fulfills + the request. If no response is returned botocore will fulfill the request + as normal. 
+ +:Keyword Arguments Emitted: + + :type request: :class:`.AWSPreparedRequest` + :param params: An object representing the properties of an HTTP request. + +:Expected Return Value: None or an instance of :class:`.AWSResponse` Event Emission @@ -52,3 +52,15 @@ Event Emission When an event is emitted, the handlers are invoked in the order that they were registered. + + +Service ID +---------- +To get the service id from a service client use the following:: + + import botocore + import botocore.session + + session = botocore.session.Session() + client = session.create_client('elbv2') + service_event_name = client.meta.service_model.service_id.hyphenize() diff --git a/requirements.txt b/requirements.txt index b8db4b44..ab775f26 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,3 +8,4 @@ docutils>=0.10 behave==1.2.5 -e git://github.com/boto/jmespath.git@develop#egg=jmespath jsonschema==2.5.1 +urllib3>=1.20,<1.24 diff --git a/setup.cfg b/setup.cfg index a6878a23..38e1b761 100644 --- a/setup.cfg +++ b/setup.cfg @@ -9,6 +9,7 @@ requires-dist = docutils>=0.10 ordereddict==1.1; python_version=="2.6" simplejson==3.3.0; python_version=="2.6" + urllib3>=1.20,<1.24 [egg_info] tag_build = diff --git a/setup.py b/setup.py index 5b475883..5c8eb49b 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,8 @@ def find_version(*file_paths): requires = ['jmespath>=0.7.1,<1.0.0', - 'docutils>=0.10'] + 'docutils>=0.10', + 'urllib3>=1.20,<1.24'] if sys.version_info[:2] == (2, 6): @@ -52,7 +53,7 @@ setup( url='https://github.com/boto/botocore', scripts=[], packages=find_packages(exclude=['tests*']), - package_data={'botocore': ['data/*.json', 'data/*/*.json'], + package_data={'botocore': ['cacert.pem', 'data/*.json', 'data/*/*.json'], 'botocore.vendored.requests': ['*.pem']}, include_package_data=True, install_requires=requires, diff --git a/tests/__init__.py b/tests/__init__.py index 77444970..34d45756 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -23,6 +23,7 @@ import 
binascii import platform import select import datetime +from io import BytesIO from subprocess import Popen, PIPE from dateutil.tz import tzlocal @@ -38,6 +39,7 @@ from nose.tools import assert_equal import botocore.loaders import botocore.session +from botocore.awsrequest import AWSResponse from botocore.compat import six from botocore.compat import urlparse from botocore.compat import parse_qs @@ -359,3 +361,60 @@ def assert_url_equal(url1, url2): assert_equal(parts1.hostname, parts2.hostname) assert_equal(parts1.port, parts2.port) assert_equal(parse_qs(parts1.query), parse_qs(parts2.query)) + + +class HTTPStubberException(Exception): + pass + + +class RawResponse(BytesIO): + # TODO: There's a few objects similar to this in various tests, let's + # try and consolidate to this one in a future commit. + def stream(self, **kwargs): + contents = self.read() + while contents: + yield contents + contents = self.read() + + +class ClientHTTPStubber(object): + def __init__(self, client): + self.reset() + self._client = client + + def reset(self): + self.requests = [] + self.responses = [] + + def add_response(self, url='https://example.com', status=200, headers=None, + body=b''): + if headers is None: + headers = {} + + raw = RawResponse(body) + response = AWSResponse(url, status, headers, raw) + self.responses.append(response) + + def start(self): + self._client.meta.events.register('before-send', self) + + def stop(self): + self._client.meta.events.unregister('before-send', self) + + def __enter__(self): + self.start() + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.stop() + + def __call__(self, request, **kwargs): + self.requests.append(request) + if self.responses: + response = self.responses.pop(0) + if isinstance(response, Exception): + raise response + else: + return response + else: + raise HTTPStubberException('Insufficient responses') diff --git a/tests/functional/test_apigateway.py b/tests/functional/test_apigateway.py index 
8a9bda8a..5422585b 100644 --- a/tests/functional/test_apigateway.py +++ b/tests/functional/test_apigateway.py @@ -12,8 +12,7 @@ # language governing permissions and limitations under the License. import mock -from botocore.stub import Stubber -from tests import BaseSessionTest +from tests import BaseSessionTest, ClientHTTPStubber class TestApiGateway(BaseSessionTest): @@ -22,7 +21,7 @@ class TestApiGateway(BaseSessionTest): self.region = 'us-west-2' self.client = self.session.create_client( 'apigateway', self.region) - self.stubber = Stubber(self.client) + self.http_stubber = ClientHTTPStubber(self.client) def test_get_export(self): params = { @@ -32,11 +31,9 @@ class TestApiGateway(BaseSessionTest): 'accepts': 'application/yaml' } - with mock.patch('botocore.endpoint.Session.send') as _send: - _send.return_value = mock.Mock( - status_code=200, headers={}, content=b'{}') + self.http_stubber.add_response(body=b'{}') + with self.http_stubber: self.client.get_export(**params) - sent_request = _send.call_args[0][0] - self.assertEqual(sent_request.method, 'GET') - self.assertEqual( - sent_request.headers.get('Accept'), b'application/yaml') + request = self.http_stubber.requests[0] + self.assertEqual(request.method, 'GET') + self.assertEqual(request.headers.get('Accept'), b'application/yaml') diff --git a/tests/functional/test_cloudsearchdomain.py b/tests/functional/test_cloudsearchdomain.py index 568819a7..a94bb7c6 100644 --- a/tests/functional/test_cloudsearchdomain.py +++ b/tests/functional/test_cloudsearchdomain.py @@ -12,7 +12,7 @@ # language governing permissions and limitations under the License. 
import mock -from tests import BaseSessionTest +from tests import BaseSessionTest, ClientHTTPStubber class TestCloudsearchdomain(BaseSessionTest): @@ -21,15 +21,14 @@ class TestCloudsearchdomain(BaseSessionTest): self.region = 'us-west-2' self.client = self.session.create_client( 'cloudsearchdomain', self.region) + self.http_stubber = ClientHTTPStubber(self.client) def test_search(self): - with mock.patch('botocore.endpoint.Session.send') as _send: - _send.return_value = mock.Mock( - status_code=200, headers={}, content=b'{}') + self.http_stubber.add_response(body=b'{}') + with self.http_stubber: self.client.search(query='foo') - sent_request = _send.call_args[0][0] - self.assertEqual(sent_request.method, 'POST') - self.assertEqual( - sent_request.headers.get('Content-Type'), - b'application/x-www-form-urlencoded') - self.assertIn('q=foo', sent_request.body) + request = self.http_stubber.requests[0] + self.assertIn('q=foo', request.body) + self.assertEqual(request.method, 'POST') + content_type = b'application/x-www-form-urlencoded' + self.assertEqual(request.headers.get('Content-Type'), content_type) diff --git a/tests/functional/test_cognito_idp.py b/tests/functional/test_cognito_idp.py index 164db992..81f49321 100644 --- a/tests/functional/test_cognito_idp.py +++ b/tests/functional/test_cognito_idp.py @@ -14,7 +14,7 @@ import mock from nose.tools import assert_false -from tests import create_session +from tests import create_session, ClientHTTPStubber def test_unsigned_operations(): @@ -104,15 +104,15 @@ class UnsignedOperationTestCase(object): self._client = client self._operation_name = operation_name self._parameters = parameters + self._http_stubber = ClientHTTPStubber(self._client) def run(self): operation = getattr(self._client, self._operation_name) - with mock.patch('botocore.endpoint.Session.send') as _send: - _send.return_value = mock.Mock( - status_code=200, headers={}, content=b'{}') + self._http_stubber.add_response(body=b'{}') + with 
self._http_stubber: operation(**self._parameters) - request = _send.call_args[0][0] + request = self._http_stubber.requests[0] assert_false( 'authorization' in request.headers, diff --git a/tests/functional/test_credentials.py b/tests/functional/test_credentials.py index 8aea11ae..bd6158c4 100644 --- a/tests/functional/test_credentials.py +++ b/tests/functional/test_credentials.py @@ -20,7 +20,6 @@ import shutil from datetime import datetime, timedelta import sys -from botocore.vendored import requests from dateutil.tz import tzlocal from botocore.exceptions import CredentialRetrievalError diff --git a/tests/functional/test_event_alias.py b/tests/functional/test_event_alias.py new file mode 100644 index 00000000..604f0128 --- /dev/null +++ b/tests/functional/test_event_alias.py @@ -0,0 +1,608 @@ +from botocore.session import Session + + +# The list of services which were available when we switched over from using +# endpoint prefix in event to using service id. These should all accept +# either. 
+SERVICES = { + "acm": { + "endpoint_prefix": "acm", + "service_id": "acm" + }, + "acm-pca": { + "endpoint_prefix": "acm-pca", + "service_id": "acm-pca" + }, + "alexaforbusiness": { + "endpoint_prefix": "a4b", + "service_id": "alexa-for-business" + }, + "apigateway": { + "endpoint_prefix": "apigateway", + "service_id": "api-gateway" + }, + "application-autoscaling": { + "service_id": "application-auto-scaling" + }, + "appstream": { + "endpoint_prefix": "appstream2", + "service_id": "appstream" + }, + "appsync": { + "endpoint_prefix": "appsync", + "service_id": "appsync" + }, + "athena": { + "endpoint_prefix": "athena", + "service_id": "athena" + }, + "autoscaling": { + "endpoint_prefix": "autoscaling", + "service_id": "auto-scaling" + }, + "autoscaling-plans": { + "service_id": "auto-scaling-plans" + }, + "batch": { + "endpoint_prefix": "batch", + "service_id": "batch" + }, + "budgets": { + "endpoint_prefix": "budgets", + "service_id": "budgets" + }, + "ce": { + "endpoint_prefix": "ce", + "service_id": "cost-explorer" + }, + "cloud9": { + "endpoint_prefix": "cloud9", + "service_id": "cloud9" + }, + "clouddirectory": { + "endpoint_prefix": "clouddirectory", + "service_id": "clouddirectory" + }, + "cloudformation": { + "endpoint_prefix": "cloudformation", + "service_id": "cloudformation" + }, + "cloudfront": { + "endpoint_prefix": "cloudfront", + "service_id": "cloudfront" + }, + "cloudhsm": { + "endpoint_prefix": "cloudhsm", + "service_id": "cloudhsm" + }, + "cloudhsmv2": { + "endpoint_prefix": "cloudhsmv2", + "service_id": "cloudhsm-v2" + }, + "cloudsearch": { + "endpoint_prefix": "cloudsearch", + "service_id": "cloudsearch" + }, + "cloudsearchdomain": { + "endpoint_prefix": "cloudsearchdomain", + "service_id": "cloudsearch-domain" + }, + "cloudtrail": { + "endpoint_prefix": "cloudtrail", + "service_id": "cloudtrail" + }, + "cloudwatch": { + "endpoint_prefix": "monitoring", + "service_id": "cloudwatch" + }, + "codebuild": { + "endpoint_prefix": "codebuild", + 
"service_id": "codebuild" + }, + "codecommit": { + "endpoint_prefix": "codecommit", + "service_id": "codecommit" + }, + "codedeploy": { + "endpoint_prefix": "codedeploy", + "service_id": "codedeploy" + }, + "codepipeline": { + "endpoint_prefix": "codepipeline", + "service_id": "codepipeline" + }, + "codestar": { + "endpoint_prefix": "codestar", + "service_id": "codestar" + }, + "cognito-identity": { + "endpoint_prefix": "cognito-identity", + "service_id": "cognito-identity" + }, + "cognito-idp": { + "endpoint_prefix": "cognito-idp", + "service_id": "cognito-identity-provider" + }, + "cognito-sync": { + "endpoint_prefix": "cognito-sync", + "service_id": "cognito-sync" + }, + "comprehend": { + "endpoint_prefix": "comprehend", + "service_id": "comprehend" + }, + "config": { + "endpoint_prefix": "config", + "service_id": "config-service" + }, + "connect": { + "endpoint_prefix": "connect", + "service_id": "connect" + }, + "cur": { + "endpoint_prefix": "cur", + "service_id": "cost-and-usage-report-service" + }, + "datapipeline": { + "endpoint_prefix": "datapipeline", + "service_id": "data-pipeline" + }, + "dax": { + "endpoint_prefix": "dax", + "service_id": "dax" + }, + "devicefarm": { + "endpoint_prefix": "devicefarm", + "service_id": "device-farm" + }, + "directconnect": { + "endpoint_prefix": "directconnect", + "service_id": "direct-connect" + }, + "discovery": { + "endpoint_prefix": "discovery", + "service_id": "application-discovery-service" + }, + "dlm": { + "endpoint_prefix": "dlm", + "service_id": "dlm" + }, + "dms": { + "endpoint_prefix": "dms", + "service_id": "database-migration-service" + }, + "ds": { + "endpoint_prefix": "ds", + "service_id": "directory-service" + }, + "dynamodb": { + "endpoint_prefix": "dynamodb", + "service_id": "dynamodb" + }, + "dynamodbstreams": { + "endpoint_prefix": "streams.dynamodb", + "service_id": "dynamodb-streams" + }, + "ec2": { + "endpoint_prefix": "ec2", + "service_id": "ec2" + }, + "ecr": { + "endpoint_prefix": "ecr", + 
"service_id": "ecr" + }, + "ecs": { + "endpoint_prefix": "ecs", + "service_id": "ecs" + }, + "efs": { + "endpoint_prefix": "elasticfilesystem", + "service_id": "efs" + }, + "eks": { + "endpoint_prefix": "eks", + "service_id": "eks" + }, + "elasticache": { + "endpoint_prefix": "elasticache", + "service_id": "elasticache" + }, + "elasticbeanstalk": { + "endpoint_prefix": "elasticbeanstalk", + "service_id": "elastic-beanstalk" + }, + "elastictranscoder": { + "endpoint_prefix": "elastictranscoder", + "service_id": "elastic-transcoder" + }, + "elb": { + "endpoint_prefix": "elasticloadbalancing", + "service_id": "elastic-load-balancing" + }, + "elbv2": { + "service_id": "elastic-load-balancing-v2" + }, + "emr": { + "endpoint_prefix": "elasticmapreduce", + "service_id": "emr" + }, + "es": { + "endpoint_prefix": "es", + "service_id": "elasticsearch-service" + }, + "events": { + "endpoint_prefix": "events", + "service_id": "cloudwatch-events" + }, + "firehose": { + "endpoint_prefix": "firehose", + "service_id": "firehose" + }, + "fms": { + "endpoint_prefix": "fms", + "service_id": "fms" + }, + "gamelift": { + "endpoint_prefix": "gamelift", + "service_id": "gamelift" + }, + "glacier": { + "endpoint_prefix": "glacier", + "service_id": "glacier" + }, + "glue": { + "endpoint_prefix": "glue", + "service_id": "glue" + }, + "greengrass": { + "endpoint_prefix": "greengrass", + "service_id": "greengrass" + }, + "guardduty": { + "endpoint_prefix": "guardduty", + "service_id": "guardduty" + }, + "health": { + "endpoint_prefix": "health", + "service_id": "health" + }, + "iam": { + "endpoint_prefix": "iam", + "service_id": "iam" + }, + "importexport": { + "endpoint_prefix": "importexport", + "service_id": "importexport" + }, + "inspector": { + "endpoint_prefix": "inspector", + "service_id": "inspector" + }, + "iot": { + "endpoint_prefix": "iot", + "service_id": "iot" + }, + "iot-data": { + "endpoint_prefix": "data.iot", + "service_id": "iot-data-plane" + }, + "iot-jobs-data": { + 
"endpoint_prefix": "data.jobs.iot", + "service_id": "iot-jobs-data-plane" + }, + "iot1click-devices": { + "endpoint_prefix": "devices.iot1click", + "service_id": "iot-1click-devices-service" + }, + "iot1click-projects": { + "endpoint_prefix": "projects.iot1click", + "service_id": "iot-1click-projects" + }, + "iotanalytics": { + "endpoint_prefix": "iotanalytics", + "service_id": "iotanalytics" + }, + "kinesis": { + "endpoint_prefix": "kinesis", + "service_id": "kinesis" + }, + "kinesis-video-archived-media": { + "service_id": "kinesis-video-archived-media" + }, + "kinesis-video-media": { + "service_id": "kinesis-video-media" + }, + "kinesisanalytics": { + "endpoint_prefix": "kinesisanalytics", + "service_id": "kinesis-analytics" + }, + "kinesisvideo": { + "endpoint_prefix": "kinesisvideo", + "service_id": "kinesis-video" + }, + "kms": { + "endpoint_prefix": "kms", + "service_id": "kms" + }, + "lambda": { + "endpoint_prefix": "lambda", + "service_id": "lambda" + }, + "lex-models": { + "endpoint_prefix": "models.lex", + "service_id": "lex-model-building-service" + }, + "lex-runtime": { + "endpoint_prefix": "runtime.lex", + "service_id": "lex-runtime-service" + }, + "lightsail": { + "endpoint_prefix": "lightsail", + "service_id": "lightsail" + }, + "logs": { + "endpoint_prefix": "logs", + "service_id": "cloudwatch-logs" + }, + "machinelearning": { + "endpoint_prefix": "machinelearning", + "service_id": "machine-learning" + }, + "macie": { + "endpoint_prefix": "macie", + "service_id": "macie" + }, + "marketplace-entitlement": { + "endpoint_prefix": "entitlement.marketplace", + "service_id": "marketplace-entitlement-service" + }, + "marketplacecommerceanalytics": { + "endpoint_prefix": "marketplacecommerceanalytics", + "service_id": "marketplace-commerce-analytics" + }, + "mediaconvert": { + "endpoint_prefix": "mediaconvert", + "service_id": "mediaconvert" + }, + "medialive": { + "endpoint_prefix": "medialive", + "service_id": "medialive" + }, + "mediapackage": { + 
"endpoint_prefix": "mediapackage", + "service_id": "mediapackage" + }, + "mediastore": { + "endpoint_prefix": "mediastore", + "service_id": "mediastore" + }, + "mediastore-data": { + "endpoint_prefix": "data.mediastore", + "service_id": "mediastore-data" + }, + "mediatailor": { + "endpoint_prefix": "api.mediatailor", + "service_id": "mediatailor" + }, + "meteringmarketplace": { + "endpoint_prefix": "metering.marketplace", + "service_id": "marketplace-metering" + }, + "mgh": { + "endpoint_prefix": "mgh", + "service_id": "migration-hub" + }, + "mobile": { + "endpoint_prefix": "mobile", + "service_id": "mobile" + }, + "mq": { + "endpoint_prefix": "mq", + "service_id": "mq" + }, + "mturk": { + "endpoint_prefix": "mturk-requester", + "service_id": "mturk" + }, + "neptune": { + "service_id": "neptune" + }, + "opsworks": { + "endpoint_prefix": "opsworks", + "service_id": "opsworks" + }, + "opsworkscm": { + "endpoint_prefix": "opsworks-cm", + "service_id": "opsworkscm" + }, + "organizations": { + "endpoint_prefix": "organizations", + "service_id": "organizations" + }, + "pi": { + "endpoint_prefix": "pi", + "service_id": "pi" + }, + "pinpoint": { + "endpoint_prefix": "pinpoint", + "service_id": "pinpoint" + }, + "polly": { + "endpoint_prefix": "polly", + "service_id": "polly" + }, + "pricing": { + "endpoint_prefix": "api.pricing", + "service_id": "pricing" + }, + "rds": { + "endpoint_prefix": "rds", + "service_id": "rds" + }, + "redshift": { + "endpoint_prefix": "redshift", + "service_id": "redshift" + }, + "rekognition": { + "endpoint_prefix": "rekognition", + "service_id": "rekognition" + }, + "resource-groups": { + "endpoint_prefix": "resource-groups", + "service_id": "resource-groups" + }, + "resourcegroupstaggingapi": { + "endpoint_prefix": "tagging", + "service_id": "resource-groups-tagging-api" + }, + "route53": { + "endpoint_prefix": "route53", + "service_id": "route-53" + }, + "route53domains": { + "endpoint_prefix": "route53domains", + "service_id": 
"route-53-domains" + }, + "s3": { + "endpoint_prefix": "s3", + "service_id": "s3" + }, + "sagemaker": { + "endpoint_prefix": "api.sagemaker", + "service_id": "sagemaker" + }, + "sagemaker-runtime": { + "endpoint_prefix": "runtime.sagemaker", + "service_id": "sagemaker-runtime" + }, + "sdb": { + "endpoint_prefix": "sdb", + "service_id": "simpledb" + }, + "secretsmanager": { + "endpoint_prefix": "secretsmanager", + "service_id": "secrets-manager" + }, + "serverlessrepo": { + "endpoint_prefix": "serverlessrepo", + "service_id": "serverlessapplicationrepository" + }, + "servicecatalog": { + "endpoint_prefix": "servicecatalog", + "service_id": "service-catalog" + }, + "servicediscovery": { + "endpoint_prefix": "servicediscovery", + "service_id": "servicediscovery" + }, + "ses": { + "endpoint_prefix": "email", + "service_id": "ses" + }, + "shield": { + "endpoint_prefix": "shield", + "service_id": "shield" + }, + "sms": { + "endpoint_prefix": "sms", + "service_id": "sms" + }, + "snowball": { + "endpoint_prefix": "snowball", + "service_id": "snowball" + }, + "sns": { + "endpoint_prefix": "sns", + "service_id": "sns" + }, + "sqs": { + "endpoint_prefix": "sqs", + "service_id": "sqs" + }, + "ssm": { + "endpoint_prefix": "ssm", + "service_id": "ssm" + }, + "stepfunctions": { + "endpoint_prefix": "states", + "service_id": "sfn" + }, + "storagegateway": { + "endpoint_prefix": "storagegateway", + "service_id": "storage-gateway" + }, + "sts": { + "endpoint_prefix": "sts", + "service_id": "sts" + }, + "support": { + "endpoint_prefix": "support", + "service_id": "support" + }, + "swf": { + "endpoint_prefix": "swf", + "service_id": "swf" + }, + "transcribe": { + "endpoint_prefix": "transcribe", + "service_id": "transcribe" + }, + "translate": { + "endpoint_prefix": "translate", + "service_id": "translate" + }, + "waf": { + "endpoint_prefix": "waf", + "service_id": "waf" + }, + "waf-regional": { + "endpoint_prefix": "waf-regional", + "service_id": "waf-regional" + }, + "workdocs": { + 
"endpoint_prefix": "workdocs", + "service_id": "workdocs" + }, + "workmail": { + "endpoint_prefix": "workmail", + "service_id": "workmail" + }, + "workspaces": { + "endpoint_prefix": "workspaces", + "service_id": "workspaces" + }, + "xray": { + "endpoint_prefix": "xray", + "service_id": "xray" + } +} + + +def test_event_alias(): + for client_name in SERVICES.keys(): + endpoint_prefix = SERVICES[client_name].get('endpoint_prefix') + service_id = SERVICES[client_name]['service_id'] + if endpoint_prefix is not None: + yield _assert_handler_called, client_name, endpoint_prefix + yield _assert_handler_called, client_name, service_id + yield _assert_handler_called, client_name, client_name + + +def _assert_handler_called(client_name, event_part): + hook_calls = [] + + def _hook(**kwargs): + hook_calls.append(kwargs['event_name']) + + session = _get_session() + session.register('creating-client-class.%s' % event_part, _hook) + session.create_client(client_name) + assert len(hook_calls) == 1 + + +def _get_session(): + session = Session() + session.set_credentials('foo', 'bar') + session.set_config_variable('region', 'us-west-2') + session.config_filename = 'no-exist-foo' + return session diff --git a/tests/functional/test_history.py b/tests/functional/test_history.py index 283fce55..1968dc83 100644 --- a/tests/functional/test_history.py +++ b/tests/functional/test_history.py @@ -2,7 +2,7 @@ from contextlib import contextmanager import mock -from tests import BaseSessionTest +from tests import BaseSessionTest, ClientHTTPStubber from botocore.history import BaseHistoryHandler from botocore.history import get_global_history_recorder @@ -20,6 +20,7 @@ class TestRecordStatementsInjections(BaseSessionTest): def setUp(self): super(TestRecordStatementsInjections, self).setUp() self.client = self.session.create_client('s3', 'us-west-2') + self.http_stubber = ClientHTTPStubber(self.client) self.s3_response_body = ( '' @@ -46,16 +47,9 @@ class 
TestRecordStatementsInjections(BaseSessionTest): if call[0] == event_type] return matching - @contextmanager - def patch_http_layer(self, response, status_code=200): - with mock.patch('botocore.endpoint.Session.send') as send: - send.return_value = mock.Mock(status_code=status_code, - headers={}, - content=response) - yield send - def test_does_record_api_call(self): - with self.patch_http_layer(self.s3_response_body): + self.http_stubber.add_response(body=self.s3_response_body) + with self.http_stubber: self.client.list_buckets() api_call_events = self._get_all_events_of_type('API_CALL') @@ -70,7 +64,8 @@ class TestRecordStatementsInjections(BaseSessionTest): self.assertEqual(source, 'BOTOCORE') def test_does_record_http_request(self): - with self.patch_http_layer(self.s3_response_body): + self.http_stubber.add_response(body=self.s3_response_body) + with self.http_stubber: self.client.list_buckets() http_request_events = self._get_all_events_of_type('HTTP_REQUEST') @@ -100,7 +95,8 @@ class TestRecordStatementsInjections(BaseSessionTest): self.assertEqual(source, 'BOTOCORE') def test_does_record_http_response(self): - with self.patch_http_layer(self.s3_response_body): + self.http_stubber.add_response(body=self.s3_response_body) + with self.http_stubber: self.client.list_buckets() http_response_events = self._get_all_events_of_type('HTTP_RESPONSE') @@ -119,7 +115,8 @@ class TestRecordStatementsInjections(BaseSessionTest): self.assertEqual(source, 'BOTOCORE') def test_does_record_parsed_response(self): - with self.patch_http_layer(self.s3_response_body): + self.http_stubber.add_response(body=self.s3_response_body) + with self.http_stubber: self.client.list_buckets() parsed_response_events = self._get_all_events_of_type( diff --git a/tests/functional/test_lex.py b/tests/functional/test_lex.py index 03936854..88ab4649 100644 --- a/tests/functional/test_lex.py +++ b/tests/functional/test_lex.py @@ -13,7 +13,7 @@ import mock from datetime import datetime -from tests 
import BaseSessionTest +from tests import BaseSessionTest, ClientHTTPStubber class TestLex(BaseSessionTest): @@ -21,6 +21,7 @@ class TestLex(BaseSessionTest): super(TestLex, self).setUp() self.region = 'us-west-2' self.client = self.session.create_client('lex-runtime', self.region) + self.http_stubber = ClientHTTPStubber(self.client) def test_unsigned_payload(self): params = { @@ -35,11 +36,10 @@ class TestLex(BaseSessionTest): with mock.patch('botocore.auth.datetime') as _datetime: _datetime.datetime.utcnow.return_value = timestamp - with mock.patch('botocore.endpoint.Session.send') as _send: - _send.return_value = mock.Mock( - status_code=200, headers={}, content=b'{}') + self.http_stubber.add_response(body=b'{}') + with self.http_stubber: self.client.post_content(**params) - request = _send.call_args[0][0] + request = self.http_stubber.requests[0] # The payload gets added to the string to sign, and then part of the # signature. The signature will be part of the authorization header. diff --git a/tests/functional/test_machinelearning.py b/tests/functional/test_machinelearning.py index 1ea17cae..107fb198 100644 --- a/tests/functional/test_machinelearning.py +++ b/tests/functional/test_machinelearning.py @@ -12,7 +12,7 @@ # language governing permissions and limitations under the License. 
import mock -from tests import BaseSessionTest +from tests import BaseSessionTest, ClientHTTPStubber class TestMachineLearning(BaseSessionTest): @@ -21,20 +21,16 @@ class TestMachineLearning(BaseSessionTest): self.region = 'us-west-2' self.client = self.session.create_client( 'machinelearning', self.region) + self.http_stubber = ClientHTTPStubber(self.client) def test_predict(self): - with mock.patch('botocore.endpoint.Session.send') as \ - http_session_send_patch: - http_response = mock.Mock() - http_response.status_code = 200 - http_response.content = b'{}' - http_response.headers = {} - http_session_send_patch.return_value = http_response + self.http_stubber.add_response(body=b'{}') + with self.http_stubber: custom_endpoint = 'https://myendpoint.amazonaws.com/' self.client.predict( MLModelId='ml-foo', Record={'Foo': 'Bar'}, PredictEndpoint=custom_endpoint ) - sent_request = http_session_send_patch.call_args[0][0] + sent_request = self.http_stubber.requests[0] self.assertEqual(sent_request.url, custom_endpoint) diff --git a/tests/functional/test_neptune.py b/tests/functional/test_neptune.py new file mode 100644 index 00000000..187797b7 --- /dev/null +++ b/tests/functional/test_neptune.py @@ -0,0 +1,67 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
+import mock +from contextlib import contextmanager + +import botocore.session +from tests import BaseSessionTest, ClientHTTPStubber +from botocore.stub import Stubber +from tests import unittest + + +class TestNeptunePresignUrlInjection(BaseSessionTest): + + def setUp(self): + super(TestNeptunePresignUrlInjection, self).setUp() + self.client = self.session.create_client('neptune', 'us-west-2') + self.http_stubber = ClientHTTPStubber(self.client) + + def assert_presigned_url_injected_in_request(self, body): + self.assertIn('PreSignedUrl', body) + self.assertNotIn('SourceRegion', body) + + def test_create_db_cluster(self): + params = { + 'DBClusterIdentifier': 'my-cluster', + 'Engine': 'neptune', + 'SourceRegion': 'us-east-1' + } + response_body = ( + b'' + b'' + b'' + b'' + ) + self.http_stubber.add_response(body=response_body) + with self.http_stubber: + self.client.create_db_cluster(**params) + sent_request = self.http_stubber.requests[0] + self.assert_presigned_url_injected_in_request(sent_request.body) + + def test_copy_db_cluster_snapshot(self): + params = { + 'SourceDBClusterSnapshotIdentifier': 'source-db', + 'TargetDBClusterSnapshotIdentifier': 'target-db', + 'SourceRegion': 'us-east-1' + } + response_body = ( + b'' + b'' + b'' + b'' + ) + self.http_stubber.add_response(body=response_body) + with self.http_stubber: + self.client.copy_db_cluster_snapshot(**params) + sent_request = self.http_stubber.requests[0] + self.assert_presigned_url_injected_in_request(sent_request.body) diff --git a/tests/functional/test_public_apis.py b/tests/functional/test_public_apis.py index 6c1a40d3..0207e9bf 100644 --- a/tests/functional/test_public_apis.py +++ b/tests/functional/test_public_apis.py @@ -14,6 +14,7 @@ from collections import defaultdict import mock +from tests import ClientHTTPStubber from botocore.session import Session from botocore.exceptions import NoCredentialsError from botocore import xform_name @@ -41,26 +42,24 @@ PUBLIC_API_TESTS = { } -class 
EarlyExit(BaseException): +class EarlyExit(Exception): pass -def _test_public_apis_will_not_be_signed(func, kwargs): - with mock.patch('botocore.endpoint.Session.send') as _send: - _send.side_effect = EarlyExit("we don't care about response here") +def _test_public_apis_will_not_be_signed(client, operation, kwargs): + with ClientHTTPStubber(client) as http_stubber: + http_stubber.responses.append(EarlyExit()) try: - func(**kwargs) + operation(**kwargs) except EarlyExit: pass - except NoCredentialsError: - assert False, "NoCredentialsError should not be triggered" - request = _send.call_args[0][0] - sig_v2_disabled = 'SignatureVersion=2' not in request.url - assert sig_v2_disabled, "SigV2 is incorrectly enabled" - sig_v3_disabled = 'X-Amzn-Authorization' not in request.headers - assert sig_v3_disabled, "SigV3 is incorrectly enabled" - sig_v4_disabled = 'Authorization' not in request.headers - assert sig_v4_disabled, "SigV4 is incorrectly enabled" + request = http_stubber.requests[0] + sig_v2_disabled = 'SignatureVersion=2' not in request.url + assert sig_v2_disabled, "SigV2 is incorrectly enabled" + sig_v3_disabled = 'X-Amzn-Authorization' not in request.headers + assert sig_v3_disabled, "SigV3 is incorrectly enabled" + sig_v4_disabled = 'Authorization' not in request.headers + assert sig_v4_disabled, "SigV4 is incorrectly enabled" def test_public_apis_will_not_be_signed(): @@ -74,4 +73,4 @@ def test_public_apis_will_not_be_signed(): for operation_name in PUBLIC_API_TESTS[service_name]: kwargs = PUBLIC_API_TESTS[service_name][operation_name] method = getattr(client, xform_name(operation_name)) - yield (_test_public_apis_will_not_be_signed, method, kwargs) + yield _test_public_apis_will_not_be_signed, client, method, kwargs diff --git a/tests/functional/test_rds.py b/tests/functional/test_rds.py index 08087bde..6c56895c 100644 --- a/tests/functional/test_rds.py +++ b/tests/functional/test_rds.py @@ -14,7 +14,7 @@ import mock from contextlib import contextmanager 
import botocore.session -from tests import BaseSessionTest +from tests import BaseSessionTest, ClientHTTPStubber from botocore.stub import Stubber from tests import unittest @@ -24,14 +24,7 @@ class TestRDSPresignUrlInjection(BaseSessionTest): def setUp(self): super(TestRDSPresignUrlInjection, self).setUp() self.client = self.session.create_client('rds', 'us-west-2') - - @contextmanager - def patch_http_layer(self, response, status_code=200): - with mock.patch('botocore.endpoint.Session.send') as send: - send.return_value = mock.Mock(status_code=status_code, - headers={}, - content=response) - yield send + self.http_stubber = ClientHTTPStubber(self.client) def assert_presigned_url_injected_in_request(self, body): self.assertIn('PreSignedUrl', body) @@ -48,9 +41,10 @@ class TestRDSPresignUrlInjection(BaseSessionTest): b'' b'' ) - with self.patch_http_layer(response_body) as send: + self.http_stubber.add_response(body=response_body) + with self.http_stubber: self.client.copy_db_snapshot(**params) - sent_request = send.call_args[0][0] + sent_request = self.http_stubber.requests[0] self.assert_presigned_url_injected_in_request(sent_request.body) def test_create_db_instance_read_replica(self): @@ -65,9 +59,10 @@ class TestRDSPresignUrlInjection(BaseSessionTest): b'' b'' ) - with self.patch_http_layer(response_body) as send: + self.http_stubber.add_response(body=response_body) + with self.http_stubber: self.client.create_db_instance_read_replica(**params) - sent_request = send.call_args[0][0] + sent_request = self.http_stubber.requests[0] self.assert_presigned_url_injected_in_request(sent_request.body) diff --git a/tests/functional/test_retry.py b/tests/functional/test_retry.py index dcb3d801..5fd8598f 100644 --- a/tests/functional/test_retry.py +++ b/tests/functional/test_retry.py @@ -10,7 +10,8 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. -from tests import BaseSessionTest, mock +import contextlib +from tests import BaseSessionTest, mock, ClientHTTPStubber from botocore.exceptions import ClientError from botocore.config import Config @@ -26,36 +27,30 @@ class TestRetry(BaseSessionTest): def tearDown(self): self.sleep_patch.stop() - def add_n_retryable_responses(self, mock_send, num_responses): - responses = [] - for _ in range(num_responses): - http_response = mock.Mock() - http_response.status_code = 500 - http_response.headers = {} - http_response.content = b'{}' - responses.append(http_response) - mock_send.side_effect = responses - - def assert_will_retry_n_times(self, method, num_retries): + @contextlib.contextmanager + def assert_will_retry_n_times(self, client, num_retries): num_responses = num_retries + 1 - with mock.patch('botocore.endpoint.Session.send') as mock_send: - self.add_n_retryable_responses(mock_send, num_responses) + with ClientHTTPStubber(client) as http_stubber: + for _ in range(num_responses): + http_stubber.add_response(status=500, body=b'{}') with self.assertRaisesRegexp( ClientError, 'reached max retries: %s' % num_retries): - method() - self.assertEqual(mock_send.call_count, num_responses) + yield + self.assertEqual(len(http_stubber.requests), num_responses) def test_can_override_max_attempts(self): client = self.session.create_client( 'dynamodb', self.region, config=Config( retries={'max_attempts': 1})) - self.assert_will_retry_n_times(client.list_tables, 1) + with self.assert_will_retry_n_times(client, 1): + client.list_tables() def test_do_not_attempt_retries(self): client = self.session.create_client( 'dynamodb', self.region, config=Config( retries={'max_attempts': 0})) - self.assert_will_retry_n_times(client.list_tables, 0) + with self.assert_will_retry_n_times(client, 0): + client.list_tables() def test_setting_max_attempts_does_not_set_for_other_clients(self): # 
Make one client with max attempts configured. @@ -67,7 +62,8 @@ class TestRetry(BaseSessionTest): client = self.session.create_client('codecommit', self.region) # It should use the default max retries, which should be four retries # for this service. - self.assert_will_retry_n_times(client.list_repositories, 4) + with self.assert_will_retry_n_times(client, 4): + client.list_repositories() def test_service_specific_defaults_do_not_mutate_general_defaults(self): # This tests for a bug where if you created a client for a service @@ -79,19 +75,22 @@ class TestRetry(BaseSessionTest): # Make a dynamodb client. It's a special case client that is # configured to a make a maximum of 10 requests (9 retries). client = self.session.create_client('dynamodb', self.region) - self.assert_will_retry_n_times(client.list_tables, 9) + with self.assert_will_retry_n_times(client, 9): + client.list_tables() # A codecommit client is not a special case for retries. It will at # most make 5 requests (4 retries) for its default. client = self.session.create_client('codecommit', self.region) - self.assert_will_retry_n_times(client.list_repositories, 4) + with self.assert_will_retry_n_times(client, 4): + client.list_repositories() def test_set_max_attempts_on_session(self): self.session.set_default_client_config( Config(retries={'max_attempts': 1})) # Max attempts should be inherited from the session. 
client = self.session.create_client('codecommit', self.region) - self.assert_will_retry_n_times(client.list_repositories, 1) + with self.assert_will_retry_n_times(client, 1): + client.list_repositories() def test_can_clobber_max_attempts_on_session(self): self.session.set_default_client_config( @@ -100,4 +99,5 @@ class TestRetry(BaseSessionTest): client = self.session.create_client( 'codecommit', self.region, config=Config( retries={'max_attempts': 0})) - self.assert_will_retry_n_times(client.list_repositories, 0) + with self.assert_will_retry_n_times(client, 0): + client.list_repositories() diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index 0ce15572..773a690d 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -10,7 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from tests import unittest, mock, BaseSessionTest, create_session +from tests import unittest, mock, BaseSessionTest, create_session, ClientHTTPStubber from nose.tools import assert_equal import botocore.session @@ -35,30 +35,22 @@ class BaseS3OperationTest(BaseSessionTest): self.region = 'us-west-2' self.client = self.session.create_client( 's3', self.region) - self.session_send_patch = mock.patch('botocore.endpoint.Session.send') - self.http_session_send_mock = self.session_send_patch.start() - - def tearDown(self): - super(BaseSessionTest, self).tearDown() - self.session_send_patch.stop() + self.http_stubber = ClientHTTPStubber(self.client) class TestOnlyAsciiCharsAllowed(BaseS3OperationTest): def test_validates_non_ascii_chars_trigger_validation_error(self): - self.http_session_send_mock.return_value = mock.Mock(status_code=200, - headers={}, - content=b'') - with self.assertRaises(ParamValidationError): - self.client.put_object( - Bucket='foo', Key='bar', Metadata={ - 'goodkey': 
'good', 'non-ascii': u'\u2713'}) + self.http_stubber.add_response() + with self.http_stubber: + with self.assertRaises(ParamValidationError): + self.client.put_object( + Bucket='foo', Key='bar', Metadata={ + 'goodkey': 'good', 'non-ascii': u'\u2713'}) class TestS3GetBucketLifecycle(BaseS3OperationTest): def test_multiple_transitions_returns_one(self): - http_response = mock.Mock() - http_response.status_code = 200 - http_response.content = ( + response_body = ( '' '' @@ -90,10 +82,10 @@ class TestS3GetBucketLifecycle(BaseS3OperationTest): ' ' '' ).encode('utf-8') - http_response.headers = {} - self.http_session_send_mock.return_value = http_response s3 = self.session.create_client('s3') - response = s3.get_bucket_lifecycle(Bucket='mybucket') + with ClientHTTPStubber(s3) as http_stubber: + http_stubber.add_response(body=response_body) + response = s3.get_bucket_lifecycle(Bucket='mybucket') # Each Transition member should have at least one of the # transitions provided. self.assertEqual( @@ -130,24 +122,15 @@ class TestS3PutObject(BaseS3OperationTest): 'Content-Length: 0\r\n' 'Server: AmazonS3\r\n' ).encode('utf-8') - http_500_response = mock.Mock() - http_500_response.status_code = 500 - http_500_response.content = non_xml_content - http_500_response.headers = {} - - success_response = mock.Mock() - success_response.status_code = 200 - success_response.content = b'' - success_response.headers = {} - - self.http_session_send_mock.side_effect = [ - http_500_response, success_response - ] s3 = self.session.create_client('s3') - response = s3.put_object(Bucket='mybucket', Key='mykey', Body=b'foo') - # The first response should have been retried even though the xml is - # invalid and eventually return the 200 response. 
- self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) + with ClientHTTPStubber(s3) as http_stubber: + http_stubber.add_response(status=500, body=non_xml_content) + http_stubber.add_response() + response = s3.put_object(Bucket='mybucket', Key='mykey', Body=b'foo') + # The first response should have been retried even though the xml is + # invalid and eventually return the 200 response. + self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) + self.assertEqual(len(http_stubber.requests), 2) class TestS3SigV4(BaseS3OperationTest): @@ -155,17 +138,15 @@ class TestS3SigV4(BaseS3OperationTest): super(TestS3SigV4, self).setUp() self.client = self.session.create_client( 's3', self.region, config=Config(signature_version='s3v4')) - self.response_mock = mock.Mock() - self.response_mock.content = b'' - self.response_mock.headers = {} - self.response_mock.status_code = 200 - self.http_session_send_mock.return_value = self.response_mock + self.http_stubber = ClientHTTPStubber(self.client) + self.http_stubber.add_response() def get_sent_headers(self): - return self.http_session_send_mock.mock_calls[0][1][0].headers + return self.http_stubber.requests[0].headers def test_content_md5_set(self): - self.client.put_object(Bucket='foo', Key='bar', Body='baz') + with self.http_stubber: + self.client.put_object(Bucket='foo', Key='bar', Body='baz') self.assertIn('content-md5', self.get_sent_headers()) def test_content_sha256_set_if_config_value_is_true(self): @@ -174,7 +155,10 @@ class TestS3SigV4(BaseS3OperationTest): }) self.client = self.session.create_client( 's3', self.region, config=config) - self.client.put_object(Bucket='foo', Key='bar', Body='baz') + self.http_stubber = ClientHTTPStubber(self.client) + self.http_stubber.add_response() + with self.http_stubber: + self.client.put_object(Bucket='foo', Key='bar', Body='baz') sent_headers = self.get_sent_headers() sha_header = sent_headers.get('x-amz-content-sha256') self.assertNotEqual(sha_header, 
b'UNSIGNED-PAYLOAD') @@ -185,7 +169,10 @@ class TestS3SigV4(BaseS3OperationTest): }) self.client = self.session.create_client( 's3', self.region, config=config) - self.client.put_object(Bucket='foo', Key='bar', Body='baz') + self.http_stubber = ClientHTTPStubber(self.client) + self.http_stubber.add_response() + with self.http_stubber: + self.client.put_object(Bucket='foo', Key='bar', Body='baz') sent_headers = self.get_sent_headers() sha_header = sent_headers.get('x-amz-content-sha256') self.assertEqual(sha_header, b'UNSIGNED-PAYLOAD') @@ -193,7 +180,8 @@ class TestS3SigV4(BaseS3OperationTest): def test_content_sha256_set_if_md5_is_unavailable(self): with mock.patch('botocore.auth.MD5_AVAILABLE', False): with mock.patch('botocore.handlers.MD5_AVAILABLE', False): - self.client.put_object(Bucket='foo', Key='bar', Body='baz') + with self.http_stubber: + self.client.put_object(Bucket='foo', Key='bar', Body='baz') sent_headers = self.get_sent_headers() unsigned = 'UNSIGNED-PAYLOAD' self.assertNotEqual(sent_headers['x-amz-content-sha256'], unsigned) @@ -206,17 +194,15 @@ class TestCanSendIntegerHeaders(BaseSessionTest): def test_int_values_with_sigv4(self): s3 = self.session.create_client( 's3', config=Config(signature_version='s3v4')) - with mock.patch('botocore.endpoint.Session.send') as mock_send: - mock_send.return_value = mock.Mock(status_code=200, - content=b'', - headers={}) + with ClientHTTPStubber(s3) as http_stubber: + http_stubber.add_response() s3.upload_part(Bucket='foo', Key='bar', Body=b'foo', UploadId='bar', PartNumber=1, ContentLength=3) - headers = mock_send.call_args[0][0].headers + headers = http_stubber.requests[0].headers # Verify that the request integer value of 3 has been converted to # string '3'. This also means we've made it pass the signer which # expects string values in order to sign properly. 
- self.assertEqual(headers['Content-Length'], '3') + self.assertEqual(headers['Content-Length'], b'3') @@ -228,186 +214,151 @@ class TestRegionRedirect(BaseS3OperationTest): signature_version='s3v4', s3={'addressing_style': 'path'}, )) + self.http_stubber = ClientHTTPStubber(self.client) - self.redirect_response = mock.Mock() - self.redirect_response.headers = { - 'x-amz-bucket-region': 'eu-central-1' + self.redirect_response = { + 'status': 301, + 'headers': {'x-amz-bucket-region': 'eu-central-1'}, + 'body': ( + b'\n' + b'' + b' PermanentRedirect' + b' The bucket you are attempting to access must be' + b' addressed using the specified endpoint. Please send ' + b' all future requests to this endpoint.' + b' ' + b' foo' + b' foo.s3.eu-central-1.amazonaws.com' + b'' + ) } - self.redirect_response.status_code = 301 - self.redirect_response.content = ( - b'\n' - b'' - b' PermanentRedirect' - b' The bucket you are attempting to access must be ' - b' addressed using the specified endpoint. Please send all ' - b' future requests to this endpoint.' 
- b' ' - b' foo' - b' foo.s3.eu-central-1.amazonaws.com' - b'') - - self.bad_signing_region_response = mock.Mock() - self.bad_signing_region_response.headers = { - 'x-amz-bucket-region': 'eu-central-1' + self.bad_signing_region_response = { + 'status': 400, + 'headers': {'x-amz-bucket-region': 'eu-central-1'}, + 'body': ( + b'' + b'' + b' AuthorizationHeaderMalformed' + b' the region us-west-2 is wrong; ' + b'expecting eu-central-1' + b' eu-central-1' + b' BD9AA1730D454E39' + b' ' + b'' + ) + } + self.success_response = { + 'status': 200, + 'headers': {}, + 'body': ( + b'\n' + b'' + b' foo' + b' ' + b' ' + b' 1000' + b' url' + b' false' + b'' + ) } - self.bad_signing_region_response.status_code = 400 - self.bad_signing_region_response.content = ( - b'' - b'' - b' AuthorizationHeaderMalformed' - b' the region us-west-2 is wrong; ' - b'expecting eu-central-1' - b' eu-central-1' - b' BD9AA1730D454E39' - b' ' - b'' - ) - - self.success_response = mock.Mock() - self.success_response.headers = {} - self.success_response.status_code = 200 - self.success_response.content = ( - b'\n' - b'' - b' foo' - b' ' - b' ' - b' 1000' - b' url' - b' false' - b'') - - def create_response(self, content=b'', - status_code=200, headers=None): - response = mock.Mock() - if headers is None: - headers = {} - response.headers = headers - response.content = content - response.status_code = status_code - return response def test_region_redirect(self): - self.http_session_send_mock.side_effect = [ - self.redirect_response, self.success_response] - response = self.client.list_objects(Bucket='foo') + self.http_stubber.add_response(**self.redirect_response) + self.http_stubber.add_response(**self.success_response) + with self.http_stubber: + response = self.client.list_objects(Bucket='foo') self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) - self.assertEqual(self.http_session_send_mock.call_count, 2) + self.assertEqual(len(self.http_stubber.requests), 2) - calls = [c[0][0] for c 
in self.http_session_send_mock.call_args_list] initial_url = ('https://s3.us-west-2.amazonaws.com/foo' '?encoding-type=url') - self.assertEqual(calls[0].url, initial_url) + self.assertEqual(self.http_stubber.requests[0].url, initial_url) fixed_url = ('https://s3.eu-central-1.amazonaws.com/foo' '?encoding-type=url') - self.assertEqual(calls[1].url, fixed_url) + self.assertEqual(self.http_stubber.requests[1].url, fixed_url) def test_region_redirect_cache(self): - self.http_session_send_mock.side_effect = [ - self.redirect_response, self.success_response, - self.success_response] + self.http_stubber.add_response(**self.redirect_response) + self.http_stubber.add_response(**self.success_response) + self.http_stubber.add_response(**self.success_response) + + with self.http_stubber: + first_response = self.client.list_objects(Bucket='foo') + second_response = self.client.list_objects(Bucket='foo') - first_response = self.client.list_objects(Bucket='foo') self.assertEqual( first_response['ResponseMetadata']['HTTPStatusCode'], 200) - second_response = self.client.list_objects(Bucket='foo') self.assertEqual( second_response['ResponseMetadata']['HTTPStatusCode'], 200) - self.assertEqual(self.http_session_send_mock.call_count, 3) - calls = [c[0][0] for c in self.http_session_send_mock.call_args_list] + self.assertEqual(len(self.http_stubber.requests), 3) initial_url = ('https://s3.us-west-2.amazonaws.com/foo' '?encoding-type=url') - self.assertEqual(calls[0].url, initial_url) + self.assertEqual(self.http_stubber.requests[0].url, initial_url) fixed_url = ('https://s3.eu-central-1.amazonaws.com/foo' '?encoding-type=url') - self.assertEqual(calls[1].url, fixed_url) - self.assertEqual(calls[2].url, fixed_url) + self.assertEqual(self.http_stubber.requests[1].url, fixed_url) + self.assertEqual(self.http_stubber.requests[2].url, fixed_url) def test_resign_request_with_region_when_needed(self): - self.http_session_send_mock.side_effect = [ - self.bad_signing_region_response, 
self.success_response, - ] # Create a client with no explicit configuration so we can # verify the default behavior. - client = self.session.create_client( - 's3', 'us-west-2') - first_response = client.list_objects(Bucket='foo') - self.assertEqual( - first_response['ResponseMetadata']['HTTPStatusCode'], 200) + client = self.session.create_client('s3', 'us-west-2') + with ClientHTTPStubber(client) as http_stubber: + http_stubber.add_response(**self.bad_signing_region_response) + http_stubber.add_response(**self.success_response) + first_response = client.list_objects(Bucket='foo') + self.assertEqual( + first_response['ResponseMetadata']['HTTPStatusCode'], 200) - self.assertEqual(self.http_session_send_mock.call_count, 2) - calls = [c[0][0] for c in self.http_session_send_mock.call_args_list] - initial_url = ('https://foo.s3.us-west-2.amazonaws.com/' - '?encoding-type=url') - self.assertEqual(calls[0].url, initial_url) + self.assertEqual(len(http_stubber.requests), 2) + initial_url = ('https://foo.s3.us-west-2.amazonaws.com/' + '?encoding-type=url') + self.assertEqual(http_stubber.requests[0].url, initial_url) - fixed_url = ('https://foo.s3.eu-central-1.amazonaws.com/' - '?encoding-type=url') - self.assertEqual(calls[1].url, fixed_url) + fixed_url = ('https://foo.s3.eu-central-1.amazonaws.com/' + '?encoding-type=url') + self.assertEqual(http_stubber.requests[1].url, fixed_url) def test_resign_request_in_us_east_1(self): - bad_request_response = self.create_response(status_code=400) - bad_head_bucket_response = self.create_response( - status_code=400, - headers={'x-amz-bucket-region': 'eu-central-1'} - ) - head_bucket_response = self.create_response( - headers={ - 'x-amz-bucket-region': 'eu-central-1' - }, - status_code=200, - ) - request_response = self.create_response(status_code=200) - self.http_session_send_mock.side_effect = [ - bad_request_response, - bad_head_bucket_response, - head_bucket_response, - request_response, - ] + region_headers = 
{'x-amz-bucket-region': 'eu-central-1'} # Verify that the default behavior in us-east-1 will redirect client = self.session.create_client('s3', 'us-east-1') - response = client.head_object(Bucket='foo', Key='bar') - self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) + with ClientHTTPStubber(client) as http_stubber: + http_stubber.add_response(status=400) + http_stubber.add_response(status=400, headers=region_headers) + http_stubber.add_response(headers=region_headers) + http_stubber.add_response() + response = client.head_object(Bucket='foo', Key='bar') + self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) - self.assertEqual(self.http_session_send_mock.call_count, 4) - calls = [c[0][0] for c in self.http_session_send_mock.call_args_list] - initial_url = ('https://foo.s3.amazonaws.com/bar') - self.assertEqual(calls[0].url, initial_url) + self.assertEqual(len(http_stubber.requests), 4) + initial_url = ('https://foo.s3.amazonaws.com/bar') + self.assertEqual(http_stubber.requests[0].url, initial_url) - fixed_url = ('https://foo.s3.eu-central-1.amazonaws.com/bar') - self.assertEqual(calls[-1].url, fixed_url) + fixed_url = ('https://foo.s3.eu-central-1.amazonaws.com/bar') + self.assertEqual(http_stubber.requests[-1].url, fixed_url) def test_resign_request_in_us_east_1_fails(self): - bad_request_response = self.create_response(status_code=400) - bad_head_bucket_response = self.create_response( - status_code=400, - headers={'x-amz-bucket-region': 'eu-central-1'} - ) - head_bucket_response = self.create_response( - headers={ - 'x-amz-bucket-region': 'eu-central-1' - } - ) - # The final request still fails with a 400. 
- request_response = self.create_response(status_code=400) - - self.http_session_send_mock.side_effect = [ - bad_request_response, - bad_head_bucket_response, - head_bucket_response, - request_response, - ] + region_headers = {'x-amz-bucket-region': 'eu-central-1'} # Verify that the final 400 response is propagated # back to the user. client = self.session.create_client('s3', 'us-east-1') - with self.assertRaises(ClientError) as e: - client.head_object(Bucket='foo', Key='bar') + with ClientHTTPStubber(client) as http_stubber: + http_stubber.add_response(status=400) + http_stubber.add_response(status=400, headers=region_headers) + http_stubber.add_response(headers=region_headers) + # The final request still fails with a 400. + http_stubber.add_response(status=400) + with self.assertRaises(ClientError) as e: + client.head_object(Bucket='foo', Key='bar') + self.assertEqual(len(http_stubber.requests), 4) class TestGeneratePresigned(BaseS3OperationTest): @@ -860,10 +811,6 @@ def _verify_expected_endpoint_url(region, bucket, key, s3_config, is_secure=True, customer_provided_endpoint=None, expected_url=None, signature_version=None): - http_response = mock.Mock() - http_response.status_code = 200 - http_response.headers = {} - http_response.content = b'' environ = {} with mock.patch('os.environ', environ): environ['AWS_ACCESS_KEY_ID'] = 'access_key' @@ -879,12 +826,10 @@ def _verify_expected_endpoint_url(region, bucket, key, s3_config, s3 = session.create_client('s3', region_name=region, use_ssl=is_secure, config=config, endpoint_url=customer_provided_endpoint) - with mock.patch('botocore.endpoint.Session.send') as mock_send: - mock_send.return_value = http_response - s3.put_object(Bucket=bucket, - Key=key, Body=b'bar') - request_sent = mock_send.call_args[0][0] - assert_equal(request_sent.url, expected_url) + with ClientHTTPStubber(s3) as http_stubber: + http_stubber.add_response() + s3.put_object(Bucket=bucket, Key=key, Body=b'bar') + 
assert_equal(http_stubber.requests[0].url, expected_url) def _create_s3_client(region, is_secure, endpoint_url, s3_config, diff --git a/tests/integration/test_client.py b/tests/integration/test_client.py index 8fb34860..bbdb5d50 100644 --- a/tests/integration/test_client.py +++ b/tests/integration/test_client.py @@ -219,3 +219,16 @@ class TestClientInjection(unittest.TestCase): # We should now have access to the extra_client_method above. self.assertEqual(client.extra_client_method('foo'), 'foo') + + +class TestMixedEndpointCasing(unittest.TestCase): + def setUp(self): + self.url = 'https://EC2.US-WEST-2.amazonaws.com/' + self.session = botocore.session.get_session() + self.client = self.session.create_client('ec2', 'us-west-2', + endpoint_url=self.url) + + def test_sigv4_is_correct_when_mixed_endpoint_casing(self): + res = self.client.describe_regions() + status_code = res['ResponseMetadata']['HTTPStatusCode'] + self.assertEqual(status_code, 200) diff --git a/tests/integration/test_client_http.py b/tests/integration/test_client_http.py new file mode 100644 index 00000000..e5c96f75 --- /dev/null +++ b/tests/integration/test_client_http.py @@ -0,0 +1,224 @@ +import select +import socket +import contextlib +import threading +from tests import unittest +from contextlib import contextmanager + +import botocore.session +from botocore.config import Config +from botocore.vendored.six.moves import BaseHTTPServer, socketserver +from botocore.exceptions import ( + ConnectTimeoutError, ReadTimeoutError, EndpointConnectionError, + ConnectionClosedError, +) +from botocore.vendored.requests import exceptions as requests_exceptions + + +class TestClientHTTPBehavior(unittest.TestCase): + def setUp(self): + self.port = unused_port() + self.localhost = 'http://localhost:%s/' % self.port + self.session = botocore.session.get_session() + + def test_can_proxy_https_request_with_auth(self): + proxy_url = 'http://user:pass@localhost:%s/' % self.port + config = Config(proxies={'https': 
proxy_url}, region_name='us-west-1') + client = self.session.create_client('ec2', config=config) + + class AuthProxyHandler(ProxyHandler): + event = threading.Event() + + def validate_auth(self): + proxy_auth = self.headers.get('Proxy-Authorization') + return proxy_auth == 'Basic dXNlcjpwYXNz' + + try: + with background(run_server, args=(AuthProxyHandler, self.port)): + AuthProxyHandler.event.wait(timeout=60) + client.describe_regions() + except BackgroundTaskFailed: + self.fail('Background task did not exit, proxy was not used.') + + def _read_timeout_server(self): + config = Config( + read_timeout=0.1, + retries={'max_attempts': 0}, + region_name='us-weast-2', + ) + client = self.session.create_client('ec2', endpoint_url=self.localhost, + config=config) + client_call_ended_event = threading.Event() + + class FakeEC2(SimpleHandler): + event = threading.Event() + msg = b'' + + def get_length(self): + return len(self.msg) + + def get_body(self): + client_call_ended_event.wait(timeout=60) + return self.msg + + try: + with background(run_server, args=(FakeEC2, self.port)): + try: + FakeEC2.event.wait(timeout=60) + client.describe_regions() + finally: + client_call_ended_event.set() + except BackgroundTaskFailed: + self.fail('Fake EC2 service was not called.') + + def test_read_timeout_exception(self): + with self.assertRaises(ReadTimeoutError): + self._read_timeout_server() + + def test_old_read_timeout_exception(self): + with self.assertRaises(requests_exceptions.ReadTimeout): + self._read_timeout_server() + + @unittest.skip('The current implementation will fail to timeout on linux') + def test_connect_timeout_exception(self): + config = Config( + connect_timeout=0.2, + retries={'max_attempts': 0}, + region_name='us-weast-2', + ) + client = self.session.create_client('ec2', endpoint_url=self.localhost, + config=config) + server_bound_event = threading.Event() + client_call_ended_event = threading.Event() + + def no_accept_server(): + sock = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(('', self.port)) + server_bound_event.set() + client_call_ended_event.wait(timeout=60) + sock.close() + + with background(no_accept_server): + server_bound_event.wait(timeout=60) + with self.assertRaises(ConnectTimeoutError): + client.describe_regions() + client_call_ended_event.set() + + def test_invalid_host_gaierror(self): + config = Config(retries={'max_attempts': 0}, region_name='us-weast-1') + endpoint = 'https://ec2.us-weast-1.amazonaws.com/' + client = self.session.create_client('ec2', endpoint_url=endpoint, + config=config) + with self.assertRaises(EndpointConnectionError): + client.describe_regions() + + def test_bad_status_line(self): + config = Config(retries={'max_attempts': 0}, region_name='us-weast-2') + client = self.session.create_client('ec2', endpoint_url=self.localhost, + config=config) + + class BadStatusHandler(BaseHTTPServer.BaseHTTPRequestHandler): + event = threading.Event() + + def do_POST(self): + self.wfile.write(b'garbage') + + with background(run_server, args=(BadStatusHandler, self.port)): + with self.assertRaises(ConnectionClosedError): + BadStatusHandler.event.wait(timeout=60) + client.describe_regions() + + +def unused_port(): + with contextlib.closing(socket.socket()) as sock: + sock.bind(('127.0.0.1', 0)) + return sock.getsockname()[1] + + +class SimpleHandler(BaseHTTPServer.BaseHTTPRequestHandler): + status = 200 + + def get_length(self): + return 0 + + def get_body(self): + return b'' + + def do_GET(self): + length = str(self.get_length()) + self.send_response(self.status) + self.send_header('Content-Length', length) + self.end_headers() + self.wfile.write(self.get_body()) + + do_POST = do_PUT = do_GET + + +class ProxyHandler(BaseHTTPServer.BaseHTTPRequestHandler): + tunnel_chunk_size = 1024 + + def _tunnel(self, client, remote): + client.setblocking(0) + remote.setblocking(0) + sockets = [client, remote] + 
while True: + readable, writeable, _ = select.select(sockets, sockets, [], 1) + if client in readable and remote in writeable: + client_bytes = client.recv(self.tunnel_chunk_size) + if not client_bytes: + break + remote.sendall(client_bytes) + if remote in readable and client in writeable: + remote_bytes = remote.recv(self.tunnel_chunk_size) + if not remote_bytes: + break + client.sendall(remote_bytes) + + def do_CONNECT(self): + if not self.validate_auth(): + self.send_response(401) + self.end_headers() + return + + self.send_response(200) + self.end_headers() + + remote_host, remote_port = self.path.split(':') + remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + remote_socket.connect((remote_host, int(remote_port))) + + self._tunnel(self.request, remote_socket) + remote_socket.close() + + def validate_auth(self): + return True + + +class BackgroundTaskFailed(Exception): + pass + + +@contextmanager +def background(target, args=(), timeout=60): + thread = threading.Thread(target=target, args=args) + thread.daemon = True + thread.start() + try: + yield target + finally: + thread.join(timeout=timeout) + if thread.is_alive(): + msg = 'Background task did not exit in a timely manner.' + raise BackgroundTaskFailed(msg) + + +def run_server(handler, port): + address = ('', port) + httpd = socketserver.TCPServer(address, handler, bind_and_activate=False) + httpd.allow_reuse_address = True + httpd.server_bind() + httpd.server_activate() + handler.event.set() + httpd.handle_request() + httpd.server_close() diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py index 1f07ef0e..1a007aaf 100644 --- a/tests/integration/test_s3.py +++ b/tests/integration/test_s3.py @@ -11,7 +11,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from tests import unittest, temporary_file, random_chars +from tests import unittest, temporary_file, random_chars, ClientHTTPStubber import os import time from collections import defaultdict @@ -25,8 +25,8 @@ from contextlib import closing from nose.plugins.attrib import attr -from botocore.vendored.requests import adapters -from botocore.vendored.requests.exceptions import ConnectionError +from botocore.endpoint import Endpoint +from botocore.exceptions import ConnectionClosedError from botocore.compat import six, zip_longest import botocore.session import botocore.auth @@ -315,6 +315,11 @@ class TestS3Objects(TestS3BaseWithBucket): body = '*' * (5 * (1024 ** 2)) self.assert_can_put_object(body) + def test_can_put_object_bytearray(self): + body_bytes = b'*' * 1024 + body = bytearray(body_bytes) + self.assert_can_put_object(body) + def test_get_object_stream_wrapper(self): self.create_object('foobarbaz', body='body contents') response = self.client.get_object( @@ -411,6 +416,21 @@ class TestS3Objects(TestS3BaseWithBucket): self.assertEqual(len(parsed['Contents']), 1) self.assertEqual(parsed['Contents'][0]['Key'], 'foo%08') + def test_unicode_system_character_with_list_v2(self): + # Verify we can use a unicode system character which would normally + # break the xml parser + key_name = 'foo\x08' + self.create_object(key_name) + self.addCleanup(self.delete_object, key_name, self.bucket_name) + parsed = self.client.list_objects_v2(Bucket=self.bucket_name) + self.assertEqual(len(parsed['Contents']), 1) + self.assertEqual(parsed['Contents'][0]['Key'], key_name) + + parsed = self.client.list_objects_v2(Bucket=self.bucket_name, + EncodingType='url') + self.assertEqual(len(parsed['Contents']), 1) + self.assertEqual(parsed['Contents'][0]['Key'], 'foo%08') + def test_thread_safe_auth(self): self.auth_paths = [] self.session.register('before-sign', self.increment_auth) @@ -799,6 +819,7 @@ class TestS3SigV4Client(BaseS3ClientTest): super(TestS3SigV4Client, self).setUp() 
self.client = self.session.create_client( 's3', self.region, config=Config(signature_version='s3v4')) + self.http_stubber = ClientHTTPStubber(self.client) def test_can_get_bucket_location(self): # Even though the bucket is in us-west-2, we should still be able to @@ -812,19 +833,10 @@ class TestS3SigV4Client(BaseS3ClientTest): def test_request_retried_for_sigv4(self): body = six.BytesIO(b"Hello world!") - - original_send = adapters.HTTPAdapter.send - state = mock.Mock() - state.error_raised = False - - def mock_http_adapter_send(self, *args, **kwargs): - if not state.error_raised: - state.error_raised = True - raise ConnectionError("Simulated ConnectionError raised.") - else: - return original_send(self, *args, **kwargs) - with mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send', - mock_http_adapter_send): + exception = ConnectionClosedError(endpoint_url='') + self.http_stubber.responses.append(exception) + self.http_stubber.responses.append(None) + with self.http_stubber: response = self.client.put_object(Bucket=self.bucket_name, Key='foo.txt', Body=body) self.assert_status_code(response, 200) diff --git a/tests/integration/test_smoke.py b/tests/integration/test_smoke.py index 28965371..12af3dcf 100644 --- a/tests/integration/test_smoke.py +++ b/tests/integration/test_smoke.py @@ -16,11 +16,12 @@ from pprint import pformat import warnings from nose.tools import assert_equal, assert_true +from tests import ClientHTTPStubber from botocore import xform_name import botocore.session from botocore.client import ClientError -from botocore.vendored.requests import adapters -from botocore.vendored.requests.exceptions import ConnectionError +from botocore.endpoint import Endpoint +from botocore.exceptions import ConnectionClosedError # Mapping of service -> api calls to try. 
@@ -91,6 +92,7 @@ SMOKE_TESTS = { 's3': {'ListBuckets': {}}, 'sdb': {'ListDomains': {}}, 'ses': {'ListIdentities': {}}, + 'shield': {'GetSubscriptionState': {}}, 'sns': {'ListTopics': {}}, 'sqs': {'ListQueues': {}}, 'ssm': {'ListDocuments': {}}, @@ -285,15 +287,10 @@ def test_client_can_retry_request_properly(): def _make_client_call_with_errors(client, operation_name, kwargs): operation = getattr(client, xform_name(operation_name)) - original_send = adapters.HTTPAdapter.send - def mock_http_adapter_send(self, *args, **kwargs): - if not getattr(self, '_integ_test_error_raised', False): - self._integ_test_error_raised = True - raise ConnectionError("Simulated ConnectionError raised.") - else: - return original_send(self, *args, **kwargs) - with mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send', - mock_http_adapter_send): + exception = ConnectionClosedError(endpoint_url='https://mock.eror') + with ClientHTTPStubber(client) as http_stubber: + http_stubber.responses.append(exception) + http_stubber.responses.append(None) try: response = operation(**kwargs) except ClientError as e: diff --git a/tests/unit/auth/test_signers.py b/tests/unit/auth/test_signers.py index d3dfab90..3b34942e 100644 --- a/tests/unit/auth/test_signers.py +++ b/tests/unit/auth/test_signers.py @@ -24,7 +24,6 @@ import botocore.auth import botocore.credentials from botocore.compat import HTTPHeaders, urlsplit, parse_qs, six from botocore.awsrequest import AWSRequest -from botocore.vendored.requests.models import Request class BaseTestWithFixedDate(unittest.TestCase): @@ -198,7 +197,7 @@ class TestSigV2(unittest.TestCase): u'VCtWuwaOL0yMffAT8W4y0AFW3W4KUykBqah9S40rB+Q=')) def test_fields(self): - request = Request() + request = AWSRequest() request.url = '/' request.method = 'POST' request.data = {'Foo': u'\u2713'} @@ -213,7 +212,7 @@ class TestSigV2(unittest.TestCase): def test_resign(self): # Make sure that resigning after e.g. 
retries works - request = Request() + request = AWSRequest() request.url = '/' request.method = 'POST' params = { @@ -226,7 +225,7 @@ class TestSigV2(unittest.TestCase): u'VCtWuwaOL0yMffAT8W4y0AFW3W4KUykBqah9S40rB+Q=')) def test_get(self): - request = Request() + request = AWSRequest() request.url = '/' request.method = 'GET' request.params = {'Foo': u'\u2713'} diff --git a/tests/unit/docs/__init__.py b/tests/unit/docs/__init__.py index 918cb5bd..30e318bf 100644 --- a/tests/unit/docs/__init__.py +++ b/tests/unit/docs/__init__.py @@ -92,7 +92,8 @@ class BaseDocsTest(unittest.TestCase): 'signatureVersion': 'v4', 'serviceFullName': 'AWS MyService', 'uid': 'myservice-2014-01-01', - 'protocol': 'query' + 'protocol': 'query', + 'serviceId': 'MyService', }, 'operations': { 'SampleOperation': { diff --git a/tests/unit/test_args.py b/tests/unit/test_args.py index 2b854d40..7eb02508 100644 --- a/tests/unit/test_args.py +++ b/tests/unit/test_args.py @@ -16,12 +16,61 @@ from tests import unittest import mock from botocore import args +from botocore.client import ClientEndpointBridge from botocore.config import Config +from botocore.hooks import HierarchicalEmitter +from botocore.model import ServiceModel class TestCreateClientArgs(unittest.TestCase): def setUp(self): - self.args_create = args.ClientArgsCreator(None, None, None, None, None) + self.event_emitter = mock.Mock(HierarchicalEmitter) + self.args_create = args.ClientArgsCreator( + self.event_emitter, None, None, None, None) + self.region = 'us-west-2' + self.endpoint_url = 'https://ec2/' + self.service_model = mock.Mock(ServiceModel) + self.service_model.metadata = { + 'serviceFullName': 'MyService', + 'protocol': 'query' + } + self.service_model.operation_names = [] + self.bridge = mock.Mock(ClientEndpointBridge) + self.bridge.resolve.return_value = { + 'region_name': self.region, 'signature_version': 'v4', + 'endpoint_url': self.endpoint_url, + 'signing_name': 'ec2', 'signing_region': self.region, + 'metadata': {}} + 
+ def call_get_client_args(self, **override_kwargs): + call_kwargs = { + 'service_model': self.service_model, + 'region_name': self.region, + 'is_secure': True, + 'endpoint_url': self.endpoint_url, + 'verify': True, + 'credentials': None, + 'scoped_config': {}, + 'client_config': None, + 'endpoint_bridge': self.bridge + } + call_kwargs.update(**override_kwargs) + return self.args_create.get_client_args(**call_kwargs) + + def assert_create_endpoint_call(self, mock_endpoint, **override_kwargs): + call_kwargs = { + 'endpoint_url': self.endpoint_url, + 'region_name': self.region, + 'response_parser_factory': None, + 'timeout': (60, 60), + 'verify': True, + 'max_pool_connections': 10, + 'proxies': None, + } + call_kwargs.update(**override_kwargs) + mock_endpoint.return_value.create_endpoint.assert_called_with( + self.service_model, **call_kwargs + ) def test_compute_s3_configuration(self): scoped_config = {} @@ -106,68 +155,23 @@ class TestCreateClientArgs(unittest.TestCase): ) def test_max_pool_from_client_config_forwarded_to_endpoint_creator(self): - args_create = args.ClientArgsCreator( - mock.Mock(), None, None, None, None) config = botocore.config.Config(max_pool_connections=20) - service_model = mock.Mock() - service_model.metadata = { - 'serviceFullName': 'MyService', - 'protocol': 'query' - } - service_model.operation_names = [] - bridge = mock.Mock() - bridge.resolve.return_value = { - 'region_name': 'us-west-2', 'signature_version': 'v4', - 'endpoint_url': 'https://ec2/', - 'signing_name': 'ec2', 'signing_region': 'us-west-2', - 'metadata': {}} with mock.patch('botocore.args.EndpointCreator') as m: - args_create.get_client_args( - service_model, 'us-west-2', True, 'https://ec2/', True, - None, {}, config, bridge) - m.return_value.create_endpoint.assert_called_with( - mock.ANY, endpoint_url='https://ec2/', region_name='us-west-2', - response_parser_factory=None, timeout=(60, 60), verify=True, - max_pool_connections=20, proxies=None - ) + 
self.call_get_client_args(client_config=config) + self.assert_create_endpoint_call(m, max_pool_connections=20) def test_proxies_from_client_config_forwarded_to_endpoint_creator(self): - args_create = args.ClientArgsCreator( - mock.Mock(), None, None, None, None) proxies = {'http': 'http://foo.bar:1234', 'https': 'https://foo.bar:4321'} config = botocore.config.Config(proxies=proxies) - service_model = mock.Mock() - service_model.metadata = { - 'serviceFullName': 'MyService', - 'protocol': 'query' - } - service_model.operation_names = [] - bridge = mock.Mock() - bridge.resolve.return_value = { - 'region_name': 'us-west-2', 'signature_version': 'v4', - 'endpoint_url': 'https://ec2/', - 'signing_name': 'ec2', 'signing_region': 'us-west-2', - 'metadata': {}} with mock.patch('botocore.args.EndpointCreator') as m: - args_create.get_client_args( - service_model, 'us-west-2', True, 'https://ec2/', True, - None, {}, config, bridge) - m.return_value.create_endpoint.assert_called_with( - mock.ANY, endpoint_url='https://ec2/', region_name='us-west-2', - response_parser_factory=None, timeout=(60, 60), verify=True, - proxies=proxies, max_pool_connections=10 - ) + self.call_get_client_args(client_config=config) + self.assert_create_endpoint_call(m, proxies=proxies) def test_s3_with_endpoint_url_still_resolves_region(self): - self.args_create = args.ClientArgsCreator( - mock.Mock(), None, None, None, None) - service_model = mock.Mock() - service_model.endpoint_prefix = 's3' - service_model.metadata = {'protocol': 'rest-xml'} - config = botocore.config.Config() - bridge = mock.Mock() - bridge.resolve.side_effect = [ + self.service_model.endpoint_prefix = 's3' + self.service_model.metadata = {'protocol': 'rest-xml'} + self.bridge.resolve.side_effect = [ { 'region_name': None, 'signature_version': 's3v4', 'endpoint_url': 'http://other.com/', 'signing_name': 's3', @@ -180,47 +184,25 @@ class TestCreateClientArgs(unittest.TestCase): 'metadata': {} } ] - client_args = 
self.args_create.get_client_args( - service_model, 'us-west-2', True, 'http://other.com/', True, None, - {}, config, bridge) + client_args = self.call_get_client_args( + endpoint_url='http://other.com/') self.assertEqual( client_args['client_config'].region_name, 'us-west-2') def test_region_does_not_resolve_if_not_s3_and_endpoint_url_provided(self): - self.args_create = args.ClientArgsCreator( - mock.Mock(), None, None, None, None) - service_model = mock.Mock() - service_model.endpoint_prefix = 'ec2' - service_model.metadata = {'protocol': 'query'} - config = botocore.config.Config() - bridge = mock.Mock() - bridge.resolve.side_effect = [{ + self.service_model.endpoint_prefix = 'ec2' + self.service_model.metadata = {'protocol': 'query'} + self.bridge.resolve.side_effect = [{ 'region_name': None, 'signature_version': 'v4', 'endpoint_url': 'http://other.com/', 'signing_name': 'ec2', 'signing_region': None, 'metadata': {} }] - client_args = self.args_create.get_client_args( - service_model, 'us-west-2', True, 'http://other.com/', True, None, - {}, config, bridge) + client_args = self.call_get_client_args( + endpoint_url='http://other.com/') self.assertEqual(client_args['client_config'].region_name, None) def test_provide_retry_config(self): - self.args_create = args.ClientArgsCreator( - mock.Mock(), None, None, None, None) - service_model = mock.Mock() - service_model.endpoint_prefix = 'ec2' - service_model.metadata = {'protocol': 'query'} - config = botocore.config.Config( - retries={'max_attempts': 10} - ) - bridge = mock.Mock() - bridge.resolve.side_effect = [{ - 'region_name': None, 'signature_version': 'v4', - 'endpoint_url': 'http://other.com/', 'signing_name': 'ec2', - 'signing_region': None, 'metadata': {} - }] - client_args = self.args_create.get_client_args( - service_model, 'us-west-2', True, 'https://ec2/', True, None, - {}, config, bridge) + config = botocore.config.Config(retries={'max_attempts': 10}) + client_args = 
self.call_get_client_args(client_config=config) self.assertEqual( client_args['client_config'].retries, {'max_attempts': 10}) diff --git a/tests/unit/test_auth_sigv4.py b/tests/unit/test_auth_sigv4.py new file mode 100644 index 00000000..7c253ee9 --- /dev/null +++ b/tests/unit/test_auth_sigv4.py @@ -0,0 +1,33 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +from tests import unittest + +from botocore.auth import SigV4Auth +from botocore.awsrequest import AWSRequest +from botocore.credentials import Credentials + +SECRET_KEY = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" +ACCESS_KEY = 'AKIDEXAMPLE' + + +class TestSigV4Auth(unittest.TestCase): + def setUp(self): + self.credentials = Credentials(ACCESS_KEY, SECRET_KEY) + self.sigv4 = SigV4Auth(self.credentials, 'host', 'us-weast-1') + + def test_signed_host_is_lowercase(self): + endpoint = 'https://S5.Us-WeAsT-2.AmAZonAwS.com' + expected_host = 's5.us-weast-2.amazonaws.com' + request = AWSRequest(method='GET', url=endpoint) + headers_to_sign = self.sigv4.headers_to_sign(request) + self.assertEqual(expected_host, headers_to_sign.get('host')) diff --git a/tests/unit/test_awsrequest.py b/tests/unit/test_awsrequest.py index d37aa24d..a4408bf3 100644 --- a/tests/unit/test_awsrequest.py +++ b/tests/unit/test_awsrequest.py @@ -21,10 +21,11 @@ import socket import sys from mock import Mock, patch +from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool from botocore.exceptions import 
UnseekableStreamError -from botocore.awsrequest import AWSRequest, AWSPreparedRequest -from botocore.awsrequest import AWSHTTPConnection +from botocore.awsrequest import AWSRequest, AWSPreparedRequest, AWSResponse +from botocore.awsrequest import AWSHTTPConnection, AWSHTTPSConnection, HeadersDict from botocore.awsrequest import prepare_request_dict, create_request_object from botocore.compat import file_type, six @@ -98,53 +99,137 @@ class Seekable(object): class TestAWSRequest(unittest.TestCase): - def setUp(self): self.tempdir = tempfile.mkdtemp() - self.request = AWSRequest(url='http://example.com') - self.prepared_request = self.request.prepare() self.filename = os.path.join(self.tempdir, 'foo') + self.request = AWSRequest(method='GET', url='http://example.com') + self.prepared_request = self.request.prepare() def tearDown(self): shutil.rmtree(self.tempdir) - def test_should_reset_stream(self): + def test_prepared_request_repr(self): + expected_repr = ( + '' + ) + request_repr = repr(self.prepared_request) + self.assertEqual(request_repr, expected_repr) + + def test_can_prepare_url_params(self): + request = AWSRequest(url='http://example.com/', params={'foo': 'bar'}) + prepared_request = request.prepare() + self.assertEqual(prepared_request.url, 'http://example.com/?foo=bar') + + def test_can_prepare_dict_body(self): + body = {'dead': 'beef'} + request = AWSRequest(url='http://example.com/', data=body) + prepared_request = request.prepare() + self.assertEqual(prepared_request.body, 'dead=beef') + + def test_can_prepare_dict_body_unicode_values(self): + body = {'Text': u'\u30c6\u30b9\u30c8 string'} + expected_body = 'Text=%E3%83%86%E3%82%B9%E3%83%88+string' + request = AWSRequest(url='http://example.com/', data=body) + prepared_request = request.prepare() + self.assertEqual(prepared_request.body, expected_body) + + def test_can_prepare_dict_body_unicode_keys(self): + body = {u'\u30c6\u30b9\u30c8': 'string'} + expected_body = '%E3%83%86%E3%82%B9%E3%83%88=string' 
+ request = AWSRequest(url='http://example.com/', data=body) + prepared_request = request.prepare() + self.assertEqual(prepared_request.body, expected_body) + + def test_can_prepare_empty_body(self): + request = AWSRequest(url='http://example.com/', data=b'') + prepared_request = request.prepare() + self.assertEqual(prepared_request.body, None) + content_length = prepared_request.headers.get('content-length') + self.assertEqual(content_length, '0') + + def test_request_body_is_prepared(self): + request = AWSRequest(url='http://example.com/', data='body') + self.assertEqual(request.body, b'body') + + def test_prepare_body_content_adds_content_length(self): + content = b'foobarbaz' + expected_len = str(len(content)) with open(self.filename, 'wb') as f: - f.write(b'foobarbaz') + f.write(content) + with open(self.filename, 'rb') as f: + data = Seekable(f) + self.request.data = data + self.request.method = 'POST' + prepared_request = self.request.prepare() + calculated_len = prepared_request.headers['Content-Length'] + self.assertEqual(calculated_len, expected_len) + + def test_prepare_body_doesnt_override_content_length(self): + self.request.method = 'PUT' + self.request.headers['Content-Length'] = '20' + self.request.data = b'asdf' + prepared_request = self.request.prepare() + self.assertEqual(prepared_request.headers['Content-Length'], '20') + + def test_prepare_body_doesnt_set_content_length_head(self): + self.request.method = 'HEAD' + self.request.data = b'thisshouldntbehere' + prepared_request = self.request.prepare() + self.assertEqual(prepared_request.headers.get('Content-Length'), None) + + def test_prepare_body_doesnt_set_content_length_get(self): + self.request.method = 'GET' + self.request.data = b'thisshouldntbehere' + prepared_request = self.request.prepare() + self.assertEqual(prepared_request.headers.get('Content-Length'), None) + + def test_prepare_body_doesnt_set_content_length_options(self): + self.request.method = 'OPTIONS' + self.request.data = 
b'thisshouldntbehere' + prepared_request = self.request.prepare() + self.assertEqual(prepared_request.headers.get('Content-Length'), None) + + def test_can_reset_stream_handles_binary(self): + contents = b'notastream' + self.prepared_request.body = contents + self.prepared_request.reset_stream() + # assert the request body doesn't change after reset_stream is called + self.assertEqual(self.prepared_request.body, contents) + + def test_can_reset_stream_handles_bytearray(self): + contents = bytearray(b'notastream') + self.prepared_request.body = contents + self.prepared_request.reset_stream() + # assert the request body doesn't change after reset_stream is called + self.assertEqual(self.prepared_request.body, contents) + + def test_can_reset_stream(self): + contents = b'foobarbaz' + with open(self.filename, 'wb') as f: + f.write(contents) with open(self.filename, 'rb') as body: self.prepared_request.body = body - - # Now pretend we try to send the request. - # This means that we read the body: + # pretend the request body was partially sent body.read() - # And create a response object that indicates - # a redirect. - fake_response = Mock() - fake_response.status_code = 307 - - # Then requests calls our reset_stream hook. - self.prepared_request.reset_stream_on_redirect(fake_response) - - # The stream should now be reset. + self.assertNotEqual(body.tell(), 0) + # have the prepared request reset its stream + self.prepared_request.reset_stream() + # the stream should be reset self.assertEqual(body.tell(), 0) def test_cannot_reset_stream_raises_error(self): + contents = b'foobarbaz' with open(self.filename, 'wb') as f: - f.write(b'foobarbaz') + f.write(contents) with open(self.filename, 'rb') as body: self.prepared_request.body = Unseekable(body) - - # Now pretend we try to send the request. 
- # This means that we read the body: + # pretend the request body was partially sent body.read() - # And create a response object that indicates - # a redirect - fake_response = Mock() - fake_response.status_code = 307 - - # Then requests calls our reset_stream hook. + self.assertNotEqual(body.tell(), 0) + # reset stream should fail with self.assertRaises(UnseekableStreamError): - self.prepared_request.reset_stream_on_redirect(fake_response) + self.prepared_request.reset_stream() def test_duck_type_for_file_check(self): # As part of determining whether or not we can rewind a stream @@ -154,76 +239,43 @@ class TestAWSRequest(unittest.TestCase): class LooksLikeFile(object): def __init__(self): self.seek_called = False - def read(self, amount=None): pass - def seek(self, where): self.seek_called = True - looks_like_file = LooksLikeFile() self.prepared_request.body = looks_like_file - - fake_response = Mock() - fake_response.status_code = 307 - - # Then requests calls our reset_stream hook. - self.prepared_request.reset_stream_on_redirect(fake_response) - + self.prepared_request.reset_stream() # The stream should now be reset. 
self.assertTrue(looks_like_file.seek_called) -class TestAWSPreparedRequest(unittest.TestCase): +class TestAWSResponse(unittest.TestCase): def setUp(self): - self.tempdir = tempfile.mkdtemp() - self.filename = os.path.join(self.tempdir, 'foo') - self.request = AWSRequest(url='http://example.com') - self.prepared_request = AWSPreparedRequest(self.request) - self.prepared_request.prepare_headers(self.request.headers) + self.response = AWSResponse('http://url.com', 200, HeadersDict(), None) + self.response.raw = Mock() - def tearDown(self): - shutil.rmtree(self.tempdir) + def set_raw_stream(self, blobs): + def stream(*args, **kwargs): + for blob in blobs: + yield blob + self.response.raw.stream.return_value = stream() - def test_prepare_body_content_adds_content_length(self): - content = b'foobarbaz' - with open(self.filename, 'wb') as f: - f.write(content) - with open(self.filename, 'rb') as f: - data = Seekable(f) - self.prepared_request.prepare_body(data=data, files=None) - self.assertEqual( - self.prepared_request.headers['Content-Length'], - str(len(content))) + def test_content_property(self): + self.set_raw_stream([b'some', b'data']) + self.assertEqual(self.response.content, b'somedata') + self.assertEqual(self.response.content, b'somedata') + # assert that stream was not called more than once + self.assertEqual(self.response.raw.stream.call_count, 1) - def test_prepare_body_removes_transfer_encoding(self): - self.prepared_request.headers['Transfer-Encoding'] = 'chunked' - content = b'foobarbaz' - with open(self.filename, 'wb') as f: - f.write(content) - with open(self.filename, 'rb') as f: - data = Seekable(f) - self.prepared_request.prepare_body(data=data, files=None) - self.assertEqual( - self.prepared_request.headers['Content-Length'], - str(len(content))) - self.assertNotIn('Transfer-Encoding', self.prepared_request.headers) + def test_text_property(self): + self.set_raw_stream([b'\xe3\x82\xb8\xe3\x83\xa7\xe3\x82\xb0']) + 
self.response.headers['content-type'] = 'text/plain; charset=utf-8' + self.assertEquals(self.response.text, u'\u30b8\u30e7\u30b0') - def test_prepare_body_ignores_existing_transfer_encoding(self): - content = b'foobarbaz' - self.prepared_request.headers['Transfer-Encoding'] = 'chunked' - with open(self.filename, 'wb') as f: - f.write(content) - with open(self.filename, 'rb') as f: - self.prepared_request.prepare_body(data=f, files=None) - # The Transfer-Encoding should not be removed if Content-Length - # is not added via the custom logic in the ``prepare_body`` method. - # Note requests' ``prepare_body`` is the method that adds the - # Content-Length header for this case as the ``data`` is a - # regular file handle. - self.assertEqual( - self.prepared_request.headers['Transfer-Encoding'], - 'chunked') + def test_text_property_defaults_utf8(self): + self.set_raw_stream([b'\xe3\x82\xb8\xe3\x83\xa7\xe3\x82\xb0']) + self.assertEquals(self.response.text, u'\u30b8\u30e7\u30b0') class TestAWSHTTPConnection(unittest.TestCase): @@ -265,32 +317,36 @@ class TestAWSHTTPConnection(unittest.TestCase): return conn def test_expect_100_continue_returned(self): - with patch('select.select') as select_mock: + with patch('urllib3.util.wait_for_read') as wait_mock: # Shows the server first sending a 100 continue response # then a 200 ok response. 
s = FakeSocket(b'HTTP/1.1 100 Continue\r\n\r\nHTTP/1.1 200 OK\r\n') conn = AWSHTTPConnection('s3.amazonaws.com', 443) conn.sock = s - select_mock.return_value = ([s], [], []) + wait_mock.return_value = True conn.request('GET', '/bucket/foo', b'body', - {'Expect': '100-continue'}) + {'Expect': b'100-continue'}) response = conn.getresponse() + # Assert that we waited for the 100-continue response + self.assertEqual(wait_mock.call_count, 1) # Now we should verify that our final response is the 200 OK self.assertEqual(response.status, 200) def test_handles_expect_100_with_different_reason_phrase(self): - with patch('select.select') as select_mock: + with patch('urllib3.util.wait_for_read') as wait_mock: # Shows the server first sending a 100 continue response # then a 200 ok response. s = FakeSocket(b'HTTP/1.1 100 (Continue)\r\n\r\nHTTP/1.1 200 OK\r\n') conn = AWSHTTPConnection('s3.amazonaws.com', 443) conn.sock = s - select_mock.return_value = ([s], [], []) + wait_mock.return_value = True conn.request('GET', '/bucket/foo', six.BytesIO(b'body'), - {'Expect': '100-continue', 'Content-Length': '4'}) + {'Expect': b'100-continue', 'Content-Length': b'4'}) response = conn.getresponse() # Now we should verify that our final response is the 200 OK. self.assertEqual(response.status, 200) + # Assert that we waited for the 100-continue response + self.assertEqual(wait_mock.call_count, 1) # Verify that we went the request body because we got a 100 # continue. self.assertIn(b'body', s.sent_data) @@ -299,7 +355,7 @@ class TestAWSHTTPConnection(unittest.TestCase): # When using squid as an HTTP proxy, it will also send # a Connection: keep-alive header back with the 100 continue # response. We need to ensure we handle this case. - with patch('select.select') as select_mock: + with patch('urllib3.util.wait_for_read') as wait_mock: # Shows the server first sending a 100 continue response # then a 500 response. 
We're picking 500 to confirm we # actually parse the response instead of getting the @@ -311,16 +367,18 @@ class TestAWSHTTPConnection(unittest.TestCase): b'HTTP/1.1 500 Internal Service Error\r\n') conn = AWSHTTPConnection('s3.amazonaws.com', 443) conn.sock = s - select_mock.return_value = ([s], [], []) + wait_mock.return_value = True conn.request('GET', '/bucket/foo', b'body', - {'Expect': '100-continue'}) + {'Expect': b'100-continue'}) + # Assert that we waited for the 100-continue response + self.assertEqual(wait_mock.call_count, 1) response = conn.getresponse() self.assertEqual(response.status, 500) def test_expect_100_continue_sends_307(self): # This is the case where we send a 100 continue and the server # immediately sends a 307 - with patch('select.select') as select_mock: + with patch('urllib3.util.wait_for_read') as wait_mock: # Shows the server first sending a 100 continue response # then a 200 ok response. s = FakeSocket( @@ -328,15 +386,17 @@ class TestAWSHTTPConnection(unittest.TestCase): b'Location: http://example.org\r\n') conn = AWSHTTPConnection('s3.amazonaws.com', 443) conn.sock = s - select_mock.return_value = ([s], [], []) + wait_mock.return_value = True conn.request('GET', '/bucket/foo', b'body', - {'Expect': '100-continue'}) + {'Expect': b'100-continue'}) + # Assert that we waited for the 100-continue response + self.assertEqual(wait_mock.call_count, 1) response = conn.getresponse() # Now we should verify that our final response is the 307. self.assertEqual(response.status, 307) def test_expect_100_continue_no_response_from_server(self): - with patch('select.select') as select_mock: + with patch('urllib3.util.wait_for_read') as wait_mock: # Shows the server first sending a 100 continue response # then a 200 ok response. 
s = FakeSocket( @@ -344,12 +404,14 @@ class TestAWSHTTPConnection(unittest.TestCase): b'Location: http://example.org\r\n') conn = AWSHTTPConnection('s3.amazonaws.com', 443) conn.sock = s - # By settings select_mock to return empty lists, this indicates + # By settings wait_mock to return False, this indicates # that the server did not send any response. In this situation # we should just send the request anyways. - select_mock.return_value = ([], [], []) + wait_mock.return_value = False conn.request('GET', '/bucket/foo', b'body', - {'Expect': '100-continue'}) + {'Expect': b'100-continue'}) + # Assert that we waited for the 100-continue response + self.assertEqual(wait_mock.call_count, 1) response = conn.getresponse() self.assertEqual(response.status, 307) @@ -418,8 +480,7 @@ class TestAWSHTTPConnection(unittest.TestCase): conn.sock = s # Test that the standard library method was used by patching out # the ``_tunnel`` method and seeing if the std lib method was called. - with patch('botocore.vendored.requests.packages.urllib3.connection.' - 'HTTPConnection._tunnel') as mock_tunnel: + with patch('urllib3.connection.HTTPConnection._tunnel') as mock_tunnel: conn._tunnel() self.assertTrue(mock_tunnel.called) @@ -437,17 +498,18 @@ class TestAWSHTTPConnection(unittest.TestCase): def test_state_reset_on_connection_close(self): # This simulates what urllib3 does with connections # in its connection pool logic. - with patch('select.select') as select_mock: + with patch('urllib3.util.wait_for_read') as wait_mock: # First fast fail with a 500 response when we first # send the expect header. 
s = FakeSocket(b'HTTP/1.1 500 Internal Server Error\r\n') conn = AWSHTTPConnection('s3.amazonaws.com', 443) conn.sock = s - select_mock.return_value = ([s], [], []) + wait_mock.return_value = True conn.request('GET', '/bucket/foo', b'body', - {'Expect': '100-continue'}) + {'Expect': b'100-continue'}) + self.assertEqual(wait_mock.call_count, 1) response = conn.getresponse() self.assertEqual(response.status, 500) @@ -464,10 +526,12 @@ class TestAWSHTTPConnection(unittest.TestCase): # And we make a request, we should see the 200 response # that was sent back. - select_mock.return_value = ([new_conn], [], []) + wait_mock.return_value = True conn.request('GET', '/bucket/foo', b'body', - {'Expect': '100-continue'}) + {'Expect': b'100-continue'}) + # Assert that we waited for the 100-continue response + self.assertEqual(wait_mock.call_count, 2) response = conn.getresponse() # This should be 200. If it's a 500 then # the prior response was leaking into our @@ -475,6 +539,14 @@ class TestAWSHTTPConnection(unittest.TestCase): self.assertEqual(response.status, 200) +class TestAWSHTTPConnectionPool(unittest.TestCase): + def test_global_urllib3_pool_is_unchanged(self): + http_connection_class = HTTPConnectionPool.ConnectionCls + self.assertIsNot(http_connection_class, AWSHTTPConnection) + https_connection_class = HTTPSConnectionPool.ConnectionCls + self.assertIsNot(https_connection_class, AWSHTTPSConnection) + + class TestPrepareRequestDict(unittest.TestCase): def setUp(self): self.user_agent = 'botocore/1.0' @@ -611,5 +683,35 @@ class TestCreateRequestObject(unittest.TestCase): self.assertIn('User-Agent', request.headers) +class TestHeadersDict(unittest.TestCase): + def setUp(self): + self.headers = HeadersDict() + + def test_get_insensitive(self): + self.headers['foo'] = 'bar' + self.assertEqual(self.headers['FOO'], 'bar') + + def test_set_insensitive(self): + self.headers['foo'] = 'bar' + self.headers['FOO'] = 'baz' + self.assertEqual(self.headers['foo'], 'baz') + + def 
test_del_insensitive(self): + self.headers['foo'] = 'bar' + self.assertEqual(self.headers['FOO'], 'bar') + del self.headers['FoO'] + with self.assertRaises(KeyError): + self.headers['foo'] + + def test_iteration(self): + self.headers['FOO'] = 'bar' + self.headers['dead'] = 'beef' + self.assertIn('FOO', list(self.headers)) + self.assertIn('dead', list(self.headers)) + headers_items = list(self.headers.items()) + self.assertIn(('FOO', 'bar'), headers_items) + self.assertIn(('dead', 'beef'), headers_items) + + if __name__ == "__main__": unittest.main() diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index b3bb6e9a..62e814df 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -41,7 +41,8 @@ class TestAutoGeneratedClient(unittest.TestCase): 'apiVersion': '2014-01-01', 'endpointPrefix': 'myservice', 'signatureVersion': 'v4', - 'protocol': 'query' + 'protocol': 'query', + 'serviceId': 'MyService', }, 'operations': { 'TestOperation': { @@ -381,7 +382,7 @@ class TestAutoGeneratedClient(unittest.TestCase): service_name='myservice', region_name='us-west-2', credentials=self.credentials) call_args = mock_signer.call_args - self.assertEqual('myservice', call_args[0][0]) + self.assertEqual('MyService', call_args[0][0]) self.assertEqual('override', call_args[0][2]) def test_client_uses_given_region_name_and_endpoint_url_when_present(self): diff --git a/tests/unit/test_endpoint.py b/tests/unit/test_endpoint.py index fff84b07..0cc8ab10 100644 --- a/tests/unit/test_endpoint.py +++ b/tests/unit/test_endpoint.py @@ -20,9 +20,10 @@ from botocore.compat import six from botocore.awsrequest import AWSRequest from botocore.endpoint import Endpoint, DEFAULT_TIMEOUT from botocore.endpoint import EndpointCreator -from botocore.endpoint import BotocoreHTTPSession from botocore.exceptions import EndpointConnectionError from botocore.exceptions import ConnectionClosedError +from botocore.httpsession import URLLib3Session +from botocore.model import 
OperationModel, ServiceId def request_dict(): @@ -42,9 +43,9 @@ class RecordStreamResets(six.StringIO): six.StringIO.__init__(self, value) self.total_resets = 0 - def seek(self, where): + def seek(self, where, whence=0): self.total_resets += 1 - six.StringIO.seek(self, where) + six.StringIO.seek(self, where, whence) class TestEndpointBase(unittest.TestCase): @@ -75,22 +76,6 @@ class TestEndpointBase(unittest.TestCase): class TestEndpointFeatures(TestEndpointBase): - def test_timeout_can_be_specified(self): - timeout_override = 120 - self.endpoint.timeout = timeout_override - self.endpoint.make_request(self.op, request_dict()) - kwargs = self.http_session.send.call_args[1] - self.assertEqual(kwargs['timeout'], timeout_override) - - def test_make_request_with_proxies(self): - proxies = {'http': 'http://localhost:8888'} - self.endpoint.proxies = proxies - self.endpoint.make_request(self.op, request_dict()) - prepared_request = self.http_session.send.call_args[0][0] - self.http_session.send.assert_called_with( - prepared_request, verify=True, stream=False, - proxies=proxies, timeout=DEFAULT_TIMEOUT) - def test_make_request_with_no_auth(self): self.endpoint.auth = None self.endpoint.make_request(self.op, request_dict()) @@ -108,23 +93,6 @@ class TestEndpointFeatures(TestEndpointBase): prepared_request = self.http_session.send.call_args[0][0] self.assertNotIn('Authorization', prepared_request.headers) - def test_make_request_injects_better_dns_error_msg(self): - fake_request = Mock(url='https://ec2.us-west-2.amazonaws.com') - self.http_session.send.side_effect = ConnectionError( - "Fake gaierror(8, node or host not known)", request=fake_request) - with self.assertRaisesRegexp(EndpointConnectionError, - 'Could not connect'): - self.endpoint.make_request(self.op, request_dict()) - - def test_make_request_injects_better_bad_status_line_error_msg(self): - fake_request = Mock(url='https://ec2.us-west-2.amazonaws.com') - self.http_session.send.side_effect = ConnectionError( - 
"""'Connection aborted.', BadStatusLine("''",)""", - request=fake_request) - with self.assertRaisesRegexp(ConnectionClosedError, - 'Connection was closed'): - self.endpoint.make_request(self.op, request_dict()) - def test_make_request_with_context(self): r = request_dict() r['context'] = {'signing': {'region': 'us-west-2'}} @@ -133,27 +101,15 @@ class TestEndpointFeatures(TestEndpointBase): request = prepare.call_args[0][0] self.assertEqual(request.context['signing']['region'], 'us-west-2') - def test_can_specify_max_pool_connections(self): - endpoint = Endpoint('https://ec2.us-west-2.amazonaws.com', 'ec2', - self.event_emitter, max_pool_connections=50) - # We can look in endpoint.http_session.adapters[0]._pool_maxsize, - # but that feels like testing too much implementation detail. - self.assertEqual(endpoint.max_pool_connections, 50) - - def test_can_specify_proxies(self): - proxies = {'http': 'http://foo.bar:1234'} - endpoint = Endpoint('https://ec2.us-west-2.amazonaws.com', 'ec2', - self.event_emitter, proxies=proxies) - self.assertEqual(endpoint.proxies, proxies) - - class TestRetryInterface(TestEndpointBase): def setUp(self): super(TestRetryInterface, self).setUp() self.retried_on_exception = None + self._operation = Mock(spec=OperationModel) + self._operation.service_model.service_id = ServiceId('ec2') def test_retry_events_are_emitted(self): - op = Mock() + op = self._operation op.name = 'DescribeInstances' op.metadata = {'protocol': 'query'} op.has_streaming_output = False @@ -164,52 +120,64 @@ class TestRetryInterface(TestEndpointBase): 'needs-retry.ec2.DescribeInstances') def test_retry_events_can_alter_behavior(self): - op = Mock() + op = self._operation op.name = 'DescribeInstances' op.metadata = {'protocol': 'json'} op.has_event_stream_output = False self.event_emitter.emit.side_effect = [ [(None, None)], # Request created. + [(None, None)], # Request sent. [(None, 0)], # Check if retry needed. Retry needed. [(None, None)], # Request created. 
+ [(None, None)], # Request sent. [(None, None)] # Check if retry needed. Retry not needed. ] self.endpoint.make_request(op, request_dict()) call_args = self.event_emitter.emit.call_args_list - self.assertEqual(self.event_emitter.emit.call_count, 4) + self.assertEqual(self.event_emitter.emit.call_count, 6) # Check that all of the events are as expected. self.assertEqual(call_args[0][0][0], 'request-created.ec2.DescribeInstances') self.assertEqual(call_args[1][0][0], - 'needs-retry.ec2.DescribeInstances') + 'before-send.ec2.DescribeInstances') self.assertEqual(call_args[2][0][0], - 'request-created.ec2.DescribeInstances') + 'needs-retry.ec2.DescribeInstances') self.assertEqual(call_args[3][0][0], + 'request-created.ec2.DescribeInstances') + self.assertEqual(call_args[4][0][0], + 'before-send.ec2.DescribeInstances') + self.assertEqual(call_args[5][0][0], 'needs-retry.ec2.DescribeInstances') def test_retry_on_socket_errors(self): - op = Mock() + op = self._operation op.name = 'DescribeInstances' op.has_event_stream_output = False self.event_emitter.emit.side_effect = [ [(None, None)], # Request created. + [(None, None)], # Request sent. [(None, 0)], # Check if retry needed. Retry needed. [(None, None)], # Request created + [(None, None)], # Request sent. [(None, None)] # Check if retry needed. Retry not needed. ] self.http_session.send.side_effect = ConnectionError() with self.assertRaises(ConnectionError): self.endpoint.make_request(op, request_dict()) call_args = self.event_emitter.emit.call_args_list - self.assertEqual(self.event_emitter.emit.call_count, 4) + self.assertEqual(self.event_emitter.emit.call_count, 6) # Check that all of the events are as expected. 
self.assertEqual(call_args[0][0][0], 'request-created.ec2.DescribeInstances') self.assertEqual(call_args[1][0][0], - 'needs-retry.ec2.DescribeInstances') + 'before-send.ec2.DescribeInstances') self.assertEqual(call_args[2][0][0], - 'request-created.ec2.DescribeInstances') + 'needs-retry.ec2.DescribeInstances') self.assertEqual(call_args[3][0][0], + 'request-created.ec2.DescribeInstances') + self.assertEqual(call_args[4][0][0], + 'before-send.ec2.DescribeInstances') + self.assertEqual(call_args[5][0][0], 'needs-retry.ec2.DescribeInstances') def test_retry_attempts_added_to_response_metadata(self): @@ -218,8 +186,10 @@ class TestRetryInterface(TestEndpointBase): op.has_event_stream_output = False self.event_emitter.emit.side_effect = [ [(None, None)], # Request created. + [(None, None)], # Request sent [(None, 0)], # Check if retry needed. Retry needed. [(None, None)], # Request created. + [(None, None)], # Request sent [(None, None)] # Check if retry needed. Retry not needed. ] parser = Mock() @@ -233,6 +203,7 @@ class TestRetryInterface(TestEndpointBase): op.has_event_stream_output = False self.event_emitter.emit.side_effect = [ [(None, None)], # Request created. + [(None, None)], # Request sent. [(None, None)], # Check if retry needed. Retry needed. ] parser = Mock() @@ -267,14 +238,18 @@ class TestS3ResetStreamOnRetry(TestEndpointBase): request['body'] = body self.event_emitter.emit.side_effect = [ [(None, None)], # Request created. + [(None, None)], # Request sent. [(None, 0)], # Check if retry needed. Needs Retry. [(None, None)], # Request created. + [(None, None)], # Request sent. [(None, 0)], # Check if retry needed again. Needs Retry. [(None, None)], # Request created. + [(None, None)], # Request sent. [(None, None)], # Finally emit no rety is needed. 
] self.endpoint.make_request(op, request) - self.assertEqual(body.total_resets, 2) + # 2 seeks for the resets and 6 (2 per creation) for content-length + self.assertEqual(body.total_resets, 8) class TestEventStreamBody(TestEndpointBase): @@ -283,8 +258,8 @@ class TestEventStreamBody(TestEndpointBase): self.op.has_event_stream_output = True request = request_dict() self.endpoint.make_request(self.op, request) - args = self.http_session.send.call_args[1] - self.assertTrue(args.get('stream')) + sent_request = self.http_session.send.call_args[0][0] + self.assertTrue(sent_request.stream_output) class TestEndpointCreator(unittest.TestCase): @@ -296,6 +271,7 @@ class TestEndpointCreator(unittest.TestCase): self.environ_patch = patch('os.environ', self.environ) self.environ_patch.start() self.creator = EndpointCreator(Mock()) + self.mock_session = Mock(spec=URLLib3Session) def tearDown(self): self.environ_patch.stop() @@ -309,96 +285,68 @@ class TestEndpointCreator(unittest.TestCase): def test_create_endpoint_with_default_timeout(self): endpoint = self.creator.create_endpoint( self.service_model, region_name='us-west-2', - endpoint_url='https://example.com') - self.assertEqual(endpoint.timeout, DEFAULT_TIMEOUT) + endpoint_url='https://example.com', + http_session_cls=self.mock_session) + session_args = self.mock_session.call_args[1] + self.assertEqual(session_args.get('timeout'), DEFAULT_TIMEOUT) def test_create_endpoint_with_customized_timeout(self): endpoint = self.creator.create_endpoint( self.service_model, region_name='us-west-2', - endpoint_url='https://example.com', timeout=123) - self.assertEqual(endpoint.timeout, 123) + endpoint_url='https://example.com', timeout=123, + http_session_cls=self.mock_session) + session_args = self.mock_session.call_args[1] + self.assertEqual(session_args.get('timeout'), 123) def test_get_endpoint_default_verify_ssl(self): endpoint = self.creator.create_endpoint( self.service_model, region_name='us-west-2', - 
endpoint_url='https://example.com') - self.assertTrue(endpoint.verify) + endpoint_url='https://example.com', + http_session_cls=self.mock_session) + session_args = self.mock_session.call_args[1] + self.assertTrue(session_args.get('verify')) def test_verify_ssl_can_be_disabled(self): endpoint = self.creator.create_endpoint( self.service_model, region_name='us-west-2', - endpoint_url='https://example.com', verify=False) - self.assertFalse(endpoint.verify) + endpoint_url='https://example.com', verify=False, + http_session_cls=self.mock_session) + session_args = self.mock_session.call_args[1] + self.assertFalse(session_args.get('verify')) def test_verify_ssl_can_specify_cert_bundle(self): endpoint = self.creator.create_endpoint( self.service_model, region_name='us-west-2', - endpoint_url='https://example.com', verify='/path/cacerts.pem') - self.assertEqual(endpoint.verify, '/path/cacerts.pem') + endpoint_url='https://example.com', verify='/path/cacerts.pem', + http_session_cls=self.mock_session) + session_args = self.mock_session.call_args[1] + self.assertEqual(session_args.get('verify'), '/path/cacerts.pem') def test_honor_cert_bundle_env_var(self): self.environ['REQUESTS_CA_BUNDLE'] = '/env/cacerts.pem' endpoint = self.creator.create_endpoint( self.service_model, region_name='us-west-2', - endpoint_url='https://example.com') - self.assertEqual(endpoint.verify, '/env/cacerts.pem') + endpoint_url='https://example.com', + http_session_cls=self.mock_session) + session_args = self.mock_session.call_args[1] + self.assertEqual(session_args.get('verify'), '/env/cacerts.pem') def test_env_ignored_if_explicitly_passed(self): self.environ['REQUESTS_CA_BUNDLE'] = '/env/cacerts.pem' endpoint = self.creator.create_endpoint( self.service_model, region_name='us-west-2', - endpoint_url='https://example.com', verify='/path/cacerts.pem') + endpoint_url='https://example.com', verify='/path/cacerts.pem', + http_session_cls=self.mock_session) + session_args = 
self.mock_session.call_args[1] # /path/cacerts.pem wins over the value from the env var. - self.assertEqual(endpoint.verify, '/path/cacerts.pem') + self.assertEqual(session_args.get('verify'), '/path/cacerts.pem') def test_can_specify_max_pool_conns(self): endpoint = self.creator.create_endpoint( self.service_model, region_name='us-west-2', endpoint_url='https://example.com', - max_pool_connections=100 + max_pool_connections=100, + http_session_cls=self.mock_session, ) - self.assertEqual(endpoint.max_pool_connections, 100) - - -class TestAWSSession(unittest.TestCase): - def test_auth_header_preserved_from_s3_redirects(self): - request = AWSRequest() - request.url = 'https://bucket.s3.amazonaws.com/' - request.method = 'GET' - request.headers['Authorization'] = 'original auth header' - prepared_request = request.prepare() - - fake_response = Mock() - fake_response.headers = { - 'location': 'https://bucket.s3-us-west-2.amazonaws.com'} - fake_response.url = request.url - fake_response.status_code = 307 - fake_response.is_permanent_redirect = False - # This line is needed to disable the cookie handling - # code in requests. - fake_response.raw._original_response = None - - success_response = Mock() - success_response.raw._original_response = None - success_response.is_redirect = False - success_response.status_code = 200 - session = BotocoreHTTPSession() - session.send = Mock(return_value=success_response) - - list(session.resolve_redirects( - fake_response, prepared_request, stream=False)) - - redirected_request = session.send.call_args[0][0] - # The Authorization header for the newly sent request should - # still have our original Authorization header. 
- self.assertEqual( - redirected_request.headers['Authorization'], - 'original auth header') - - def test_max_pool_conns_injects_custom_adapter(self): - http_adapter_cls = Mock(return_value=sentinel.HTTP_ADAPTER) - session = BotocoreHTTPSession(max_pool_connections=20, - http_adapter_cls=http_adapter_cls) - http_adapter_cls.assert_called_with(pool_maxsize=20) - self.assertEqual(session.adapters['https://'], sentinel.HTTP_ADAPTER) - self.assertEqual(session.adapters['http://'], sentinel.HTTP_ADAPTER) + session_args = self.mock_session.call_args[1] + self.assertEqual(session_args.get('max_pool_connections'), 100) diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py index 8f461917..38d09a5e 100644 --- a/tests/unit/test_handlers.py +++ b/tests/unit/test_handlers.py @@ -31,7 +31,7 @@ from botocore.docs.bcdoc.restdoc import DocumentStructure from botocore.docs.params import RequestParamsDocumenter from botocore.docs.example import RequestExampleDocumenter from botocore.hooks import HierarchicalEmitter -from botocore.model import OperationModel, ServiceModel +from botocore.model import OperationModel, ServiceModel, ServiceId from botocore.model import DenormalizedStructureBuilder from botocore.signers import RequestSigner from botocore.credentials import Credentials @@ -139,7 +139,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() request_signer = RequestSigner( - 'ec2', 'us-east-1', 'ec2', 'v4', credentials, event_emitter) + ServiceId('ec2'), 'us-east-1', 'ec2', 'v4', + credentials, event_emitter) handlers.inject_presigned_url_ec2( params, request_signer, operation_model) self.assertEqual(params['body']['PresignedUrl'], 'https://foo') @@ -156,7 +157,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() request_signer = RequestSigner( - 'ec2', 'us-east-1', 'ec2', 'v4', credentials, event_emitter) + ServiceId('ec2'), 
'us-east-1', 'ec2', 'v4', credentials, + event_emitter) handlers.inject_presigned_url_ec2( params, request_signer, operation_model) self.assertEqual(params['body']['PresignedUrl'], 'https://foo') @@ -169,7 +171,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() request_signer = RequestSigner( - 'rds', 'us-east-1', 'rds', 'v4', credentials, event_emitter) + ServiceId('rds'), 'us-east-1', 'rds', 'v4', credentials, + event_emitter) handlers.inject_presigned_url_rds( params, request_signer, operation_model) self.assertEqual(params['body']['PreSignedUrl'], 'https://foo') @@ -186,7 +189,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() request_signer = RequestSigner( - 'rds', 'us-east-1', 'rds', 'v4', credentials, event_emitter) + ServiceId('rds'), 'us-east-1', 'rds', 'v4', credentials, + event_emitter) handlers.inject_presigned_url_rds( params, request_signer, operation_model) self.assertEqual(params['body']['PreSignedUrl'], 'https://foo') @@ -198,7 +202,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() request_signer = RequestSigner( - 'ec2', 'us-east-1', 'ec2', 'v4', credentials, event_emitter) + ServiceId('ec2'), 'us-east-1', 'ec2', 'v4', credentials, + event_emitter) request_dict = {} params = {'SourceRegion': 'us-west-2'} request_dict['body'] = params @@ -250,7 +255,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() request_signer = RequestSigner( - 'ec2', actual_region, 'ec2', 'v4', credentials, event_emitter) + ServiceId('ec2'), actual_region, 'ec2', 'v4', credentials, + event_emitter) request_dict = {} params = { 'SourceRegion': 'us-west-2', @@ -279,7 +285,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() 
request_signer = RequestSigner( - 'rds', 'us-east-1', 'rds', 'v4', credentials, event_emitter) + ServiceId('rds'), 'us-east-1', 'rds', 'v4', credentials, + event_emitter) request_dict = {} params = {'SourceRegion': 'us-west-2'} request_dict['body'] = params @@ -305,7 +312,9 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() request_signer = RequestSigner( - 'rds', 'us-east-1', 'rds', 'v4', credentials, event_emitter) + ServiceId('rds'), 'us-east-1', 'rds', 'v4', credentials, + event_emitter + ) request_dict = {} params = {'SourceRegion': 'us-west-2'} request_dict['body'] = params @@ -328,7 +337,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() request_signer = RequestSigner( - 'rds', 'us-east-1', 'rds', 'v4', credentials, event_emitter) + ServiceId('rds'), 'us-east-1', 'rds', 'v4', credentials, + event_emitter) request_dict = {} params = {'SourceRegion': 'us-west-2', 'PreSignedUrl': 'https://foo'} request_dict['body'] = params @@ -351,7 +361,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() request_signer = RequestSigner( - 'rds', 'us-east-1', 'rds', 'v4', credentials, event_emitter) + ServiceId('rds'), 'us-east-1', 'rds', 'v4', credentials, + event_emitter) request_dict = {} params = {'SourceRegion': 'us-west-2'} request_dict['body'] = params @@ -375,7 +386,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = HierarchicalEmitter() request_signer = RequestSigner( - 'rds', 'us-east-1', 'rds', 'v4', credentials, event_emitter) + ServiceId('rds'), 'us-east-1', 'rds', 'v4', credentials, + event_emitter) handlers.inject_presigned_url_rds( params=params, request_signer=request_signer, @@ -389,7 +401,8 @@ class TestHandlers(BaseSessionTest): credentials = Credentials('key', 'secret') event_emitter = 
HierarchicalEmitter() request_signer = RequestSigner( - 'rds', 'us-east-1', 'rds', 'v4', credentials, event_emitter) + ServiceId('rds'), 'us-east-1', 'rds', 'v4', credentials, + event_emitter) request_dict = {} params = {'SourceRegion': 'us-west-2'} request_dict['body'] = params @@ -536,7 +549,7 @@ class TestHandlers(BaseSessionTest): 'UserData': b64_user_data} self.assertEqual(params, result) - def test_register_retry_for_handlers_with_no_endpoint_prefix(self): + def test_register_retry_for_handlers_with_no_metadata(self): no_endpoint_prefix = {'metadata': {}} session = mock.Mock() handlers.register_retries_for_service(service_data=no_endpoint_prefix, @@ -546,7 +559,10 @@ class TestHandlers(BaseSessionTest): def test_register_retry_handlers(self): service_data = { - 'metadata': {'endpointPrefix': 'foo'}, + 'metadata': { + 'endpointPrefix': 'foo', + 'serviceId': 'foo', + }, } session = mock.Mock() loader = mock.Mock() @@ -857,6 +873,70 @@ class TestHandlers(BaseSessionTest): handlers.decode_list_object(parsed, context=context) self.assertEqual(parsed['Delimiter'], u'\xe7\xf6s% asd\x08 c') + def test_decode_list_objects_v2(self): + parsed = { + 'Contents': [{'Key': "%C3%A7%C3%B6s%25asd%08"}], + 'EncodingType': 'url', + } + context = {'encoding_type_auto_set': True} + handlers.decode_list_object_v2(parsed, context=context) + self.assertEqual(parsed['Contents'][0]['Key'], u'\xe7\xf6s%asd\x08') + + def test_decode_list_objects_v2_does_not_decode_without_context(self): + parsed = { + 'Contents': [{'Key': "%C3%A7%C3%B6s%25asd"}], + 'EncodingType': 'url', + } + handlers.decode_list_object_v2(parsed, context={}) + self.assertEqual(parsed['Contents'][0]['Key'], u'%C3%A7%C3%B6s%25asd') + + def test_decode_list_objects_v2_with_delimiter(self): + parsed = { + 'Delimiter': "%C3%A7%C3%B6s%25%20asd%08+c", + 'EncodingType': 'url', + } + context = {'encoding_type_auto_set': True} + handlers.decode_list_object_v2(parsed, context=context) + self.assertEqual(parsed['Delimiter'], 
u'\xe7\xf6s% asd\x08 c') + + def test_decode_list_objects_v2_with_prefix(self): + parsed = { + 'Prefix': "%C3%A7%C3%B6s%25%20asd%08+c", + 'EncodingType': 'url', + } + context = {'encoding_type_auto_set': True} + handlers.decode_list_object_v2(parsed, context=context) + self.assertEqual(parsed['Prefix'], u'\xe7\xf6s% asd\x08 c') + + def test_decode_list_objects_v2_does_not_decode_continuationtoken(self): + parsed = { + 'ContinuationToken': "%C3%A7%C3%B6s%25%20asd%08+c", + 'EncodingType': 'url', + } + context = {'encoding_type_auto_set': True} + handlers.decode_list_object_v2(parsed, context=context) + self.assertEqual( + parsed['ContinuationToken'], u"%C3%A7%C3%B6s%25%20asd%08+c") + + def test_decode_list_objects_v2_with_startafter(self): + parsed = { + 'StartAfter': "%C3%A7%C3%B6s%25%20asd%08+c", + 'EncodingType': 'url', + } + context = {'encoding_type_auto_set': True} + handlers.decode_list_object_v2(parsed, context=context) + self.assertEqual(parsed['StartAfter'], u'\xe7\xf6s% asd\x08 c') + + def test_decode_list_objects_v2_with_common_prefixes(self): + parsed = { + 'CommonPrefixes': [{'Prefix': "%C3%A7%C3%B6s%25%20asd%08+c"}], + 'EncodingType': 'url', + } + context = {'encoding_type_auto_set': True} + handlers.decode_list_object_v2(parsed, context=context) + self.assertEqual(parsed['CommonPrefixes'][0]['Prefix'], + u'\xe7\xf6s% asd\x08 c') + def test_get_bucket_location_optional(self): # This handler should no-op if another hook (i.e. 
stubber) has already # filled in response @@ -1059,7 +1139,7 @@ class TestAddMD5(BaseMD5Test): def test_adds_md5_when_v4(self): credentials = Credentials('key', 'secret') request_signer = RequestSigner( - 's3', 'us-east-1', 's3', 'v4', credentials, mock.Mock()) + ServiceId('s3'), 'us-east-1', 's3', 'v4', credentials, mock.Mock()) request_dict = {'body': b'bar', 'url': 'https://s3.us-east-1.amazonaws.com', 'method': 'PUT', @@ -1072,7 +1152,8 @@ class TestAddMD5(BaseMD5Test): def test_adds_md5_when_s3v4(self): credentials = Credentials('key', 'secret') request_signer = RequestSigner( - 's3', 'us-east-1', 's3', 's3v4', credentials, mock.Mock()) + ServiceId('s3'), 'us-east-1', 's3', 's3v4', credentials, + mock.Mock()) request_dict = {'body': b'bar', 'url': 'https://s3.us-east-1.amazonaws.com', 'method': 'PUT', @@ -1101,7 +1182,7 @@ class TestAddMD5(BaseMD5Test): def test_add_md5_raises_error_when_md5_unavailable(self): credentials = Credentials('key', 'secret') request_signer = RequestSigner( - 's3', 'us-east-1', 's3', 's3', credentials, mock.Mock()) + ServiceId('s3'), 'us-east-1', 's3', 's3', credentials, mock.Mock()) request_dict = {'body': b'bar', 'url': 'https://s3.us-east-1.amazonaws.com', 'method': 'PUT', @@ -1115,7 +1196,7 @@ class TestAddMD5(BaseMD5Test): def test_adds_md5_when_s3v2(self): credentials = Credentials('key', 'secret') request_signer = RequestSigner( - 's3', 'us-east-1', 's3', 's3', credentials, mock.Mock()) + ServiceId('s3'), 'us-east-1', 's3', 's3', credentials, mock.Mock()) request_dict = {'body': b'bar', 'url': 'https://s3.us-east-1.amazonaws.com', 'method': 'PUT', diff --git a/tests/unit/test_hooks.py b/tests/unit/test_hooks.py index 72495525..885686c2 100644 --- a/tests/unit/test_hooks.py +++ b/tests/unit/test_hooks.py @@ -17,7 +17,7 @@ from tests import unittest from functools import partial from botocore.hooks import HierarchicalEmitter, first_non_none_response -from botocore.hooks import AliasedEventEmitter +from botocore.hooks import 
EventAliaser class TestHierarchicalEventEmitter(unittest.TestCase): @@ -62,16 +62,20 @@ class TestHierarchicalEventEmitter(unittest.TestCase): self.assertEqual(calls, ['foo.bar.baz', 'foo.bar', 'foo']) -class TestAliasedEventEmitter(unittest.TestCase): +class TestAliasedEmitter(unittest.TestCase): def setUp(self): self.hook_calls = [] def hook(self, **kwargs): self.hook_calls.append(kwargs) + def get_emitter(self, event_aliases): + emitter = HierarchicalEmitter() + return EventAliaser(emitter, event_aliases) + def test_event_emitted(self): aliases = {'bar': 'bear'} - emitter = AliasedEventEmitter(event_aliases=aliases) + emitter = self.get_emitter(event_aliases=aliases) emitter.register('foo.bear.baz', self.hook) emitter.emit('foo.bear.baz') calls = [e['event_name'] for e in self.hook_calls] @@ -79,23 +83,39 @@ class TestAliasedEventEmitter(unittest.TestCase): def test_aliased_event_emitted(self): aliases = {'bar': 'bear'} - emitter = AliasedEventEmitter(event_aliases=aliases) + emitter = self.get_emitter(event_aliases=aliases) emitter.register('foo.bear.baz', self.hook) emitter.emit('foo.bar.baz') calls = [e['event_name'] for e in self.hook_calls] self.assertEqual(calls, ['foo.bear.baz']) + def test_alias_with_dots_emitted(self): + aliases = {'api.bar': 'bear'} + emitter = self.get_emitter(event_aliases=aliases) + emitter.register('foo.bear.baz', self.hook) + emitter.emit('foo.api.bar.baz') + calls = [e['event_name'] for e in self.hook_calls] + self.assertEqual(calls, ['foo.bear.baz']) + def test_aliased_event_registered(self): aliases = {'bar': 'bear'} - emitter = AliasedEventEmitter(event_aliases=aliases) + emitter = self.get_emitter(event_aliases=aliases) emitter.register('foo.bar.baz', self.hook) emitter.emit('foo.bear.baz') calls = [e['event_name'] for e in self.hook_calls] self.assertEqual(calls, ['foo.bear.baz']) + def test_aliased_event_with_dots_registered(self): + aliases = {'api.bar': 'bear'} + emitter = self.get_emitter(event_aliases=aliases) + 
emitter.register('foo.api.bar.baz', self.hook) + emitter.emit('foo.bear.baz') + calls = [e['event_name'] for e in self.hook_calls] + self.assertEqual(calls, ['foo.bear.baz']) + def test_event_unregistered(self): aliases = {'bar': 'bear'} - emitter = AliasedEventEmitter(event_aliases=aliases) + emitter = self.get_emitter(event_aliases=aliases) emitter.register('foo.bar.baz', self.hook) emitter.emit('foo.bear.baz') @@ -110,7 +130,7 @@ class TestAliasedEventEmitter(unittest.TestCase): def test_aliased_event_unregistered(self): aliases = {'bar': 'bear'} - emitter = AliasedEventEmitter(event_aliases=aliases) + emitter = self.get_emitter(event_aliases=aliases) emitter.register('foo.bar.baz', self.hook) emitter.emit('foo.bear.baz') @@ -123,6 +143,21 @@ class TestAliasedEventEmitter(unittest.TestCase): calls = [e['event_name'] for e in self.hook_calls] self.assertEqual(calls, []) + def test_aliased_event_with_dots_unregistered(self): + aliases = {'api.bar': 'bear'} + emitter = self.get_emitter(event_aliases=aliases) + + emitter.register('foo.api.bar.baz', self.hook) + emitter.emit('foo.bear.baz') + calls = [e['event_name'] for e in self.hook_calls] + self.assertEqual(calls, ['foo.bear.baz']) + + self.hook_calls = [] + emitter.unregister('foo.api.bar.baz', self.hook) + emitter.emit('foo.bear.baz') + calls = [e['event_name'] for e in self.hook_calls] + self.assertEqual(calls, []) + class TestStopProcessing(unittest.TestCase): def setUp(self): diff --git a/tests/unit/test_http_client_exception_mapping.py b/tests/unit/test_http_client_exception_mapping.py new file mode 100644 index 00000000..9eee38c4 --- /dev/null +++ b/tests/unit/test_http_client_exception_mapping.py @@ -0,0 +1,27 @@ +from nose.tools import assert_raises + +from botocore import exceptions as botocore_exceptions +from botocore.vendored.requests import exceptions as requests_exceptions +from botocore.vendored.requests.packages.urllib3 import exceptions as urllib3_exceptions + +EXCEPTION_MAPPING = [ + 
(botocore_exceptions.ReadTimeoutError, requests_exceptions.ReadTimeout), + (botocore_exceptions.ReadTimeoutError, urllib3_exceptions.ReadTimeoutError), + (botocore_exceptions.ConnectTimeoutError, requests_exceptions.ConnectTimeout), + (botocore_exceptions.ProxyConnectionError, requests_exceptions.ProxyError), + (botocore_exceptions.SSLError, requests_exceptions.SSLError), +] + + +def _raise_exception(exception): + raise exception(endpoint_url=None, proxy_url=None, error=None) + + +def _test_exception_mapping(new_exception, old_exception): + # assert that the new exception can still be caught by the old vendored one + assert_raises(old_exception, _raise_exception, new_exception) + + +def test_http_client_exception_mapping(): + for new_exception, old_exception in EXCEPTION_MAPPING: + yield _test_exception_mapping, new_exception, old_exception diff --git a/tests/unit/test_http_session.py b/tests/unit/test_http_session.py new file mode 100644 index 00000000..5568a681 --- /dev/null +++ b/tests/unit/test_http_session.py @@ -0,0 +1,217 @@ +from mock import patch, Mock, ANY +from tests import unittest +from nose.tools import raises +from urllib3.exceptions import NewConnectionError, ProtocolError + +from botocore.vendored import six +from botocore.awsrequest import AWSRequest +from botocore.awsrequest import AWSHTTPConnectionPool, AWSHTTPSConnectionPool +from botocore.httpsession import get_cert_path +from botocore.httpsession import URLLib3Session, ProxyConfiguration +from botocore.exceptions import ConnectionClosedError, EndpointConnectionError + + +class TestProxyConfiguration(unittest.TestCase): + def setUp(self): + self.url = 'http://localhost/' + self.auth_url = 'http://user:pass@localhost/' + self.proxy_config = ProxyConfiguration( + proxies={'http': 'http://localhost:8081/'} + ) + + def update_http_proxy(self, url): + self.proxy_config = ProxyConfiguration( + proxies={'http': url} + ) + + def test_construct_proxy_headers_with_auth(self): + headers = 
self.proxy_config.proxy_headers_for(self.auth_url) + proxy_auth = headers.get('Proxy-Authorization') + self.assertEqual('Basic dXNlcjpwYXNz', proxy_auth) + + def test_construct_proxy_headers_without_auth(self): + headers = self.proxy_config.proxy_headers_for(self.url) + self.assertEqual({}, headers) + + def test_proxy_for_url_no_slashes(self): + self.update_http_proxy('localhost:8081/') + proxy_url = self.proxy_config.proxy_url_for(self.url) + self.assertEqual('http://localhost:8081/', proxy_url) + + def test_proxy_for_url_no_protocol(self): + self.update_http_proxy('//localhost:8081/') + proxy_url = self.proxy_config.proxy_url_for(self.url) + self.assertEqual('http://localhost:8081/', proxy_url) + + def test_fix_proxy_url_has_protocol_http(self): + proxy_url = self.proxy_config.proxy_url_for(self.url) + self.assertEqual('http://localhost:8081/', proxy_url) + + +class TestHttpSessionUtils(unittest.TestCase): + def test_get_cert_path_path(self): + path = '/some/path' + cert_path = get_cert_path(path) + self.assertEqual(path, cert_path) + + def test_get_cert_path_certifi_or_default(self): + with patch('botocore.httpsession.where') as where: + path = '/bundle/path' + where.return_value = path + cert_path = get_cert_path(True) + self.assertEqual(path, cert_path) + + +class TestURLLib3Session(unittest.TestCase): + def setUp(self): + self.request = AWSRequest( + method='GET', + url='http://example.com/', + headers={}, + data=b'', + ) + + self.response = Mock() + self.response.headers = {} + self.response.stream.return_value = b'' + + self.pool_manager = Mock() + self.connection = Mock() + self.connection.urlopen.return_value = self.response + self.pool_manager.connection_from_url.return_value = self.connection + + self.pool_patch = patch('botocore.httpsession.PoolManager') + self.proxy_patch = patch('botocore.httpsession.proxy_from_url') + self.pool_manager_cls = self.pool_patch.start() + self.proxy_manager_fun = self.proxy_patch.start() + 
self.pool_manager_cls.return_value = self.pool_manager + self.proxy_manager_fun.return_value = self.pool_manager + + def tearDown(self): + self.pool_patch.stop() + + def assert_request_sent(self, headers=None, body=None, url='/'): + if headers is None: + headers = {} + + self.connection.urlopen.assert_called_once_with( + method=self.request.method, + url=url, + body=body, + headers=headers, + retries=False, + assert_same_host=False, + preload_content=False, + decode_content=False, + ) + + def test_forwards_max_pool_size(self): + URLLib3Session(max_pool_connections=22) + self.pool_manager_cls.assert_called_with( + maxsize=22, + timeout=ANY, + strict=True, + ssl_context=ANY, + ) + + def test_basic_request(self): + session = URLLib3Session() + session.send(self.request.prepare()) + self.assert_request_sent() + self.response.stream.assert_called_once_with() + + def test_basic_streaming_request(self): + session = URLLib3Session() + self.request.stream_output = True + session.send(self.request.prepare()) + self.assert_request_sent() + self.response.stream.assert_not_called() + + def test_basic_https_request(self): + session = URLLib3Session() + self.request.url = 'https://example.com/' + session.send(self.request.prepare()) + self.assert_request_sent() + + def test_basic_https_proxy_request(self): + proxies = {'https': 'http://proxy.com'} + session = URLLib3Session(proxies=proxies) + self.request.url = 'https://example.com/' + session.send(self.request.prepare()) + self.proxy_manager_fun.assert_any_call( + proxies['https'], + proxy_headers={}, + maxsize=ANY, + timeout=ANY, + strict=True, + ssl_context=ANY, + ) + self.assert_request_sent() + + def test_basic_proxy_request_caches_manager(self): + proxies = {'https': 'http://proxy.com'} + session = URLLib3Session(proxies=proxies) + self.request.url = 'https://example.com/' + session.send(self.request.prepare()) + # assert we created the proxy manager + self.proxy_manager_fun.assert_any_call( + proxies['https'], + 
proxy_headers={}, + maxsize=ANY, + timeout=ANY, + strict=True, + ssl_context=ANY, + ) + session.send(self.request.prepare()) + # assert that we did not create another proxy manager + self.assertEqual(self.proxy_manager_fun.call_count, 1) + + def test_basic_http_proxy_request(self): + proxies = {'http': 'http://proxy.com'} + session = URLLib3Session(proxies=proxies) + session.send(self.request.prepare()) + self.proxy_manager_fun.assert_any_call( + proxies['http'], + proxy_headers={}, + maxsize=ANY, + timeout=ANY, + strict=True, + ssl_context=ANY, + ) + self.assert_request_sent(url=self.request.url) + + def test_ssl_context_is_explicit(self): + session = URLLib3Session() + session.send(self.request.prepare()) + _, manager_kwargs = self.pool_manager_cls.call_args + self.assertIsNotNone(manager_kwargs.get('ssl_context')) + + def test_proxy_request_ssl_context_is_explicit(self): + proxies = {'http': 'http://proxy.com'} + session = URLLib3Session(proxies=proxies) + session.send(self.request.prepare()) + _, proxy_kwargs = self.proxy_manager_fun.call_args + self.assertIsNotNone(proxy_kwargs.get('ssl_context')) + + def make_request_with_error(self, error): + self.connection.urlopen.side_effect = error + session = URLLib3Session() + session.send(self.request.prepare()) + + @raises(EndpointConnectionError) + def test_catches_new_connection_error(self): + error = NewConnectionError(None, None) + self.make_request_with_error(error) + + @raises(ConnectionClosedError) + def test_catches_bad_status_line(self): + error = ProtocolError(None) + self.make_request_with_error(error) + + def test_aws_connection_classes_are_used(self): + session = URLLib3Session() + # ensure the pool manager is using the correct classes + http_class = self.pool_manager.pool_classes_by_scheme.get('http') + self.assertIs(http_class, AWSHTTPConnectionPool) + https_class = self.pool_manager.pool_classes_by_scheme.get('https') + self.assertIs(https_class, AWSHTTPSConnectionPool) diff --git 
a/tests/unit/test_model.py b/tests/unit/test_model.py index c48df029..e6d96b8c 100644 --- a/tests/unit/test_model.py +++ b/tests/unit/test_model.py @@ -30,6 +30,16 @@ def test_missing_model_attribute_raises_exception(): yield _test_attribute_raise_exception, name +class TestServiceId(unittest.TestCase): + def test_hypenize_replaces_spaces(self): + self.assertEqual( + model.ServiceId('my service').hyphenize(), 'my-service' + ) + + def test_hyphenize_lower_cases(self): + self.assertEqual(model.ServiceId('MyService').hyphenize(), 'myservice') + + class TestServiceModel(unittest.TestCase): def setUp(self): @@ -61,6 +71,10 @@ class TestServiceModel(unittest.TestCase): def test_service_id(self): self.assertEqual(self.service_model.service_id, 'MyService') + def test_hyphenize_service_id(self): + self.assertEqual( + self.service_model.service_id.hyphenize(), 'myservice') + def test_operation_does_not_exist(self): with self.assertRaises(model.OperationNotFoundError): self.service_model.operation_model('NoExistOperation') @@ -75,6 +89,10 @@ class TestServiceModel(unittest.TestCase): def test_shape_names(self): self.assertEqual(self.service_model.shape_names, ['StringShape']) + def test_repr_has_service_name(self): + self.assertEqual(repr(self.service_model), + 'ServiceModel(endpoint-prefix)') + class TestOperationModelFromService(unittest.TestCase): def setUp(self): diff --git a/tests/unit/test_parsers.py b/tests/unit/test_parsers.py index 338e1157..93a2739c 100644 --- a/tests/unit/test_parsers.py +++ b/tests/unit/test_parsers.py @@ -379,9 +379,15 @@ class TestHeaderResponseInclusion(unittest.TestCase): parsed = parser.parse( {'body': b'{}', 'headers': headers, 'status_code': 200}, output_shape) + # The mapped header's keys should all be lower cased + parsed_headers = { + 'x-amzn-requestid': 'request-id', + 'header1': 'foo', + 'header2': 'bar', + } # Response headers should be mapped as HTTPHeaders. 
self.assertEqual( - parsed['ResponseMetadata']['HTTPHeaders'], headers) + parsed['ResponseMetadata']['HTTPHeaders'], parsed_headers) def test_can_always_json_serialize_headers(self): parser = self.create_parser() @@ -399,7 +405,7 @@ class TestHeaderResponseInclusion(unittest.TestCase): # response. So we want to ensure that despite using a CustomHeaderDict # we can always JSON dumps the response metadata. self.assertEqual( - json.loads(json.dumps(metadata))['HTTPHeaders']['Header1'], 'foo') + json.loads(json.dumps(metadata))['HTTPHeaders']['header1'], 'foo') class TestResponseParsingDatetimes(unittest.TestCase): diff --git a/tests/unit/test_response.py b/tests/unit/test_response.py index 39daf58d..195ef935 100644 --- a/tests/unit/test_response.py +++ b/tests/unit/test_response.py @@ -15,12 +15,13 @@ from tests.unit import BaseResponseTest import datetime from dateutil.tz import tzutc +from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError import botocore from botocore import response from botocore.compat import six -from botocore.exceptions import IncompleteReadError -from botocore.vendored.requests.models import Response, Request +from botocore.exceptions import IncompleteReadError, ReadTimeoutError +from botocore.awsrequest import AWSRequest, AWSResponse XMLBODY1 = (b'' b'AccessDenied' @@ -136,23 +137,42 @@ class TestStreamWrapper(unittest.TestCase): [b'1234567890', b'1234567890', b'12345'], ) + def test_catches_urllib3_read_timeout(self): + class TimeoutBody(object): + def read(*args, **kwargs): + raise URLLib3ReadTimeoutError(None, None, None) + + def geturl(*args, **kwargs): + return 'http://example.com' + + stream = response.StreamingBody(TimeoutBody(), content_length=None) + with self.assertRaises(ReadTimeoutError): + stream.read() + + +class FakeRawResponse(six.BytesIO): + def stream(self, amt=1024, decode_content=None): + while True: + chunk = self.read(amt) + if not chunk: + break + yield chunk + class TestGetResponse(BaseResponseTest): 
maxDiff = None def test_get_response_streaming_ok(self): - http_response = Response() - http_response.headers = { + headers = { 'content-type': 'image/png', 'server': 'AmazonS3', 'AcceptRanges': 'bytes', 'transfer-encoding': 'chunked', 'ETag': '"00000000000000000000000000000000"', } - http_response.raw = six.BytesIO(b'\x89PNG\r\n\x1a\n\x00\x00') + raw = FakeRawResponse(b'\x89PNG\r\n\x1a\n\x00\x00') - http_response.status_code = 200 - http_response.reason = 'OK' + http_response = AWSResponse(None, 200, headers, raw) session = botocore.session.get_session() service_model = session.get_service_model('s3') @@ -164,17 +184,15 @@ class TestGetResponse(BaseResponseTest): '"00000000000000000000000000000000"') def test_get_response_streaming_ng(self): - http_response = Response() - http_response.headers = { + headers = { 'content-type': 'application/xml', 'date': 'Sat, 08 Mar 2014 12:05:44 GMT', 'server': 'AmazonS3', 'transfer-encoding': 'chunked', 'x-amz-id-2': 'AAAAAAAAAAAAAAAAAAA', 'x-amz-request-id': 'XXXXXXXXXXXXXXXX'} - http_response.raw = six.BytesIO(XMLBODY1) - http_response.status_code = 403 - http_response.reason = 'Forbidden' + raw = FakeRawResponse(XMLBODY1) + http_response = AWSResponse(None, 403, headers, raw) session = botocore.session.get_session() service_model = session.get_service_model('s3') @@ -191,18 +209,15 @@ class TestGetResponse(BaseResponseTest): ) def test_get_response_nonstreaming_ok(self): - http_response = Response() - http_response.headers = { + headers = { 'content-type': 'application/xml', 'date': 'Sun, 09 Mar 2014 02:55:43 GMT', 'server': 'AmazonS3', 'transfer-encoding': 'chunked', 'x-amz-id-2': 'AAAAAAAAAAAAAAAAAAA', 'x-amz-request-id': 'XXXXXXXXXXXXXXXX'} - http_response.raw = six.BytesIO(XMLBODY1) - http_response.status_code = 403 - http_response.reason = 'Forbidden' - http_response.request = Request() + raw = FakeRawResponse(XMLBODY1) + http_response = AWSResponse(None, 403, headers, raw) session = botocore.session.get_session() 
service_model = session.get_service_model('s3') @@ -223,18 +238,15 @@ class TestGetResponse(BaseResponseTest): }) def test_get_response_nonstreaming_ng(self): - http_response = Response() - http_response.headers = { + headers = { 'content-type': 'application/xml', 'date': 'Sat, 08 Mar 2014 12:05:44 GMT', 'server': 'AmazonS3', 'transfer-encoding': 'chunked', 'x-amz-id-2': 'AAAAAAAAAAAAAAAAAAA', 'x-amz-request-id': 'XXXXXXXXXXXXXXXX'} - http_response.raw = six.BytesIO(XMLBODY2) - http_response.status_code = 200 - http_response.reason = 'ok' - http_response.request = Request() + raw = FakeRawResponse(XMLBODY2) + http_response = AWSResponse(None, 200, headers, raw) session = botocore.session.get_session() service_model = session.get_service_model('s3') diff --git a/tests/unit/test_retryhandler.py b/tests/unit/test_retryhandler.py index 3f31a69f..eedd7718 100644 --- a/tests/unit/test_retryhandler.py +++ b/tests/unit/test_retryhandler.py @@ -16,11 +16,11 @@ from tests import unittest import mock -from botocore.vendored.requests import ConnectionError, Timeout -from botocore.vendored.requests.packages.urllib3.exceptions import ClosedPoolError from botocore import retryhandler -from botocore.exceptions import ChecksumError +from botocore.exceptions import ( + ChecksumError, EndpointConnectionError, ReadTimeoutError, +) HTTP_500_RESPONSE = mock.Mock() @@ -202,12 +202,13 @@ class TestCreateRetryConfiguration(unittest.TestCase): def test_create_retry_handler_with_socket_errors(self): handler = retryhandler.create_retry_handler( self.retry_config, operation_name='OperationBar') - with self.assertRaises(ConnectionError): + exception = EndpointConnectionError(endpoint_url='') + with self.assertRaises(EndpointConnectionError): handler(response=None, attempts=10, - caught_exception=ConnectionError()) + caught_exception=exception) # No connection error raised because attempts < max_attempts. 
sleep_time = handler(response=None, attempts=1, - caught_exception=ConnectionError()) + caught_exception=exception) self.assertEqual(sleep_time, 1) # But any other exception should be raised even if # attempts < max_attempts. @@ -221,24 +222,9 @@ class TestCreateRetryConfiguration(unittest.TestCase): handler = retryhandler.create_retry_handler( self.retry_config, operation_name='OperationBar') sleep_time = handler(response=None, attempts=1, - caught_exception=Timeout()) + caught_exception=ReadTimeoutError(endpoint_url='')) self.assertEqual(sleep_time, 1) - def test_retry_pool_closed_errors(self): - # A ClosedPoolError is retried (this is a workaround for a urllib3 - # bug). Can be removed once we upgrade to requests 2.0.0. - handler = retryhandler.create_retry_handler( - self.retry_config, operation_name='OperationBar') - # 4th attempt is retried. - sleep_time = handler( - response=None, attempts=4, - caught_exception=ClosedPoolError('FakePool', 'Message')) - self.assertEqual(sleep_time, 8) - # But the 5th time propogates the error. 
- with self.assertRaises(ClosedPoolError): - handler(response=None, attempts=10, - caught_exception=ClosedPoolError('FakePool', 'Message')) - def test_create_retry_handler_with_no_operation(self): handler = retryhandler.create_retry_handler( self.retry_config, operation_name=None) diff --git a/tests/unit/test_s3_addressing.py b/tests/unit/test_s3_addressing.py index b39dddb8..a7bfe8e8 100644 --- a/tests/unit/test_s3_addressing.py +++ b/tests/unit/test_s3_addressing.py @@ -15,7 +15,7 @@ import os -from tests import BaseSessionTest +from tests import BaseSessionTest, ClientHTTPStubber from mock import patch, Mock from botocore.compat import OrderedDict @@ -29,25 +29,18 @@ class TestS3Addressing(BaseSessionTest): self.region_name = 'us-east-1' self.signature_version = 's3' - self.mock_response = Mock() - self.mock_response.content = '' - self.mock_response.headers = {} - self.mock_response.status_code = 200 self.session.unregister('before-parameter-build.s3.ListObjects', set_list_objects_encoding_type_url) - def get_prepared_request(self, operation, params, - force_hmacv1=False): + def get_prepared_request(self, operation, params, force_hmacv1=False): if force_hmacv1: self.session.register('choose-signer', self.enable_hmacv1) - with patch('botocore.endpoint.BotocoreHTTPSession') as \ - mock_http_session: - mock_send = mock_http_session.return_value.send - mock_send.return_value = self.mock_response - client = self.session.create_client('s3', self.region_name) + client = self.session.create_client('s3', self.region_name) + with ClientHTTPStubber(client) as http_stubber: + http_stubber.add_response() getattr(client, operation)(**params) # Return the request that was sent over the wire. 
- return mock_send.call_args[0][0] + return http_stubber.requests[0] def enable_hmacv1(self, **kwargs): return 's3' diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 46bd4b76..ea6fc8cf 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -391,7 +391,9 @@ class TestConfigLoaderObject(BaseSessionTest): class TestGetServiceModel(BaseSessionTest): def test_get_service_model(self): loader = mock.Mock() - loader.load_service_model.return_value = {} + loader.load_service_model.return_value = { + 'metadata': {'serviceId': 'foo'} + } self.session.register_component('data_loader', loader) model = self.session.get_service_model('made_up') self.assertIsInstance(model, ServiceModel) diff --git a/tests/unit/test_signers.py b/tests/unit/test_signers.py index c0ad4d9a..ffbed212 100644 --- a/tests/unit/test_signers.py +++ b/tests/unit/test_signers.py @@ -23,6 +23,7 @@ from botocore.config import Config from botocore.credentials import Credentials from botocore.credentials import ReadOnlyCredentials from botocore.hooks import HierarchicalEmitter +from botocore.model import ServiceId from botocore.exceptions import NoRegionError, UnknownSignatureVersionError from botocore.exceptions import UnknownClientMethodError, ParamValidationError from botocore.exceptions import UnsupportedSignatureVersionError @@ -39,7 +40,7 @@ class BaseSignerTest(unittest.TestCase): self.emitter = mock.Mock() self.emitter.emit_until_response.return_value = (None, None) self.signer = RequestSigner( - 'service_name', 'region_name', 'signing_name', + ServiceId('service_name'), 'region_name', 'signing_name', 'v4', self.credentials, self.emitter) self.fixed_credentials = self.credentials.get_frozen_credentials() @@ -57,8 +58,9 @@ class TestSigner(BaseSignerTest): def test_region_required_for_sigv4(self): self.signer = RequestSigner( - 'service_name', None, 'signing_name', 'v4', self.credentials, - self.emitter) + ServiceId('service_name'), None, 'signing_name', 'v4', 
+ self.credentials, self.emitter + ) with self.assertRaises(NoRegionError): self.signer.sign('operation_name', mock.Mock()) @@ -288,7 +290,7 @@ class TestSigner(BaseSignerTest): 'context': {} } self.signer = RequestSigner( - 'service_name', 'region_name', 'signing_name', + ServiceId('service_name'), 'region_name', 'signing_name', 'foo', self.credentials, self.emitter) with self.assertRaises(UnsupportedSignatureVersionError): self.signer.generate_presigned_url( @@ -301,7 +303,7 @@ class TestSigner(BaseSignerTest): self.credentials = FakeCredentials('a', 'b', 'c') self.signer = RequestSigner( - 'service_name', 'region_name', 'signing_name', + ServiceId('service_name'), 'region_name', 'signing_name', 'v4', self.credentials, self.emitter) auth_cls = mock.Mock() @@ -322,7 +324,7 @@ class TestSigner(BaseSignerTest): # the error (which they already do). self.credentials = None self.signer = RequestSigner( - 'service_name', 'region_name', 'signing_name', + ServiceId('service_name'), 'region_name', 'signing_name', 'v4', self.credentials, self.emitter) auth_cls = mock.Mock() with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS, @@ -557,7 +559,7 @@ class TestS3PostPresigner(BaseSignerTest): def setUp(self): super(TestS3PostPresigner, self).setUp() self.request_signer = RequestSigner( - 'service_name', 'region_name', 'signing_name', + ServiceId('service_name'), 'region_name', 'signing_name', 's3v4', self.credentials, self.emitter) self.signer = S3PostPresigner(self.request_signer) self.request_dict = { @@ -696,7 +698,7 @@ class TestS3PostPresigner(BaseSignerTest): 'context': {} } self.request_signer = RequestSigner( - 'service_name', 'region_name', 'signing_name', + ServiceId('service_name'), 'region_name', 'signing_name', 'foo', self.credentials, self.emitter) self.signer = S3PostPresigner(self.request_signer) with self.assertRaises(UnsupportedSignatureVersionError): diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index a108d5ef..5a4872f1 100644 --- 
a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -22,10 +22,9 @@ from botocore.compat import OrderedDict, json from botocore.compat import six from botocore.awsrequest import AWSRequest from botocore.exceptions import InvalidExpressionError, ConfigNotFound -from botocore.exceptions import ClientError +from botocore.exceptions import ClientError, ConnectionClosedError from botocore.exceptions import InvalidDNSNameError, MetadataRetrievalError from botocore.model import ServiceModel -from botocore.vendored import requests from botocore.utils import is_json_value_header from botocore.utils import remove_dot_segments from botocore.utils import normalize_url_path @@ -45,6 +44,7 @@ from botocore.utils import fix_s3_host from botocore.utils import switch_to_virtual_host_style from botocore.utils import instance_cache from botocore.utils import merge_dicts +from botocore.utils import lowercase_dict from botocore.utils import get_service_module_name from botocore.utils import percent_encode_sequence from botocore.utils import percent_encode @@ -990,6 +990,33 @@ class TestMergeDicts(unittest.TestCase): dict1, {'Foo': ['foo_value']}) +class TestLowercaseDict(unittest.TestCase): + def test_lowercase_dict_empty(self): + original = {} + copy = lowercase_dict(original) + self.assertEqual(original, copy) + + def test_lowercase_dict_original_keys_lower(self): + original = { + 'lower_key1': 1, + 'lower_key2': 2, + } + copy = lowercase_dict(original) + self.assertEqual(original, copy) + + def test_lowercase_dict_original_keys_mixed(self): + original = { + 'SOME_KEY': 'value', + 'AnOTher_OnE': 'anothervalue', + } + copy = lowercase_dict(original) + expected = { + 'some_key': 'value', + 'another_one': 'anothervalue', + } + self.assertEqual(expected, copy) + + class TestGetServiceModuleName(unittest.TestCase): def setUp(self): self.service_description = { @@ -1547,7 +1574,7 @@ class TestContainerMetadataFetcher(unittest.TestCase): def fake_response(self, status_code, body): 
response = mock.Mock() response.status_code = status_code - response.text = body + response.content = body return response def set_http_responses_to(self, *responses): @@ -1561,9 +1588,15 @@ class TestContainerMetadataFetcher(unittest.TestCase): http_response = response else: http_response = self.fake_response( - status_code=200, body=json.dumps(response)) + status_code=200, body=json.dumps(response).encode('utf-8')) http_responses.append(http_response) - self.http.get.side_effect = http_responses + self.http.send.side_effect = http_responses + + def assert_request(self, method, url, headers): + request = self.http.send.call_args[0][0] + self.assertEqual(request.method, method) + self.assertEqual(request.url, url) + self.assertEqual(request.headers, headers) def assert_can_retrieve_metadata_from(self, full_uri): response_body = {'foo': 'bar'} @@ -1571,10 +1604,7 @@ class TestContainerMetadataFetcher(unittest.TestCase): fetcher = self.create_fetcher() response = fetcher.retrieve_full_uri(full_uri) self.assertEqual(response, response_body) - self.http.get.assert_called_with( - full_uri, headers={'Accept': 'application/json'}, - timeout=fetcher.TIMEOUT_SECONDS, - ) + self.assert_request('GET', full_uri, {'Accept': 'application/json'}) def assert_host_is_not_allowed(self, full_uri): response_body = {'foo': 'bar'} @@ -1582,13 +1612,7 @@ class TestContainerMetadataFetcher(unittest.TestCase): fetcher = self.create_fetcher() with self.assertRaisesRegexp(ValueError, 'Unsupported host'): fetcher.retrieve_full_uri(full_uri) - self.assertFalse(self.http.get.called) - - def test_default_session_disables_proxies(self): - with mock.patch('botocore.utils.requests.Session') as session: - fetcher = ContainerMetadataFetcher() - self.assertFalse(session.return_value.trust_env) - self.assertEqual(session.return_value.proxies, {}) + self.assertFalse(self.http.send.called) def test_can_specify_extra_headers_are_merged(self): headers = { @@ -1601,10 +1625,7 @@ class 
TestContainerMetadataFetcher(unittest.TestCase): fetcher = self.create_fetcher() response = fetcher.retrieve_full_uri( 'http://localhost', headers) - self.http.get.assert_called_with( - 'http://localhost', headers=headers, - timeout=fetcher.TIMEOUT_SECONDS, - ) + self.assert_request('GET', 'http://localhost', headers) def test_can_retrieve_uri(self): json_body = { @@ -1620,11 +1641,8 @@ class TestContainerMetadataFetcher(unittest.TestCase): self.assertEqual(response, json_body) # Ensure we made calls to the right endpoint. - self.http.get.assert_called_with( - 'http://169.254.170.2/foo?id=1', - headers={'Accept': 'application/json'}, - timeout=fetcher.TIMEOUT_SECONDS, - ) + headers = {'Accept': 'application/json'} + self.assert_request('GET', 'http://169.254.170.2/foo?id=1', headers) def test_can_retry_requests(self): success_response = { @@ -1636,7 +1654,7 @@ class TestContainerMetadataFetcher(unittest.TestCase): self.set_http_responses_to( # First response is a connection error, should # be retried. - requests.ConnectionError(), + ConnectionClosedError(endpoint_url=''), # Second response is the successful JSON response # with credentials. success_response, @@ -1648,44 +1666,44 @@ class TestContainerMetadataFetcher(unittest.TestCase): def test_propagates_credential_error_on_http_errors(self): self.set_http_responses_to( # In this scenario, we never get a successful response. - requests.ConnectionError(), - requests.ConnectionError(), - requests.ConnectionError(), - requests.ConnectionError(), - requests.ConnectionError(), + ConnectionClosedError(endpoint_url=''), + ConnectionClosedError(endpoint_url=''), + ConnectionClosedError(endpoint_url=''), + ConnectionClosedError(endpoint_url=''), + ConnectionClosedError(endpoint_url=''), ) # As a result, we expect an appropriate error to be raised. 
fetcher = self.create_fetcher() with self.assertRaises(MetadataRetrievalError): fetcher.retrieve_uri('/foo?id=1') - self.assertEqual(self.http.get.call_count, fetcher.RETRY_ATTEMPTS) + self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS) def test_error_raised_on_non_200_response(self): self.set_http_responses_to( - self.fake_response(status_code=404, body='Error not found'), - self.fake_response(status_code=404, body='Error not found'), - self.fake_response(status_code=404, body='Error not found'), + self.fake_response(status_code=404, body=b'Error not found'), + self.fake_response(status_code=404, body=b'Error not found'), + self.fake_response(status_code=404, body=b'Error not found'), ) fetcher = self.create_fetcher() with self.assertRaises(MetadataRetrievalError): fetcher.retrieve_uri('/foo?id=1') # Should have tried up to RETRY_ATTEMPTS. - self.assertEqual(self.http.get.call_count, fetcher.RETRY_ATTEMPTS) + self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS) def test_error_raised_on_no_json_response(self): # If the service returns a sucess response but with a body that # does not contain JSON, we should still retry up to RETRY_ATTEMPTS, # but after exhausting retries we propagate the exception. self.set_http_responses_to( - self.fake_response(status_code=200, body='Not JSON'), - self.fake_response(status_code=200, body='Not JSON'), - self.fake_response(status_code=200, body='Not JSON'), + self.fake_response(status_code=200, body=b'Not JSON'), + self.fake_response(status_code=200, body=b'Not JSON'), + self.fake_response(status_code=200, body=b'Not JSON'), ) fetcher = self.create_fetcher() with self.assertRaises(MetadataRetrievalError): fetcher.retrieve_uri('/foo?id=1') # Should have tried up to RETRY_ATTEMPTS. 
- self.assertEqual(self.http.get.call_count, fetcher.RETRY_ATTEMPTS) + self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS) def test_can_retrieve_full_uri_with_fixed_ip(self): self.assert_can_retrieve_metadata_from( @@ -1732,25 +1750,26 @@ class TestUnsigned(unittest.TestCase): class TestInstanceMetadataFetcher(unittest.TestCase): def setUp(self): - self._requests_patch = mock.patch('botocore.utils.requests') - self._requests = self._requests_patch.start() + urllib3_session_send = 'botocore.httpsession.URLLib3Session.send' + self._urllib3_patch = mock.patch(urllib3_session_send) + self._send = self._urllib3_patch.start() def tearDown(self): - self._requests_patch.stop() + self._urllib3_patch.stop() def test_disabled_by_environment(self): env = {'AWS_EC2_METADATA_DISABLED': 'true'} fetcher = InstanceMetadataFetcher(env=env) result = fetcher.retrieve_iam_role_credentials() self.assertEqual(result, {}) - self._requests.assert_not_called() + self._send.assert_not_called() def test_disabled_by_environment_mixed_case(self): env = {'AWS_EC2_METADATA_DISABLED': 'tRuE'} fetcher = InstanceMetadataFetcher(env=env) result = fetcher.retrieve_iam_role_credentials() self.assertEqual(result, {}) - self._requests.get.assert_not_called() + self._send.assert_not_called() def test_disabling_env_var_not_true(self): url = 'https://example.com/' @@ -1770,7 +1789,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase): creds_response.status_code = 200 creds_response.content = json.dumps(creds).encode('utf-8') - self._requests.get.side_effect = [profiles_response, creds_response] + self._send.side_effect = [profiles_response, creds_response] fetcher = InstanceMetadataFetcher(url=url, env=env) result = fetcher.retrieve_iam_role_credentials() @@ -1788,5 +1807,5 @@ class TestInstanceMetadataFetcher(unittest.TestCase): user_agent = 'my-user-agent' InstanceMetadataFetcher( user_agent=user_agent).retrieve_iam_role_credentials() - headers = 
self._requests.get.call_args[1]['headers'] + headers = self._send.call_args[0][0].headers self.assertEqual(headers['User-Agent'], user_agent)